PR: mgdevstack: Promote e2e verifying different types of services and their behaviours
Result: FAILURE
Tests: 1 failed / 2470 succeeded
Started: 2019-08-14 10:39
Elapsed: 26m46s
Revision:
Builder: gke-prow-ssd-pool-1a225945-j823
Refs: master:34791349, 77865:7c64fc9d
pod: b5476a17-be7f-11e9-bd2d-f6f3c4187ecc
infra-commit: 381773791
repo: k8s.io/kubernetes
repo-commit: 2776103fe7449776ae5fcb4680f8d284604348f1
repos: {u'k8s.io/kubernetes': u'master:34791349d656a9f8e45b7093012e29ad08782ffa,77865:7c64fc9d98580710524814dcf365ed1044a3f841'}

Test Failures


k8s.io/kubernetes/test/integration/scheduler TestPreemptWithPermitPlugin 1m4s

go test -v k8s.io/kubernetes/test/integration/scheduler -run TestPreemptWithPermitPlugin$
=== RUN   TestPreemptWithPermitPlugin
I0814 11:01:54.608825  110631 services.go:33] Network range for service cluster IPs is unspecified. Defaulting to {10.0.0.0 ffffff00}.
I0814 11:01:54.608858  110631 services.go:45] Setting service IP to "10.0.0.1" (read-write).
I0814 11:01:54.608873  110631 master.go:278] Node port range unspecified. Defaulting to 30000-32767.
I0814 11:01:54.608884  110631 master.go:234] Using reconciler: 
I0814 11:01:54.610660  110631 storage_factory.go:285] storing podtemplates in v1, reading as __internal from storagebackend.Config{Type:"", Prefix:"587e2f37-98f6-4d4b-a365-553ce9fb180a", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0814 11:01:54.610770  110631 client.go:354] parsed scheme: ""
I0814 11:01:54.610783  110631 client.go:354] scheme "" not registered, fallback to default scheme
I0814 11:01:54.610834  110631 asm_amd64.s:1337] ccResolverWrapper: sending new addresses to cc: [{127.0.0.1:2379 0  <nil>}]
I0814 11:01:54.610953  110631 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0814 11:01:54.611399  110631 balancer_conn_wrappers.go:131] clientv3/balancer: pin "127.0.0.1:2379"
I0814 11:01:54.611512  110631 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0814 11:01:54.611618  110631 store.go:1342] Monitoring podtemplates count at <storage-prefix>//podtemplates
I0814 11:01:54.611661  110631 storage_factory.go:285] storing events in v1, reading as __internal from storagebackend.Config{Type:"", Prefix:"587e2f37-98f6-4d4b-a365-553ce9fb180a", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0814 11:01:54.611738  110631 reflector.go:160] Listing and watching *core.PodTemplate from storage/cacher.go:/podtemplates
I0814 11:01:54.611828  110631 client.go:354] parsed scheme: ""
I0814 11:01:54.611841  110631 client.go:354] scheme "" not registered, fallback to default scheme
I0814 11:01:54.611869  110631 asm_amd64.s:1337] ccResolverWrapper: sending new addresses to cc: [{127.0.0.1:2379 0  <nil>}]
I0814 11:01:54.611928  110631 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0814 11:01:54.612343  110631 balancer_conn_wrappers.go:131] clientv3/balancer: pin "127.0.0.1:2379"
I0814 11:01:54.612380  110631 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0814 11:01:54.612696  110631 store.go:1342] Monitoring events count at <storage-prefix>//events
I0814 11:01:54.612740  110631 reflector.go:160] Listing and watching *core.Event from storage/cacher.go:/events
I0814 11:01:54.612815  110631 watch_cache.go:405] Replace watchCache (rev: 29418) 
I0814 11:01:54.612862  110631 storage_factory.go:285] storing limitranges in v1, reading as __internal from storagebackend.Config{Type:"", Prefix:"587e2f37-98f6-4d4b-a365-553ce9fb180a", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0814 11:01:54.613123  110631 client.go:354] parsed scheme: ""
I0814 11:01:54.613140  110631 client.go:354] scheme "" not registered, fallback to default scheme
I0814 11:01:54.613276  110631 asm_amd64.s:1337] ccResolverWrapper: sending new addresses to cc: [{127.0.0.1:2379 0  <nil>}]
I0814 11:01:54.613438  110631 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0814 11:01:54.613858  110631 balancer_conn_wrappers.go:131] clientv3/balancer: pin "127.0.0.1:2379"
I0814 11:01:54.613877  110631 watch_cache.go:405] Replace watchCache (rev: 29418) 
I0814 11:01:54.613886  110631 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0814 11:01:54.614127  110631 store.go:1342] Monitoring limitranges count at <storage-prefix>//limitranges
I0814 11:01:54.614150  110631 reflector.go:160] Listing and watching *core.LimitRange from storage/cacher.go:/limitranges
I0814 11:01:54.614168  110631 storage_factory.go:285] storing resourcequotas in v1, reading as __internal from storagebackend.Config{Type:"", Prefix:"587e2f37-98f6-4d4b-a365-553ce9fb180a", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0814 11:01:54.614237  110631 client.go:354] parsed scheme: ""
I0814 11:01:54.614247  110631 client.go:354] scheme "" not registered, fallback to default scheme
I0814 11:01:54.614276  110631 asm_amd64.s:1337] ccResolverWrapper: sending new addresses to cc: [{127.0.0.1:2379 0  <nil>}]
I0814 11:01:54.614324  110631 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0814 11:01:54.614612  110631 balancer_conn_wrappers.go:131] clientv3/balancer: pin "127.0.0.1:2379"
I0814 11:01:54.614652  110631 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0814 11:01:54.614725  110631 store.go:1342] Monitoring resourcequotas count at <storage-prefix>//resourcequotas
I0814 11:01:54.614792  110631 reflector.go:160] Listing and watching *core.ResourceQuota from storage/cacher.go:/resourcequotas
I0814 11:01:54.614910  110631 storage_factory.go:285] storing secrets in v1, reading as __internal from storagebackend.Config{Type:"", Prefix:"587e2f37-98f6-4d4b-a365-553ce9fb180a", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0814 11:01:54.614977  110631 client.go:354] parsed scheme: ""
I0814 11:01:54.614989  110631 client.go:354] scheme "" not registered, fallback to default scheme
I0814 11:01:54.615021  110631 asm_amd64.s:1337] ccResolverWrapper: sending new addresses to cc: [{127.0.0.1:2379 0  <nil>}]
I0814 11:01:54.615077  110631 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0814 11:01:54.615389  110631 balancer_conn_wrappers.go:131] clientv3/balancer: pin "127.0.0.1:2379"
I0814 11:01:54.615568  110631 store.go:1342] Monitoring secrets count at <storage-prefix>//secrets
I0814 11:01:54.615643  110631 watch_cache.go:405] Replace watchCache (rev: 29418) 
I0814 11:01:54.615731  110631 storage_factory.go:285] storing persistentvolumes in v1, reading as __internal from storagebackend.Config{Type:"", Prefix:"587e2f37-98f6-4d4b-a365-553ce9fb180a", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0814 11:01:54.615810  110631 client.go:354] parsed scheme: ""
I0814 11:01:54.615821  110631 client.go:354] scheme "" not registered, fallback to default scheme
I0814 11:01:54.615853  110631 asm_amd64.s:1337] ccResolverWrapper: sending new addresses to cc: [{127.0.0.1:2379 0  <nil>}]
I0814 11:01:54.615907  110631 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0814 11:01:54.615939  110631 reflector.go:160] Listing and watching *core.Secret from storage/cacher.go:/secrets
I0814 11:01:54.615984  110631 watch_cache.go:405] Replace watchCache (rev: 29418) 
I0814 11:01:54.616118  110631 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0814 11:01:54.616378  110631 balancer_conn_wrappers.go:131] clientv3/balancer: pin "127.0.0.1:2379"
I0814 11:01:54.616454  110631 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0814 11:01:54.616677  110631 store.go:1342] Monitoring persistentvolumes count at <storage-prefix>//persistentvolumes
I0814 11:01:54.616740  110631 watch_cache.go:405] Replace watchCache (rev: 29418) 
I0814 11:01:54.616859  110631 storage_factory.go:285] storing persistentvolumeclaims in v1, reading as __internal from storagebackend.Config{Type:"", Prefix:"587e2f37-98f6-4d4b-a365-553ce9fb180a", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0814 11:01:54.616924  110631 client.go:354] parsed scheme: ""
I0814 11:01:54.616935  110631 client.go:354] scheme "" not registered, fallback to default scheme
I0814 11:01:54.616973  110631 asm_amd64.s:1337] ccResolverWrapper: sending new addresses to cc: [{127.0.0.1:2379 0  <nil>}]
I0814 11:01:54.617108  110631 reflector.go:160] Listing and watching *core.PersistentVolume from storage/cacher.go:/persistentvolumes
I0814 11:01:54.617128  110631 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0814 11:01:54.617411  110631 balancer_conn_wrappers.go:131] clientv3/balancer: pin "127.0.0.1:2379"
I0814 11:01:54.617570  110631 store.go:1342] Monitoring persistentvolumeclaims count at <storage-prefix>//persistentvolumeclaims
I0814 11:01:54.617709  110631 storage_factory.go:285] storing configmaps in v1, reading as __internal from storagebackend.Config{Type:"", Prefix:"587e2f37-98f6-4d4b-a365-553ce9fb180a", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0814 11:01:54.617770  110631 client.go:354] parsed scheme: ""
I0814 11:01:54.617781  110631 client.go:354] scheme "" not registered, fallback to default scheme
I0814 11:01:54.617815  110631 asm_amd64.s:1337] ccResolverWrapper: sending new addresses to cc: [{127.0.0.1:2379 0  <nil>}]
I0814 11:01:54.617877  110631 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0814 11:01:54.617906  110631 reflector.go:160] Listing and watching *core.PersistentVolumeClaim from storage/cacher.go:/persistentvolumeclaims
I0814 11:01:54.618072  110631 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0814 11:01:54.618316  110631 balancer_conn_wrappers.go:131] clientv3/balancer: pin "127.0.0.1:2379"
I0814 11:01:54.618321  110631 watch_cache.go:405] Replace watchCache (rev: 29418) 
I0814 11:01:54.618354  110631 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0814 11:01:54.618720  110631 store.go:1342] Monitoring configmaps count at <storage-prefix>//configmaps
I0814 11:01:54.618789  110631 reflector.go:160] Listing and watching *core.ConfigMap from storage/cacher.go:/configmaps
I0814 11:01:54.618859  110631 storage_factory.go:285] storing namespaces in v1, reading as __internal from storagebackend.Config{Type:"", Prefix:"587e2f37-98f6-4d4b-a365-553ce9fb180a", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0814 11:01:54.618949  110631 client.go:354] parsed scheme: ""
I0814 11:01:54.618960  110631 client.go:354] scheme "" not registered, fallback to default scheme
I0814 11:01:54.618988  110631 asm_amd64.s:1337] ccResolverWrapper: sending new addresses to cc: [{127.0.0.1:2379 0  <nil>}]
I0814 11:01:54.619040  110631 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0814 11:01:54.619248  110631 balancer_conn_wrappers.go:131] clientv3/balancer: pin "127.0.0.1:2379"
I0814 11:01:54.619284  110631 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0814 11:01:54.619359  110631 store.go:1342] Monitoring namespaces count at <storage-prefix>//namespaces
I0814 11:01:54.619427  110631 reflector.go:160] Listing and watching *core.Namespace from storage/cacher.go:/namespaces
I0814 11:01:54.619521  110631 storage_factory.go:285] storing endpoints in v1, reading as __internal from storagebackend.Config{Type:"", Prefix:"587e2f37-98f6-4d4b-a365-553ce9fb180a", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0814 11:01:54.619588  110631 client.go:354] parsed scheme: ""
I0814 11:01:54.619598  110631 client.go:354] scheme "" not registered, fallback to default scheme
I0814 11:01:54.619626  110631 asm_amd64.s:1337] ccResolverWrapper: sending new addresses to cc: [{127.0.0.1:2379 0  <nil>}]
I0814 11:01:54.619675  110631 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0814 11:01:54.619988  110631 balancer_conn_wrappers.go:131] clientv3/balancer: pin "127.0.0.1:2379"
I0814 11:01:54.620151  110631 store.go:1342] Monitoring endpoints count at <storage-prefix>//services/endpoints
I0814 11:01:54.620260  110631 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0814 11:01:54.620328  110631 reflector.go:160] Listing and watching *core.Endpoints from storage/cacher.go:/services/endpoints
I0814 11:01:54.620327  110631 storage_factory.go:285] storing nodes in v1, reading as __internal from storagebackend.Config{Type:"", Prefix:"587e2f37-98f6-4d4b-a365-553ce9fb180a", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0814 11:01:54.620545  110631 client.go:354] parsed scheme: ""
I0814 11:01:54.620567  110631 client.go:354] scheme "" not registered, fallback to default scheme
I0814 11:01:54.620614  110631 asm_amd64.s:1337] ccResolverWrapper: sending new addresses to cc: [{127.0.0.1:2379 0  <nil>}]
I0814 11:01:54.620770  110631 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0814 11:01:54.621151  110631 balancer_conn_wrappers.go:131] clientv3/balancer: pin "127.0.0.1:2379"
I0814 11:01:54.621217  110631 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0814 11:01:54.621283  110631 store.go:1342] Monitoring nodes count at <storage-prefix>//minions
I0814 11:01:54.621398  110631 reflector.go:160] Listing and watching *core.Node from storage/cacher.go:/minions
I0814 11:01:54.621429  110631 storage_factory.go:285] storing pods in v1, reading as __internal from storagebackend.Config{Type:"", Prefix:"587e2f37-98f6-4d4b-a365-553ce9fb180a", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0814 11:01:54.621510  110631 client.go:354] parsed scheme: ""
I0814 11:01:54.621522  110631 client.go:354] scheme "" not registered, fallback to default scheme
I0814 11:01:54.621575  110631 asm_amd64.s:1337] ccResolverWrapper: sending new addresses to cc: [{127.0.0.1:2379 0  <nil>}]
I0814 11:01:54.621625  110631 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0814 11:01:54.621884  110631 balancer_conn_wrappers.go:131] clientv3/balancer: pin "127.0.0.1:2379"
I0814 11:01:54.622003  110631 store.go:1342] Monitoring pods count at <storage-prefix>//pods
I0814 11:01:54.622144  110631 storage_factory.go:285] storing serviceaccounts in v1, reading as __internal from storagebackend.Config{Type:"", Prefix:"587e2f37-98f6-4d4b-a365-553ce9fb180a", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0814 11:01:54.622210  110631 client.go:354] parsed scheme: ""
I0814 11:01:54.622220  110631 client.go:354] scheme "" not registered, fallback to default scheme
I0814 11:01:54.622256  110631 asm_amd64.s:1337] ccResolverWrapper: sending new addresses to cc: [{127.0.0.1:2379 0  <nil>}]
I0814 11:01:54.622299  110631 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0814 11:01:54.622336  110631 reflector.go:160] Listing and watching *core.Pod from storage/cacher.go:/pods
I0814 11:01:54.622354  110631 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0814 11:01:54.622797  110631 balancer_conn_wrappers.go:131] clientv3/balancer: pin "127.0.0.1:2379"
I0814 11:01:54.622839  110631 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0814 11:01:54.622933  110631 store.go:1342] Monitoring serviceaccounts count at <storage-prefix>//serviceaccounts
I0814 11:01:54.623042  110631 reflector.go:160] Listing and watching *core.ServiceAccount from storage/cacher.go:/serviceaccounts
I0814 11:01:54.623069  110631 storage_factory.go:285] storing services in v1, reading as __internal from storagebackend.Config{Type:"", Prefix:"587e2f37-98f6-4d4b-a365-553ce9fb180a", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0814 11:01:54.623140  110631 client.go:354] parsed scheme: ""
I0814 11:01:54.623151  110631 client.go:354] scheme "" not registered, fallback to default scheme
I0814 11:01:54.623362  110631 asm_amd64.s:1337] ccResolverWrapper: sending new addresses to cc: [{127.0.0.1:2379 0  <nil>}]
I0814 11:01:54.623420  110631 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0814 11:01:54.623705  110631 balancer_conn_wrappers.go:131] clientv3/balancer: pin "127.0.0.1:2379"
I0814 11:01:54.623816  110631 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0814 11:01:54.623846  110631 store.go:1342] Monitoring services count at <storage-prefix>//services/specs
I0814 11:01:54.623872  110631 storage_factory.go:285] storing services in v1, reading as __internal from storagebackend.Config{Type:"", Prefix:"587e2f37-98f6-4d4b-a365-553ce9fb180a", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0814 11:01:54.623889  110631 reflector.go:160] Listing and watching *core.Service from storage/cacher.go:/services/specs
I0814 11:01:54.623963  110631 client.go:354] parsed scheme: ""
I0814 11:01:54.623975  110631 client.go:354] scheme "" not registered, fallback to default scheme
I0814 11:01:54.624004  110631 asm_amd64.s:1337] ccResolverWrapper: sending new addresses to cc: [{127.0.0.1:2379 0  <nil>}]
I0814 11:01:54.624191  110631 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0814 11:01:54.624442  110631 balancer_conn_wrappers.go:131] clientv3/balancer: pin "127.0.0.1:2379"
I0814 11:01:54.624559  110631 client.go:354] parsed scheme: ""
I0814 11:01:54.624571  110631 client.go:354] scheme "" not registered, fallback to default scheme
I0814 11:01:54.624598  110631 asm_amd64.s:1337] ccResolverWrapper: sending new addresses to cc: [{127.0.0.1:2379 0  <nil>}]
I0814 11:01:54.624608  110631 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0814 11:01:54.624686  110631 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0814 11:01:54.625037  110631 balancer_conn_wrappers.go:131] clientv3/balancer: pin "127.0.0.1:2379"
I0814 11:01:54.625106  110631 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0814 11:01:54.625207  110631 storage_factory.go:285] storing replicationcontrollers in v1, reading as __internal from storagebackend.Config{Type:"", Prefix:"587e2f37-98f6-4d4b-a365-553ce9fb180a", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0814 11:01:54.625285  110631 client.go:354] parsed scheme: ""
I0814 11:01:54.625296  110631 client.go:354] scheme "" not registered, fallback to default scheme
I0814 11:01:54.625330  110631 asm_amd64.s:1337] ccResolverWrapper: sending new addresses to cc: [{127.0.0.1:2379 0  <nil>}]
I0814 11:01:54.625380  110631 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0814 11:01:54.625588  110631 watch_cache.go:405] Replace watchCache (rev: 29418) 
I0814 11:01:54.625643  110631 watch_cache.go:405] Replace watchCache (rev: 29418) 
I0814 11:01:54.625669  110631 balancer_conn_wrappers.go:131] clientv3/balancer: pin "127.0.0.1:2379"
I0814 11:01:54.625697  110631 watch_cache.go:405] Replace watchCache (rev: 29418) 
I0814 11:01:54.625807  110631 store.go:1342] Monitoring replicationcontrollers count at <storage-prefix>//controllers
I0814 11:01:54.625883  110631 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0814 11:01:54.626025  110631 reflector.go:160] Listing and watching *core.ReplicationController from storage/cacher.go:/controllers
I0814 11:01:54.626074  110631 watch_cache.go:405] Replace watchCache (rev: 29418) 
I0814 11:01:54.626365  110631 watch_cache.go:405] Replace watchCache (rev: 29418) 
I0814 11:01:54.626388  110631 storage_factory.go:285] storing bindings in v1, reading as __internal from storagebackend.Config{Type:"", Prefix:"587e2f37-98f6-4d4b-a365-553ce9fb180a", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0814 11:01:54.626617  110631 storage_factory.go:285] storing componentstatuses in v1, reading as __internal from storagebackend.Config{Type:"", Prefix:"587e2f37-98f6-4d4b-a365-553ce9fb180a", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0814 11:01:54.626824  110631 watch_cache.go:405] Replace watchCache (rev: 29418) 
I0814 11:01:54.627377  110631 watch_cache.go:405] Replace watchCache (rev: 29418) 
I0814 11:01:54.627926  110631 storage_factory.go:285] storing configmaps in v1, reading as __internal from storagebackend.Config{Type:"", Prefix:"587e2f37-98f6-4d4b-a365-553ce9fb180a", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0814 11:01:54.628896  110631 storage_factory.go:285] storing endpoints in v1, reading as __internal from storagebackend.Config{Type:"", Prefix:"587e2f37-98f6-4d4b-a365-553ce9fb180a", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0814 11:01:54.629367  110631 watch_cache.go:405] Replace watchCache (rev: 29418) 
I0814 11:01:54.629739  110631 storage_factory.go:285] storing events in v1, reading as __internal from storagebackend.Config{Type:"", Prefix:"587e2f37-98f6-4d4b-a365-553ce9fb180a", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0814 11:01:54.630203  110631 watch_cache.go:405] Replace watchCache (rev: 29418) 
I0814 11:01:54.630961  110631 storage_factory.go:285] storing limitranges in v1, reading as __internal from storagebackend.Config{Type:"", Prefix:"587e2f37-98f6-4d4b-a365-553ce9fb180a", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0814 11:01:54.631796  110631 storage_factory.go:285] storing namespaces in v1, reading as __internal from storagebackend.Config{Type:"", Prefix:"587e2f37-98f6-4d4b-a365-553ce9fb180a", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0814 11:01:54.631986  110631 storage_factory.go:285] storing namespaces in v1, reading as __internal from storagebackend.Config{Type:"", Prefix:"587e2f37-98f6-4d4b-a365-553ce9fb180a", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0814 11:01:54.632361  110631 storage_factory.go:285] storing namespaces in v1, reading as __internal from storagebackend.Config{Type:"", Prefix:"587e2f37-98f6-4d4b-a365-553ce9fb180a", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0814 11:01:54.633320  110631 storage_factory.go:285] storing nodes in v1, reading as __internal from storagebackend.Config{Type:"", Prefix:"587e2f37-98f6-4d4b-a365-553ce9fb180a", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0814 11:01:54.634106  110631 storage_factory.go:285] storing nodes in v1, reading as __internal from storagebackend.Config{Type:"", Prefix:"587e2f37-98f6-4d4b-a365-553ce9fb180a", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0814 11:01:54.634331  110631 storage_factory.go:285] storing nodes in v1, reading as __internal from storagebackend.Config{Type:"", Prefix:"587e2f37-98f6-4d4b-a365-553ce9fb180a", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0814 11:01:54.635244  110631 storage_factory.go:285] storing persistentvolumeclaims in v1, reading as __internal from storagebackend.Config{Type:"", Prefix:"587e2f37-98f6-4d4b-a365-553ce9fb180a", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0814 11:01:54.635716  110631 storage_factory.go:285] storing persistentvolumeclaims in v1, reading as __internal from storagebackend.Config{Type:"", Prefix:"587e2f37-98f6-4d4b-a365-553ce9fb180a", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0814 11:01:54.636337  110631 storage_factory.go:285] storing persistentvolumes in v1, reading as __internal from storagebackend.Config{Type:"", Prefix:"587e2f37-98f6-4d4b-a365-553ce9fb180a", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0814 11:01:54.636639  110631 storage_factory.go:285] storing persistentvolumes in v1, reading as __internal from storagebackend.Config{Type:"", Prefix:"587e2f37-98f6-4d4b-a365-553ce9fb180a", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0814 11:01:54.637352  110631 storage_factory.go:285] storing pods in v1, reading as __internal from storagebackend.Config{Type:"", Prefix:"587e2f37-98f6-4d4b-a365-553ce9fb180a", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0814 11:01:54.637612  110631 storage_factory.go:285] storing pods in v1, reading as __internal from storagebackend.Config{Type:"", Prefix:"587e2f37-98f6-4d4b-a365-553ce9fb180a", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0814 11:01:54.637757  110631 storage_factory.go:285] storing pods in v1, reading as __internal from storagebackend.Config{Type:"", Prefix:"587e2f37-98f6-4d4b-a365-553ce9fb180a", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0814 11:01:54.637895  110631 storage_factory.go:285] storing pods in v1, reading as __internal from storagebackend.Config{Type:"", Prefix:"587e2f37-98f6-4d4b-a365-553ce9fb180a", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0814 11:01:54.638088  110631 storage_factory.go:285] storing pods in v1, reading as __internal from storagebackend.Config{Type:"", Prefix:"587e2f37-98f6-4d4b-a365-553ce9fb180a", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0814 11:01:54.638228  110631 storage_factory.go:285] storing pods in v1, reading as __internal from storagebackend.Config{Type:"", Prefix:"587e2f37-98f6-4d4b-a365-553ce9fb180a", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0814 11:01:54.638384  110631 storage_factory.go:285] storing pods in v1, reading as __internal from storagebackend.Config{Type:"", Prefix:"587e2f37-98f6-4d4b-a365-553ce9fb180a", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0814 11:01:54.639094  110631 storage_factory.go:285] storing pods in v1, reading as __internal from storagebackend.Config{Type:"", Prefix:"587e2f37-98f6-4d4b-a365-553ce9fb180a", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0814 11:01:54.639424  110631 storage_factory.go:285] storing pods in v1, reading as __internal from storagebackend.Config{Type:"", Prefix:"587e2f37-98f6-4d4b-a365-553ce9fb180a", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0814 11:01:54.640300  110631 storage_factory.go:285] storing podtemplates in v1, reading as __internal from storagebackend.Config{Type:"", Prefix:"587e2f37-98f6-4d4b-a365-553ce9fb180a", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0814 11:01:54.641115  110631 storage_factory.go:285] storing replicationcontrollers in v1, reading as __internal from storagebackend.Config{Type:"", Prefix:"587e2f37-98f6-4d4b-a365-553ce9fb180a", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0814 11:01:54.641388  110631 storage_factory.go:285] storing replicationcontrollers in v1, reading as __internal from storagebackend.Config{Type:"", Prefix:"587e2f37-98f6-4d4b-a365-553ce9fb180a", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0814 11:01:54.641889  110631 storage_factory.go:285] storing replicationcontrollers in v1, reading as __internal from storagebackend.Config{Type:"", Prefix:"587e2f37-98f6-4d4b-a365-553ce9fb180a", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0814 11:01:54.642707  110631 storage_factory.go:285] storing resourcequotas in v1, reading as __internal from storagebackend.Config{Type:"", Prefix:"587e2f37-98f6-4d4b-a365-553ce9fb180a", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0814 11:01:54.643052  110631 storage_factory.go:285] storing resourcequotas in v1, reading as __internal from storagebackend.Config{Type:"", Prefix:"587e2f37-98f6-4d4b-a365-553ce9fb180a", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0814 11:01:54.643757  110631 storage_factory.go:285] storing secrets in v1, reading as __internal from storagebackend.Config{Type:"", Prefix:"587e2f37-98f6-4d4b-a365-553ce9fb180a", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0814 11:01:54.644506  110631 storage_factory.go:285] storing serviceaccounts in v1, reading as __internal from storagebackend.Config{Type:"", Prefix:"587e2f37-98f6-4d4b-a365-553ce9fb180a", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0814 11:01:54.645165  110631 storage_factory.go:285] storing services in v1, reading as __internal from storagebackend.Config{Type:"", Prefix:"587e2f37-98f6-4d4b-a365-553ce9fb180a", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0814 11:01:54.645998  110631 storage_factory.go:285] storing services in v1, reading as __internal from storagebackend.Config{Type:"", Prefix:"587e2f37-98f6-4d4b-a365-553ce9fb180a", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0814 11:01:54.646296  110631 storage_factory.go:285] storing services in v1, reading as __internal from storagebackend.Config{Type:"", Prefix:"587e2f37-98f6-4d4b-a365-553ce9fb180a", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0814 11:01:54.646431  110631 master.go:423] Skipping disabled API group "auditregistration.k8s.io".
I0814 11:01:54.646455  110631 master.go:434] Enabling API group "authentication.k8s.io".
I0814 11:01:54.646496  110631 master.go:434] Enabling API group "authorization.k8s.io".
I0814 11:01:54.646665  110631 storage_factory.go:285] storing horizontalpodautoscalers.autoscaling in autoscaling/v1, reading as autoscaling/__internal from storagebackend.Config{Type:"", Prefix:"587e2f37-98f6-4d4b-a365-553ce9fb180a", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0814 11:01:54.646782  110631 client.go:354] parsed scheme: ""
I0814 11:01:54.646837  110631 client.go:354] scheme "" not registered, fallback to default scheme
I0814 11:01:54.646916  110631 asm_amd64.s:1337] ccResolverWrapper: sending new addresses to cc: [{127.0.0.1:2379 0  <nil>}]
I0814 11:01:54.647011  110631 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0814 11:01:54.647382  110631 balancer_conn_wrappers.go:131] clientv3/balancer: pin "127.0.0.1:2379"
I0814 11:01:54.647582  110631 store.go:1342] Monitoring horizontalpodautoscalers.autoscaling count at <storage-prefix>//horizontalpodautoscalers
I0814 11:01:54.647750  110631 storage_factory.go:285] storing horizontalpodautoscalers.autoscaling in autoscaling/v1, reading as autoscaling/__internal from storagebackend.Config{Type:"", Prefix:"587e2f37-98f6-4d4b-a365-553ce9fb180a", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0814 11:01:54.647842  110631 client.go:354] parsed scheme: ""
I0814 11:01:54.647856  110631 client.go:354] scheme "" not registered, fallback to default scheme
I0814 11:01:54.647893  110631 asm_amd64.s:1337] ccResolverWrapper: sending new addresses to cc: [{127.0.0.1:2379 0  <nil>}]
I0814 11:01:54.647949  110631 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0814 11:01:54.647998  110631 reflector.go:160] Listing and watching *autoscaling.HorizontalPodAutoscaler from storage/cacher.go:/horizontalpodautoscalers
I0814 11:01:54.648218  110631 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0814 11:01:54.648573  110631 balancer_conn_wrappers.go:131] clientv3/balancer: pin "127.0.0.1:2379"
I0814 11:01:54.648639  110631 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0814 11:01:54.648698  110631 store.go:1342] Monitoring horizontalpodautoscalers.autoscaling count at <storage-prefix>//horizontalpodautoscalers
I0814 11:01:54.648763  110631 reflector.go:160] Listing and watching *autoscaling.HorizontalPodAutoscaler from storage/cacher.go:/horizontalpodautoscalers
I0814 11:01:54.648854  110631 storage_factory.go:285] storing horizontalpodautoscalers.autoscaling in autoscaling/v1, reading as autoscaling/__internal from storagebackend.Config{Type:"", Prefix:"587e2f37-98f6-4d4b-a365-553ce9fb180a", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0814 11:01:54.648927  110631 client.go:354] parsed scheme: ""
I0814 11:01:54.648940  110631 client.go:354] scheme "" not registered, fallback to default scheme
I0814 11:01:54.649004  110631 asm_amd64.s:1337] ccResolverWrapper: sending new addresses to cc: [{127.0.0.1:2379 0  <nil>}]
I0814 11:01:54.649047  110631 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0814 11:01:54.649319  110631 balancer_conn_wrappers.go:131] clientv3/balancer: pin "127.0.0.1:2379"
I0814 11:01:54.649386  110631 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0814 11:01:54.649423  110631 store.go:1342] Monitoring horizontalpodautoscalers.autoscaling count at <storage-prefix>//horizontalpodautoscalers
I0814 11:01:54.649439  110631 master.go:434] Enabling API group "autoscaling".
I0814 11:01:54.649491  110631 reflector.go:160] Listing and watching *autoscaling.HorizontalPodAutoscaler from storage/cacher.go:/horizontalpodautoscalers
I0814 11:01:54.649588  110631 storage_factory.go:285] storing jobs.batch in batch/v1, reading as batch/__internal from storagebackend.Config{Type:"", Prefix:"587e2f37-98f6-4d4b-a365-553ce9fb180a", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0814 11:01:54.649978  110631 client.go:354] parsed scheme: ""
I0814 11:01:54.649998  110631 client.go:354] scheme "" not registered, fallback to default scheme
I0814 11:01:54.650033  110631 asm_amd64.s:1337] ccResolverWrapper: sending new addresses to cc: [{127.0.0.1:2379 0  <nil>}]
I0814 11:01:54.650083  110631 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0814 11:01:54.650086  110631 watch_cache.go:405] Replace watchCache (rev: 29418) 
I0814 11:01:54.650537  110631 balancer_conn_wrappers.go:131] clientv3/balancer: pin "127.0.0.1:2379"
I0814 11:01:54.650676  110631 store.go:1342] Monitoring jobs.batch count at <storage-prefix>//jobs
I0814 11:01:54.650819  110631 storage_factory.go:285] storing cronjobs.batch in batch/v1beta1, reading as batch/__internal from storagebackend.Config{Type:"", Prefix:"587e2f37-98f6-4d4b-a365-553ce9fb180a", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0814 11:01:54.650907  110631 client.go:354] parsed scheme: ""
I0814 11:01:54.650917  110631 client.go:354] scheme "" not registered, fallback to default scheme
I0814 11:01:54.650982  110631 asm_amd64.s:1337] ccResolverWrapper: sending new addresses to cc: [{127.0.0.1:2379 0  <nil>}]
I0814 11:01:54.651046  110631 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0814 11:01:54.651106  110631 reflector.go:160] Listing and watching *batch.Job from storage/cacher.go:/jobs
I0814 11:01:54.651319  110631 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0814 11:01:54.651381  110631 watch_cache.go:405] Replace watchCache (rev: 29418) 
I0814 11:01:54.652095  110631 watch_cache.go:405] Replace watchCache (rev: 29418) 
I0814 11:01:54.652265  110631 balancer_conn_wrappers.go:131] clientv3/balancer: pin "127.0.0.1:2379"
I0814 11:01:54.652513  110631 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0814 11:01:54.652537  110631 store.go:1342] Monitoring cronjobs.batch count at <storage-prefix>//cronjobs
I0814 11:01:54.652610  110631 reflector.go:160] Listing and watching *batch.CronJob from storage/cacher.go:/cronjobs
I0814 11:01:54.652666  110631 master.go:434] Enabling API group "batch".
I0814 11:01:54.652895  110631 storage_factory.go:285] storing certificatesigningrequests.certificates.k8s.io in certificates.k8s.io/v1beta1, reading as certificates.k8s.io/__internal from storagebackend.Config{Type:"", Prefix:"587e2f37-98f6-4d4b-a365-553ce9fb180a", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0814 11:01:54.652984  110631 client.go:354] parsed scheme: ""
I0814 11:01:54.652994  110631 client.go:354] scheme "" not registered, fallback to default scheme
I0814 11:01:54.653027  110631 asm_amd64.s:1337] ccResolverWrapper: sending new addresses to cc: [{127.0.0.1:2379 0  <nil>}]
I0814 11:01:54.653115  110631 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0814 11:01:54.653494  110631 balancer_conn_wrappers.go:131] clientv3/balancer: pin "127.0.0.1:2379"
I0814 11:01:54.653614  110631 store.go:1342] Monitoring certificatesigningrequests.certificates.k8s.io count at <storage-prefix>//certificatesigningrequests
I0814 11:01:54.653634  110631 master.go:434] Enabling API group "certificates.k8s.io".
I0814 11:01:54.653670  110631 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0814 11:01:54.653787  110631 storage_factory.go:285] storing leases.coordination.k8s.io in coordination.k8s.io/v1beta1, reading as coordination.k8s.io/__internal from storagebackend.Config{Type:"", Prefix:"587e2f37-98f6-4d4b-a365-553ce9fb180a", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0814 11:01:54.653876  110631 client.go:354] parsed scheme: ""
I0814 11:01:54.653886  110631 client.go:354] scheme "" not registered, fallback to default scheme
I0814 11:01:54.653920  110631 asm_amd64.s:1337] ccResolverWrapper: sending new addresses to cc: [{127.0.0.1:2379 0  <nil>}]
I0814 11:01:54.653809  110631 reflector.go:160] Listing and watching *certificates.CertificateSigningRequest from storage/cacher.go:/certificatesigningrequests
I0814 11:01:54.654109  110631 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0814 11:01:54.654380  110631 balancer_conn_wrappers.go:131] clientv3/balancer: pin "127.0.0.1:2379"
I0814 11:01:54.654521  110631 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0814 11:01:54.654550  110631 store.go:1342] Monitoring leases.coordination.k8s.io count at <storage-prefix>//leases
I0814 11:01:54.654592  110631 reflector.go:160] Listing and watching *coordination.Lease from storage/cacher.go:/leases
I0814 11:01:54.654746  110631 storage_factory.go:285] storing leases.coordination.k8s.io in coordination.k8s.io/v1beta1, reading as coordination.k8s.io/__internal from storagebackend.Config{Type:"", Prefix:"587e2f37-98f6-4d4b-a365-553ce9fb180a", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0814 11:01:54.654824  110631 client.go:354] parsed scheme: ""
I0814 11:01:54.654835  110631 client.go:354] scheme "" not registered, fallback to default scheme
I0814 11:01:54.654863  110631 asm_amd64.s:1337] ccResolverWrapper: sending new addresses to cc: [{127.0.0.1:2379 0  <nil>}]
I0814 11:01:54.654917  110631 watch_cache.go:405] Replace watchCache (rev: 29418) 
I0814 11:01:54.654992  110631 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0814 11:01:54.655331  110631 watch_cache.go:405] Replace watchCache (rev: 29418) 
I0814 11:01:54.655397  110631 balancer_conn_wrappers.go:131] clientv3/balancer: pin "127.0.0.1:2379"
I0814 11:01:54.655546  110631 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0814 11:01:54.655563  110631 store.go:1342] Monitoring leases.coordination.k8s.io count at <storage-prefix>//leases
I0814 11:01:54.655578  110631 master.go:434] Enabling API group "coordination.k8s.io".
I0814 11:01:54.655638  110631 reflector.go:160] Listing and watching *coordination.Lease from storage/cacher.go:/leases
I0814 11:01:54.655714  110631 storage_factory.go:285] storing ingresses.networking.k8s.io in networking.k8s.io/v1beta1, reading as networking.k8s.io/__internal from storagebackend.Config{Type:"", Prefix:"587e2f37-98f6-4d4b-a365-553ce9fb180a", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0814 11:01:54.655772  110631 client.go:354] parsed scheme: ""
I0814 11:01:54.655781  110631 client.go:354] scheme "" not registered, fallback to default scheme
I0814 11:01:54.655808  110631 asm_amd64.s:1337] ccResolverWrapper: sending new addresses to cc: [{127.0.0.1:2379 0  <nil>}]
I0814 11:01:54.655888  110631 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0814 11:01:54.656155  110631 balancer_conn_wrappers.go:131] clientv3/balancer: pin "127.0.0.1:2379"
I0814 11:01:54.656272  110631 store.go:1342] Monitoring ingresses.networking.k8s.io count at <storage-prefix>//ingress
I0814 11:01:54.656280  110631 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0814 11:01:54.656295  110631 master.go:434] Enabling API group "extensions".
I0814 11:01:54.656325  110631 reflector.go:160] Listing and watching *networking.Ingress from storage/cacher.go:/ingress
I0814 11:01:54.656440  110631 storage_factory.go:285] storing networkpolicies.networking.k8s.io in networking.k8s.io/v1, reading as networking.k8s.io/__internal from storagebackend.Config{Type:"", Prefix:"587e2f37-98f6-4d4b-a365-553ce9fb180a", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0814 11:01:54.656608  110631 client.go:354] parsed scheme: ""
I0814 11:01:54.656621  110631 client.go:354] scheme "" not registered, fallback to default scheme
I0814 11:01:54.656656  110631 asm_amd64.s:1337] ccResolverWrapper: sending new addresses to cc: [{127.0.0.1:2379 0  <nil>}]
I0814 11:01:54.656777  110631 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0814 11:01:54.657022  110631 balancer_conn_wrappers.go:131] clientv3/balancer: pin "127.0.0.1:2379"
I0814 11:01:54.657055  110631 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0814 11:01:54.657158  110631 store.go:1342] Monitoring networkpolicies.networking.k8s.io count at <storage-prefix>//networkpolicies
I0814 11:01:54.657197  110631 reflector.go:160] Listing and watching *networking.NetworkPolicy from storage/cacher.go:/networkpolicies
I0814 11:01:54.657309  110631 storage_factory.go:285] storing ingresses.networking.k8s.io in networking.k8s.io/v1beta1, reading as networking.k8s.io/__internal from storagebackend.Config{Type:"", Prefix:"587e2f37-98f6-4d4b-a365-553ce9fb180a", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0814 11:01:54.657390  110631 client.go:354] parsed scheme: ""
I0814 11:01:54.657402  110631 client.go:354] scheme "" not registered, fallback to default scheme
I0814 11:01:54.657432  110631 asm_amd64.s:1337] ccResolverWrapper: sending new addresses to cc: [{127.0.0.1:2379 0  <nil>}]
I0814 11:01:54.657520  110631 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0814 11:01:54.657798  110631 balancer_conn_wrappers.go:131] clientv3/balancer: pin "127.0.0.1:2379"
I0814 11:01:54.657856  110631 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0814 11:01:54.657924  110631 store.go:1342] Monitoring ingresses.networking.k8s.io count at <storage-prefix>//ingress
I0814 11:01:54.657944  110631 master.go:434] Enabling API group "networking.k8s.io".
I0814 11:01:54.657979  110631 storage_factory.go:285] storing runtimeclasses.node.k8s.io in node.k8s.io/v1beta1, reading as node.k8s.io/__internal from storagebackend.Config{Type:"", Prefix:"587e2f37-98f6-4d4b-a365-553ce9fb180a", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0814 11:01:54.658070  110631 client.go:354] parsed scheme: ""
I0814 11:01:54.658081  110631 client.go:354] scheme "" not registered, fallback to default scheme
I0814 11:01:54.658110  110631 asm_amd64.s:1337] ccResolverWrapper: sending new addresses to cc: [{127.0.0.1:2379 0  <nil>}]
I0814 11:01:54.658148  110631 reflector.go:160] Listing and watching *networking.Ingress from storage/cacher.go:/ingress
I0814 11:01:54.658670  110631 watch_cache.go:405] Replace watchCache (rev: 29418) 
I0814 11:01:54.658736  110631 watch_cache.go:405] Replace watchCache (rev: 29418) 
I0814 11:01:54.659120  110631 watch_cache.go:405] Replace watchCache (rev: 29418) 
I0814 11:01:54.659203  110631 watch_cache.go:405] Replace watchCache (rev: 29418) 
I0814 11:01:54.659327  110631 watch_cache.go:405] Replace watchCache (rev: 29418) 
I0814 11:01:54.659580  110631 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0814 11:01:54.659948  110631 balancer_conn_wrappers.go:131] clientv3/balancer: pin "127.0.0.1:2379"
I0814 11:01:54.660123  110631 store.go:1342] Monitoring runtimeclasses.node.k8s.io count at <storage-prefix>//runtimeclasses
I0814 11:01:54.660149  110631 master.go:434] Enabling API group "node.k8s.io".
I0814 11:01:54.660324  110631 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0814 11:01:54.660358  110631 reflector.go:160] Listing and watching *node.RuntimeClass from storage/cacher.go:/runtimeclasses
I0814 11:01:54.660342  110631 storage_factory.go:285] storing poddisruptionbudgets.policy in policy/v1beta1, reading as policy/__internal from storagebackend.Config{Type:"", Prefix:"587e2f37-98f6-4d4b-a365-553ce9fb180a", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0814 11:01:54.660758  110631 client.go:354] parsed scheme: ""
I0814 11:01:54.660815  110631 client.go:354] scheme "" not registered, fallback to default scheme
I0814 11:01:54.660852  110631 asm_amd64.s:1337] ccResolverWrapper: sending new addresses to cc: [{127.0.0.1:2379 0  <nil>}]
I0814 11:01:54.660999  110631 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0814 11:01:54.661233  110631 balancer_conn_wrappers.go:131] clientv3/balancer: pin "127.0.0.1:2379"
I0814 11:01:54.661361  110631 store.go:1342] Monitoring poddisruptionbudgets.policy count at <storage-prefix>//poddisruptionbudgets
I0814 11:01:54.661556  110631 storage_factory.go:285] storing podsecuritypolicies.policy in policy/v1beta1, reading as policy/__internal from storagebackend.Config{Type:"", Prefix:"587e2f37-98f6-4d4b-a365-553ce9fb180a", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0814 11:01:54.661629  110631 client.go:354] parsed scheme: ""
I0814 11:01:54.661640  110631 client.go:354] scheme "" not registered, fallback to default scheme
I0814 11:01:54.661675  110631 asm_amd64.s:1337] ccResolverWrapper: sending new addresses to cc: [{127.0.0.1:2379 0  <nil>}]
I0814 11:01:54.661722  110631 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0814 11:01:54.661754  110631 reflector.go:160] Listing and watching *policy.PodDisruptionBudget from storage/cacher.go:/poddisruptionbudgets
I0814 11:01:54.662009  110631 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0814 11:01:54.662306  110631 balancer_conn_wrappers.go:131] clientv3/balancer: pin "127.0.0.1:2379"
I0814 11:01:54.662517  110631 store.go:1342] Monitoring podsecuritypolicies.policy count at <storage-prefix>//podsecuritypolicy
I0814 11:01:54.662579  110631 master.go:434] Enabling API group "policy".
I0814 11:01:54.662616  110631 storage_factory.go:285] storing roles.rbac.authorization.k8s.io in rbac.authorization.k8s.io/v1, reading as rbac.authorization.k8s.io/__internal from storagebackend.Config{Type:"", Prefix:"587e2f37-98f6-4d4b-a365-553ce9fb180a", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0814 11:01:54.662675  110631 client.go:354] parsed scheme: ""
I0814 11:01:54.662686  110631 client.go:354] scheme "" not registered, fallback to default scheme
I0814 11:01:54.662742  110631 asm_amd64.s:1337] ccResolverWrapper: sending new addresses to cc: [{127.0.0.1:2379 0  <nil>}]
I0814 11:01:54.662785  110631 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0814 11:01:54.662813  110631 reflector.go:160] Listing and watching *policy.PodSecurityPolicy from storage/cacher.go:/podsecuritypolicy
I0814 11:01:54.663169  110631 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0814 11:01:54.663424  110631 balancer_conn_wrappers.go:131] clientv3/balancer: pin "127.0.0.1:2379"
I0814 11:01:54.663552  110631 store.go:1342] Monitoring roles.rbac.authorization.k8s.io count at <storage-prefix>//roles
I0814 11:01:54.663589  110631 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0814 11:01:54.663630  110631 reflector.go:160] Listing and watching *rbac.Role from storage/cacher.go:/roles
I0814 11:01:54.663711  110631 storage_factory.go:285] storing rolebindings.rbac.authorization.k8s.io in rbac.authorization.k8s.io/v1, reading as rbac.authorization.k8s.io/__internal from storagebackend.Config{Type:"", Prefix:"587e2f37-98f6-4d4b-a365-553ce9fb180a", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0814 11:01:54.663783  110631 client.go:354] parsed scheme: ""
I0814 11:01:54.663795  110631 client.go:354] scheme "" not registered, fallback to default scheme
I0814 11:01:54.663845  110631 asm_amd64.s:1337] ccResolverWrapper: sending new addresses to cc: [{127.0.0.1:2379 0  <nil>}]
I0814 11:01:54.663900  110631 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0814 11:01:54.664170  110631 balancer_conn_wrappers.go:131] clientv3/balancer: pin "127.0.0.1:2379"
I0814 11:01:54.664278  110631 store.go:1342] Monitoring rolebindings.rbac.authorization.k8s.io count at <storage-prefix>//rolebindings
I0814 11:01:54.664306  110631 storage_factory.go:285] storing clusterroles.rbac.authorization.k8s.io in rbac.authorization.k8s.io/v1, reading as rbac.authorization.k8s.io/__internal from storagebackend.Config{Type:"", Prefix:"587e2f37-98f6-4d4b-a365-553ce9fb180a", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0814 11:01:54.664444  110631 client.go:354] parsed scheme: ""
I0814 11:01:54.664539  110631 client.go:354] scheme "" not registered, fallback to default scheme
I0814 11:01:54.664717  110631 asm_amd64.s:1337] ccResolverWrapper: sending new addresses to cc: [{127.0.0.1:2379 0  <nil>}]
I0814 11:01:54.664770  110631 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0814 11:01:54.664771  110631 reflector.go:160] Listing and watching *rbac.RoleBinding from storage/cacher.go:/rolebindings
I0814 11:01:54.664906  110631 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0814 11:01:54.665813  110631 balancer_conn_wrappers.go:131] clientv3/balancer: pin "127.0.0.1:2379"
I0814 11:01:54.665930  110631 store.go:1342] Monitoring clusterroles.rbac.authorization.k8s.io count at <storage-prefix>//clusterroles
I0814 11:01:54.666062  110631 watch_cache.go:405] Replace watchCache (rev: 29418) 
I0814 11:01:54.666089  110631 storage_factory.go:285] storing clusterrolebindings.rbac.authorization.k8s.io in rbac.authorization.k8s.io/v1, reading as rbac.authorization.k8s.io/__internal from storagebackend.Config{Type:"", Prefix:"587e2f37-98f6-4d4b-a365-553ce9fb180a", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0814 11:01:54.666161  110631 client.go:354] parsed scheme: ""
I0814 11:01:54.666173  110631 client.go:354] scheme "" not registered, fallback to default scheme
I0814 11:01:54.666235  110631 asm_amd64.s:1337] ccResolverWrapper: sending new addresses to cc: [{127.0.0.1:2379 0  <nil>}]
I0814 11:01:54.666283  110631 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0814 11:01:54.666328  110631 reflector.go:160] Listing and watching *rbac.ClusterRole from storage/cacher.go:/clusterroles
I0814 11:01:54.666675  110631 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0814 11:01:54.666906  110631 watch_cache.go:405] Replace watchCache (rev: 29418) 
I0814 11:01:54.666927  110631 watch_cache.go:405] Replace watchCache (rev: 29418) 
I0814 11:01:54.667156  110631 watch_cache.go:405] Replace watchCache (rev: 29418) 
I0814 11:01:54.667570  110631 balancer_conn_wrappers.go:131] clientv3/balancer: pin "127.0.0.1:2379"
I0814 11:01:54.667697  110631 store.go:1342] Monitoring clusterrolebindings.rbac.authorization.k8s.io count at <storage-prefix>//clusterrolebindings
I0814 11:01:54.667764  110631 storage_factory.go:285] storing roles.rbac.authorization.k8s.io in rbac.authorization.k8s.io/v1, reading as rbac.authorization.k8s.io/__internal from storagebackend.Config{Type:"", Prefix:"587e2f37-98f6-4d4b-a365-553ce9fb180a", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0814 11:01:54.667845  110631 client.go:354] parsed scheme: ""
I0814 11:01:54.667856  110631 client.go:354] scheme "" not registered, fallback to default scheme
I0814 11:01:54.667889  110631 asm_amd64.s:1337] ccResolverWrapper: sending new addresses to cc: [{127.0.0.1:2379 0  <nil>}]
I0814 11:01:54.667985  110631 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0814 11:01:54.668022  110631 reflector.go:160] Listing and watching *rbac.ClusterRoleBinding from storage/cacher.go:/clusterrolebindings
I0814 11:01:54.668256  110631 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0814 11:01:54.668533  110631 balancer_conn_wrappers.go:131] clientv3/balancer: pin "127.0.0.1:2379"
I0814 11:01:54.668617  110631 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0814 11:01:54.668649  110631 store.go:1342] Monitoring roles.rbac.authorization.k8s.io count at <storage-prefix>//roles
I0814 11:01:54.668797  110631 storage_factory.go:285] storing rolebindings.rbac.authorization.k8s.io in rbac.authorization.k8s.io/v1, reading as rbac.authorization.k8s.io/__internal from storagebackend.Config{Type:"", Prefix:"587e2f37-98f6-4d4b-a365-553ce9fb180a", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0814 11:01:54.668859  110631 client.go:354] parsed scheme: ""
I0814 11:01:54.668870  110631 client.go:354] scheme "" not registered, fallback to default scheme
I0814 11:01:54.668901  110631 asm_amd64.s:1337] ccResolverWrapper: sending new addresses to cc: [{127.0.0.1:2379 0  <nil>}]
I0814 11:01:54.668934  110631 reflector.go:160] Listing and watching *rbac.Role from storage/cacher.go:/roles
I0814 11:01:54.669165  110631 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0814 11:01:54.669502  110631 balancer_conn_wrappers.go:131] clientv3/balancer: pin "127.0.0.1:2379"
I0814 11:01:54.669590  110631 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0814 11:01:54.669603  110631 store.go:1342] Monitoring rolebindings.rbac.authorization.k8s.io count at <storage-prefix>//rolebindings
I0814 11:01:54.669639  110631 storage_factory.go:285] storing clusterroles.rbac.authorization.k8s.io in rbac.authorization.k8s.io/v1, reading as rbac.authorization.k8s.io/__internal from storagebackend.Config{Type:"", Prefix:"587e2f37-98f6-4d4b-a365-553ce9fb180a", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0814 11:01:54.669713  110631 client.go:354] parsed scheme: ""
I0814 11:01:54.669721  110631 client.go:354] scheme "" not registered, fallback to default scheme
I0814 11:01:54.669741  110631 asm_amd64.s:1337] ccResolverWrapper: sending new addresses to cc: [{127.0.0.1:2379 0  <nil>}]
I0814 11:01:54.669775  110631 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0814 11:01:54.669851  110631 reflector.go:160] Listing and watching *rbac.RoleBinding from storage/cacher.go:/rolebindings
I0814 11:01:54.671373  110631 watch_cache.go:405] Replace watchCache (rev: 29418) 
I0814 11:01:54.671607  110631 watch_cache.go:405] Replace watchCache (rev: 29418) 
I0814 11:01:54.671374  110631 watch_cache.go:405] Replace watchCache (rev: 29418) 
I0814 11:01:54.671704  110631 watch_cache.go:405] Replace watchCache (rev: 29418) 
I0814 11:01:54.672081  110631 balancer_conn_wrappers.go:131] clientv3/balancer: pin "127.0.0.1:2379"
I0814 11:01:54.672202  110631 store.go:1342] Monitoring clusterroles.rbac.authorization.k8s.io count at <storage-prefix>//clusterroles
I0814 11:01:54.672356  110631 storage_factory.go:285] storing clusterrolebindings.rbac.authorization.k8s.io in rbac.authorization.k8s.io/v1, reading as rbac.authorization.k8s.io/__internal from storagebackend.Config{Type:"", Prefix:"587e2f37-98f6-4d4b-a365-553ce9fb180a", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0814 11:01:54.672423  110631 client.go:354] parsed scheme: ""
I0814 11:01:54.672433  110631 client.go:354] scheme "" not registered, fallback to default scheme
I0814 11:01:54.672522  110631 asm_amd64.s:1337] ccResolverWrapper: sending new addresses to cc: [{127.0.0.1:2379 0  <nil>}]
I0814 11:01:54.672601  110631 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0814 11:01:54.672635  110631 reflector.go:160] Listing and watching *rbac.ClusterRole from storage/cacher.go:/clusterroles
I0814 11:01:54.672818  110631 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0814 11:01:54.673002  110631 watch_cache.go:405] Replace watchCache (rev: 29418) 
I0814 11:01:54.673141  110631 balancer_conn_wrappers.go:131] clientv3/balancer: pin "127.0.0.1:2379"
I0814 11:01:54.673245  110631 store.go:1342] Monitoring clusterrolebindings.rbac.authorization.k8s.io count at <storage-prefix>//clusterrolebindings
I0814 11:01:54.673272  110631 master.go:434] Enabling API group "rbac.authorization.k8s.io".
I0814 11:01:54.673353  110631 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0814 11:01:54.673374  110631 watch_cache.go:405] Replace watchCache (rev: 29418) 
I0814 11:01:54.673410  110631 reflector.go:160] Listing and watching *rbac.ClusterRoleBinding from storage/cacher.go:/clusterrolebindings
I0814 11:01:54.673827  110631 watch_cache.go:405] Replace watchCache (rev: 29418) 
I0814 11:01:54.674496  110631 watch_cache.go:405] Replace watchCache (rev: 29418) 
I0814 11:01:54.675926  110631 storage_factory.go:285] storing priorityclasses.scheduling.k8s.io in scheduling.k8s.io/v1, reading as scheduling.k8s.io/__internal from storagebackend.Config{Type:"", Prefix:"587e2f37-98f6-4d4b-a365-553ce9fb180a", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0814 11:01:54.676049  110631 client.go:354] parsed scheme: ""
I0814 11:01:54.676062  110631 client.go:354] scheme "" not registered, fallback to default scheme
I0814 11:01:54.676104  110631 asm_amd64.s:1337] ccResolverWrapper: sending new addresses to cc: [{127.0.0.1:2379 0  <nil>}]
I0814 11:01:54.676332  110631 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0814 11:01:54.676713  110631 balancer_conn_wrappers.go:131] clientv3/balancer: pin "127.0.0.1:2379"
I0814 11:01:54.676749  110631 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0814 11:01:54.677053  110631 store.go:1342] Monitoring priorityclasses.scheduling.k8s.io count at <storage-prefix>//priorityclasses
I0814 11:01:54.677110  110631 reflector.go:160] Listing and watching *scheduling.PriorityClass from storage/cacher.go:/priorityclasses
I0814 11:01:54.677495  110631 storage_factory.go:285] storing priorityclasses.scheduling.k8s.io in scheduling.k8s.io/v1, reading as scheduling.k8s.io/__internal from storagebackend.Config{Type:"", Prefix:"587e2f37-98f6-4d4b-a365-553ce9fb180a", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0814 11:01:54.677595  110631 client.go:354] parsed scheme: ""
I0814 11:01:54.677609  110631 client.go:354] scheme "" not registered, fallback to default scheme
I0814 11:01:54.677693  110631 asm_amd64.s:1337] ccResolverWrapper: sending new addresses to cc: [{127.0.0.1:2379 0  <nil>}]
I0814 11:01:54.677757  110631 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0814 11:01:54.678042  110631 balancer_conn_wrappers.go:131] clientv3/balancer: pin "127.0.0.1:2379"
I0814 11:01:54.678090  110631 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0814 11:01:54.678922  110631 watch_cache.go:405] Replace watchCache (rev: 29419) 
I0814 11:01:54.679233  110631 store.go:1342] Monitoring priorityclasses.scheduling.k8s.io count at <storage-prefix>//priorityclasses
I0814 11:01:54.679259  110631 master.go:434] Enabling API group "scheduling.k8s.io".
I0814 11:01:54.679353  110631 reflector.go:160] Listing and watching *scheduling.PriorityClass from storage/cacher.go:/priorityclasses
I0814 11:01:54.680349  110631 master.go:423] Skipping disabled API group "settings.k8s.io".
I0814 11:01:54.680499  110631 watch_cache.go:405] Replace watchCache (rev: 29419) 
I0814 11:01:54.681029  110631 storage_factory.go:285] storing storageclasses.storage.k8s.io in storage.k8s.io/v1, reading as storage.k8s.io/__internal from storagebackend.Config{Type:"", Prefix:"587e2f37-98f6-4d4b-a365-553ce9fb180a", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0814 11:01:54.681587  110631 client.go:354] parsed scheme: ""
I0814 11:01:54.681609  110631 client.go:354] scheme "" not registered, fallback to default scheme
I0814 11:01:54.681715  110631 asm_amd64.s:1337] ccResolverWrapper: sending new addresses to cc: [{127.0.0.1:2379 0  <nil>}]
I0814 11:01:54.681829  110631 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0814 11:01:54.682611  110631 balancer_conn_wrappers.go:131] clientv3/balancer: pin "127.0.0.1:2379"
I0814 11:01:54.682787  110631 store.go:1342] Monitoring storageclasses.storage.k8s.io count at <storage-prefix>//storageclasses
I0814 11:01:54.682945  110631 storage_factory.go:285] storing volumeattachments.storage.k8s.io in storage.k8s.io/v1, reading as storage.k8s.io/__internal from storagebackend.Config{Type:"", Prefix:"587e2f37-98f6-4d4b-a365-553ce9fb180a", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0814 11:01:54.683002  110631 client.go:354] parsed scheme: ""
I0814 11:01:54.683013  110631 client.go:354] scheme "" not registered, fallback to default scheme
I0814 11:01:54.683050  110631 asm_amd64.s:1337] ccResolverWrapper: sending new addresses to cc: [{127.0.0.1:2379 0  <nil>}]
I0814 11:01:54.683106  110631 reflector.go:160] Listing and watching *storage.StorageClass from storage/cacher.go:/storageclasses
I0814 11:01:54.683268  110631 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0814 11:01:54.683492  110631 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0814 11:01:54.683899  110631 balancer_conn_wrappers.go:131] clientv3/balancer: pin "127.0.0.1:2379"
I0814 11:01:54.684028  110631 store.go:1342] Monitoring volumeattachments.storage.k8s.io count at <storage-prefix>//volumeattachments
I0814 11:01:54.684066  110631 storage_factory.go:285] storing csinodes.storage.k8s.io in storage.k8s.io/v1beta1, reading as storage.k8s.io/__internal from storagebackend.Config{Type:"", Prefix:"587e2f37-98f6-4d4b-a365-553ce9fb180a", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0814 11:01:54.684131  110631 client.go:354] parsed scheme: ""
I0814 11:01:54.684149  110631 client.go:354] scheme "" not registered, fallback to default scheme
I0814 11:01:54.684181  110631 asm_amd64.s:1337] ccResolverWrapper: sending new addresses to cc: [{127.0.0.1:2379 0  <nil>}]
I0814 11:01:54.684245  110631 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0814 11:01:54.684313  110631 reflector.go:160] Listing and watching *storage.VolumeAttachment from storage/cacher.go:/volumeattachments
I0814 11:01:54.684564  110631 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0814 11:01:54.684945  110631 balancer_conn_wrappers.go:131] clientv3/balancer: pin "127.0.0.1:2379"
I0814 11:01:54.685100  110631 store.go:1342] Monitoring csinodes.storage.k8s.io count at <storage-prefix>//csinodes
I0814 11:01:54.685151  110631 storage_factory.go:285] storing csidrivers.storage.k8s.io in storage.k8s.io/v1beta1, reading as storage.k8s.io/__internal from storagebackend.Config{Type:"", Prefix:"587e2f37-98f6-4d4b-a365-553ce9fb180a", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0814 11:01:54.685275  110631 client.go:354] parsed scheme: ""
I0814 11:01:54.685300  110631 client.go:354] scheme "" not registered, fallback to default scheme
I0814 11:01:54.685338  110631 asm_amd64.s:1337] ccResolverWrapper: sending new addresses to cc: [{127.0.0.1:2379 0  <nil>}]
I0814 11:01:54.685404  110631 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0814 11:01:54.685441  110631 reflector.go:160] Listing and watching *storage.CSINode from storage/cacher.go:/csinodes
I0814 11:01:54.685881  110631 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0814 11:01:54.687183  110631 watch_cache.go:405] Replace watchCache (rev: 29419) 
I0814 11:01:54.687184  110631 watch_cache.go:405] Replace watchCache (rev: 29419) 
I0814 11:01:54.687315  110631 watch_cache.go:405] Replace watchCache (rev: 29419) 
I0814 11:01:54.687681  110631 balancer_conn_wrappers.go:131] clientv3/balancer: pin "127.0.0.1:2379"
I0814 11:01:54.687772  110631 store.go:1342] Monitoring csidrivers.storage.k8s.io count at <storage-prefix>//csidrivers
I0814 11:01:54.687773  110631 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0814 11:01:54.687919  110631 reflector.go:160] Listing and watching *storage.CSIDriver from storage/cacher.go:/csidrivers
I0814 11:01:54.687952  110631 storage_factory.go:285] storing storageclasses.storage.k8s.io in storage.k8s.io/v1, reading as storage.k8s.io/__internal from storagebackend.Config{Type:"", Prefix:"587e2f37-98f6-4d4b-a365-553ce9fb180a", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0814 11:01:54.688058  110631 client.go:354] parsed scheme: ""
I0814 11:01:54.688070  110631 client.go:354] scheme "" not registered, fallback to default scheme
I0814 11:01:54.688102  110631 asm_amd64.s:1337] ccResolverWrapper: sending new addresses to cc: [{127.0.0.1:2379 0  <nil>}]
I0814 11:01:54.688192  110631 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0814 11:01:54.688525  110631 balancer_conn_wrappers.go:131] clientv3/balancer: pin "127.0.0.1:2379"
I0814 11:01:54.688728  110631 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0814 11:01:54.688879  110631 store.go:1342] Monitoring storageclasses.storage.k8s.io count at <storage-prefix>//storageclasses
I0814 11:01:54.689143  110631 storage_factory.go:285] storing volumeattachments.storage.k8s.io in storage.k8s.io/v1, reading as storage.k8s.io/__internal from storagebackend.Config{Type:"", Prefix:"587e2f37-98f6-4d4b-a365-553ce9fb180a", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0814 11:01:54.689253  110631 client.go:354] parsed scheme: ""
I0814 11:01:54.689264  110631 client.go:354] scheme "" not registered, fallback to default scheme
I0814 11:01:54.689425  110631 asm_amd64.s:1337] ccResolverWrapper: sending new addresses to cc: [{127.0.0.1:2379 0  <nil>}]
I0814 11:01:54.689513  110631 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0814 11:01:54.689853  110631 balancer_conn_wrappers.go:131] clientv3/balancer: pin "127.0.0.1:2379"
I0814 11:01:54.689955  110631 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0814 11:01:54.689984  110631 store.go:1342] Monitoring volumeattachments.storage.k8s.io count at <storage-prefix>//volumeattachments
I0814 11:01:54.690003  110631 master.go:434] Enabling API group "storage.k8s.io".
I0814 11:01:54.690038  110631 reflector.go:160] Listing and watching *storage.VolumeAttachment from storage/cacher.go:/volumeattachments
I0814 11:01:54.690341  110631 reflector.go:160] Listing and watching *storage.StorageClass from storage/cacher.go:/storageclasses
I0814 11:01:54.691289  110631 watch_cache.go:405] Replace watchCache (rev: 29420) 
I0814 11:01:54.691292  110631 watch_cache.go:405] Replace watchCache (rev: 29420) 
I0814 11:01:54.691424  110631 watch_cache.go:405] Replace watchCache (rev: 29420) 
I0814 11:01:54.690225  110631 storage_factory.go:285] storing deployments.apps in apps/v1, reading as apps/__internal from storagebackend.Config{Type:"", Prefix:"587e2f37-98f6-4d4b-a365-553ce9fb180a", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0814 11:01:54.692478  110631 client.go:354] parsed scheme: ""
I0814 11:01:54.692499  110631 client.go:354] scheme "" not registered, fallback to default scheme
I0814 11:01:54.692535  110631 asm_amd64.s:1337] ccResolverWrapper: sending new addresses to cc: [{127.0.0.1:2379 0  <nil>}]
I0814 11:01:54.692594  110631 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0814 11:01:54.693571  110631 balancer_conn_wrappers.go:131] clientv3/balancer: pin "127.0.0.1:2379"
I0814 11:01:54.693614  110631 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0814 11:01:54.693777  110631 store.go:1342] Monitoring deployments.apps count at <storage-prefix>//deployments
I0814 11:01:54.693957  110631 reflector.go:160] Listing and watching *apps.Deployment from storage/cacher.go:/deployments
I0814 11:01:54.693955  110631 storage_factory.go:285] storing statefulsets.apps in apps/v1, reading as apps/__internal from storagebackend.Config{Type:"", Prefix:"587e2f37-98f6-4d4b-a365-553ce9fb180a", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0814 11:01:54.694035  110631 client.go:354] parsed scheme: ""
I0814 11:01:54.694055  110631 client.go:354] scheme "" not registered, fallback to default scheme
I0814 11:01:54.694089  110631 asm_amd64.s:1337] ccResolverWrapper: sending new addresses to cc: [{127.0.0.1:2379 0  <nil>}]
I0814 11:01:54.694170  110631 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0814 11:01:54.694502  110631 balancer_conn_wrappers.go:131] clientv3/balancer: pin "127.0.0.1:2379"
I0814 11:01:54.694538  110631 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0814 11:01:54.694831  110631 store.go:1342] Monitoring statefulsets.apps count at <storage-prefix>//statefulsets
I0814 11:01:54.694860  110631 watch_cache.go:405] Replace watchCache (rev: 29420) 
I0814 11:01:54.694913  110631 reflector.go:160] Listing and watching *apps.StatefulSet from storage/cacher.go:/statefulsets
I0814 11:01:54.694993  110631 storage_factory.go:285] storing daemonsets.apps in apps/v1, reading as apps/__internal from storagebackend.Config{Type:"", Prefix:"587e2f37-98f6-4d4b-a365-553ce9fb180a", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0814 11:01:54.695050  110631 client.go:354] parsed scheme: ""
I0814 11:01:54.695058  110631 client.go:354] scheme "" not registered, fallback to default scheme
I0814 11:01:54.695088  110631 asm_amd64.s:1337] ccResolverWrapper: sending new addresses to cc: [{127.0.0.1:2379 0  <nil>}]
I0814 11:01:54.695133  110631 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0814 11:01:54.695456  110631 balancer_conn_wrappers.go:131] clientv3/balancer: pin "127.0.0.1:2379"
I0814 11:01:54.695641  110631 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0814 11:01:54.695677  110631 store.go:1342] Monitoring daemonsets.apps count at <storage-prefix>//daemonsets
I0814 11:01:54.695766  110631 reflector.go:160] Listing and watching *apps.DaemonSet from storage/cacher.go:/daemonsets
I0814 11:01:54.695995  110631 watch_cache.go:405] Replace watchCache (rev: 29420) 
I0814 11:01:54.696341  110631 storage_factory.go:285] storing replicasets.apps in apps/v1, reading as apps/__internal from storagebackend.Config{Type:"", Prefix:"587e2f37-98f6-4d4b-a365-553ce9fb180a", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0814 11:01:54.696438  110631 client.go:354] parsed scheme: ""
I0814 11:01:54.696454  110631 client.go:354] scheme "" not registered, fallback to default scheme
I0814 11:01:54.696505  110631 asm_amd64.s:1337] ccResolverWrapper: sending new addresses to cc: [{127.0.0.1:2379 0  <nil>}]
I0814 11:01:54.696839  110631 watch_cache.go:405] Replace watchCache (rev: 29420) 
I0814 11:01:54.697209  110631 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0814 11:01:54.697666  110631 balancer_conn_wrappers.go:131] clientv3/balancer: pin "127.0.0.1:2379"
I0814 11:01:54.697744  110631 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0814 11:01:54.697797  110631 store.go:1342] Monitoring replicasets.apps count at <storage-prefix>//replicasets
I0814 11:01:54.697900  110631 reflector.go:160] Listing and watching *apps.ReplicaSet from storage/cacher.go:/replicasets
I0814 11:01:54.697942  110631 storage_factory.go:285] storing controllerrevisions.apps in apps/v1, reading as apps/__internal from storagebackend.Config{Type:"", Prefix:"587e2f37-98f6-4d4b-a365-553ce9fb180a", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0814 11:01:54.698015  110631 client.go:354] parsed scheme: ""
I0814 11:01:54.698027  110631 client.go:354] scheme "" not registered, fallback to default scheme
I0814 11:01:54.698097  110631 asm_amd64.s:1337] ccResolverWrapper: sending new addresses to cc: [{127.0.0.1:2379 0  <nil>}]
I0814 11:01:54.698354  110631 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0814 11:01:54.698665  110631 balancer_conn_wrappers.go:131] clientv3/balancer: pin "127.0.0.1:2379"
I0814 11:01:54.698726  110631 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0814 11:01:54.698789  110631 store.go:1342] Monitoring controllerrevisions.apps count at <storage-prefix>//controllerrevisions
I0814 11:01:54.698816  110631 master.go:434] Enabling API group "apps".
I0814 11:01:54.698852  110631 reflector.go:160] Listing and watching *apps.ControllerRevision from storage/cacher.go:/controllerrevisions
I0814 11:01:54.698852  110631 storage_factory.go:285] storing validatingwebhookconfigurations.admissionregistration.k8s.io in admissionregistration.k8s.io/v1beta1, reading as admissionregistration.k8s.io/__internal from storagebackend.Config{Type:"", Prefix:"587e2f37-98f6-4d4b-a365-553ce9fb180a", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0814 11:01:54.698941  110631 client.go:354] parsed scheme: ""
I0814 11:01:54.698961  110631 client.go:354] scheme "" not registered, fallback to default scheme
I0814 11:01:54.698994  110631 asm_amd64.s:1337] ccResolverWrapper: sending new addresses to cc: [{127.0.0.1:2379 0  <nil>}]
I0814 11:01:54.699054  110631 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0814 11:01:54.699357  110631 balancer_conn_wrappers.go:131] clientv3/balancer: pin "127.0.0.1:2379"
I0814 11:01:54.699549  110631 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0814 11:01:54.699671  110631 store.go:1342] Monitoring validatingwebhookconfigurations.admissionregistration.k8s.io count at <storage-prefix>//validatingwebhookconfigurations
I0814 11:01:54.699814  110631 reflector.go:160] Listing and watching *admissionregistration.ValidatingWebhookConfiguration from storage/cacher.go:/validatingwebhookconfigurations
I0814 11:01:54.699896  110631 watch_cache.go:405] Replace watchCache (rev: 29420) 
I0814 11:01:54.699975  110631 watch_cache.go:405] Replace watchCache (rev: 29420) 
I0814 11:01:54.700155  110631 storage_factory.go:285] storing mutatingwebhookconfigurations.admissionregistration.k8s.io in admissionregistration.k8s.io/v1beta1, reading as admissionregistration.k8s.io/__internal from storagebackend.Config{Type:"", Prefix:"587e2f37-98f6-4d4b-a365-553ce9fb180a", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0814 11:01:54.700795  110631 client.go:354] parsed scheme: ""
I0814 11:01:54.700803  110631 client.go:354] scheme "" not registered, fallback to default scheme
I0814 11:01:54.700845  110631 asm_amd64.s:1337] ccResolverWrapper: sending new addresses to cc: [{127.0.0.1:2379 0  <nil>}]
I0814 11:01:54.700941  110631 watch_cache.go:405] Replace watchCache (rev: 29420) 
I0814 11:01:54.700972  110631 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0814 11:01:54.701235  110631 balancer_conn_wrappers.go:131] clientv3/balancer: pin "127.0.0.1:2379"
I0814 11:01:54.701281  110631 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0814 11:01:54.701452  110631 store.go:1342] Monitoring mutatingwebhookconfigurations.admissionregistration.k8s.io count at <storage-prefix>//mutatingwebhookconfigurations
I0814 11:01:54.701515  110631 storage_factory.go:285] storing validatingwebhookconfigurations.admissionregistration.k8s.io in admissionregistration.k8s.io/v1beta1, reading as admissionregistration.k8s.io/__internal from storagebackend.Config{Type:"", Prefix:"587e2f37-98f6-4d4b-a365-553ce9fb180a", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0814 11:01:54.701562  110631 reflector.go:160] Listing and watching *admissionregistration.MutatingWebhookConfiguration from storage/cacher.go:/mutatingwebhookconfigurations
I0814 11:01:54.701599  110631 client.go:354] parsed scheme: ""
I0814 11:01:54.701615  110631 client.go:354] scheme "" not registered, fallback to default scheme
I0814 11:01:54.701756  110631 asm_amd64.s:1337] ccResolverWrapper: sending new addresses to cc: [{127.0.0.1:2379 0  <nil>}]
I0814 11:01:54.701997  110631 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0814 11:01:54.702384  110631 balancer_conn_wrappers.go:131] clientv3/balancer: pin "127.0.0.1:2379"
I0814 11:01:54.702514  110631 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0814 11:01:54.702526  110631 store.go:1342] Monitoring validatingwebhookconfigurations.admissionregistration.k8s.io count at <storage-prefix>//validatingwebhookconfigurations
I0814 11:01:54.702567  110631 reflector.go:160] Listing and watching *admissionregistration.ValidatingWebhookConfiguration from storage/cacher.go:/validatingwebhookconfigurations
I0814 11:01:54.702564  110631 storage_factory.go:285] storing mutatingwebhookconfigurations.admissionregistration.k8s.io in admissionregistration.k8s.io/v1beta1, reading as admissionregistration.k8s.io/__internal from storagebackend.Config{Type:"", Prefix:"587e2f37-98f6-4d4b-a365-553ce9fb180a", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0814 11:01:54.702690  110631 client.go:354] parsed scheme: ""
I0814 11:01:54.702701  110631 client.go:354] scheme "" not registered, fallback to default scheme
I0814 11:01:54.702733  110631 asm_amd64.s:1337] ccResolverWrapper: sending new addresses to cc: [{127.0.0.1:2379 0  <nil>}]
I0814 11:01:54.702860  110631 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0814 11:01:54.702941  110631 watch_cache.go:405] Replace watchCache (rev: 29420) 
I0814 11:01:54.703117  110631 balancer_conn_wrappers.go:131] clientv3/balancer: pin "127.0.0.1:2379"
I0814 11:01:54.703186  110631 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0814 11:01:54.703271  110631 store.go:1342] Monitoring mutatingwebhookconfigurations.admissionregistration.k8s.io count at <storage-prefix>//mutatingwebhookconfigurations
I0814 11:01:54.703300  110631 master.go:434] Enabling API group "admissionregistration.k8s.io".
I0814 11:01:54.703318  110631 reflector.go:160] Listing and watching *admissionregistration.MutatingWebhookConfiguration from storage/cacher.go:/mutatingwebhookconfigurations
I0814 11:01:54.703339  110631 storage_factory.go:285] storing events in v1, reading as __internal from storagebackend.Config{Type:"", Prefix:"587e2f37-98f6-4d4b-a365-553ce9fb180a", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0814 11:01:54.703763  110631 watch_cache.go:405] Replace watchCache (rev: 29420) 
I0814 11:01:54.704541  110631 watch_cache.go:405] Replace watchCache (rev: 29420) 
I0814 11:01:54.704565  110631 client.go:354] parsed scheme: ""
I0814 11:01:54.704578  110631 client.go:354] scheme "" not registered, fallback to default scheme
I0814 11:01:54.704735  110631 asm_amd64.s:1337] ccResolverWrapper: sending new addresses to cc: [{127.0.0.1:2379 0  <nil>}]
I0814 11:01:54.704841  110631 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0814 11:01:54.705152  110631 balancer_conn_wrappers.go:131] clientv3/balancer: pin "127.0.0.1:2379"
I0814 11:01:54.705265  110631 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0814 11:01:54.705426  110631 store.go:1342] Monitoring events count at <storage-prefix>//events
I0814 11:01:54.705445  110631 master.go:434] Enabling API group "events.k8s.io".
I0814 11:01:54.705496  110631 reflector.go:160] Listing and watching *core.Event from storage/cacher.go:/events
I0814 11:01:54.705706  110631 storage_factory.go:285] storing tokenreviews.authentication.k8s.io in authentication.k8s.io/v1, reading as authentication.k8s.io/__internal from storagebackend.Config{Type:"", Prefix:"587e2f37-98f6-4d4b-a365-553ce9fb180a", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0814 11:01:54.705881  110631 storage_factory.go:285] storing tokenreviews.authentication.k8s.io in authentication.k8s.io/v1, reading as authentication.k8s.io/__internal from storagebackend.Config{Type:"", Prefix:"587e2f37-98f6-4d4b-a365-553ce9fb180a", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0814 11:01:54.706123  110631 storage_factory.go:285] storing localsubjectaccessreviews.authorization.k8s.io in authorization.k8s.io/v1, reading as authorization.k8s.io/__internal from storagebackend.Config{Type:"", Prefix:"587e2f37-98f6-4d4b-a365-553ce9fb180a", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0814 11:01:54.706215  110631 storage_factory.go:285] storing selfsubjectaccessreviews.authorization.k8s.io in authorization.k8s.io/v1, reading as authorization.k8s.io/__internal from storagebackend.Config{Type:"", Prefix:"587e2f37-98f6-4d4b-a365-553ce9fb180a", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0814 11:01:54.706280  110631 watch_cache.go:405] Replace watchCache (rev: 29420) 
I0814 11:01:54.706318  110631 storage_factory.go:285] storing selfsubjectrulesreviews.authorization.k8s.io in authorization.k8s.io/v1, reading as authorization.k8s.io/__internal from storagebackend.Config{Type:"", Prefix:"587e2f37-98f6-4d4b-a365-553ce9fb180a", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0814 11:01:54.706406  110631 storage_factory.go:285] storing subjectaccessreviews.authorization.k8s.io in authorization.k8s.io/v1, reading as authorization.k8s.io/__internal from storagebackend.Config{Type:"", Prefix:"587e2f37-98f6-4d4b-a365-553ce9fb180a", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0814 11:01:54.706658  110631 storage_factory.go:285] storing localsubjectaccessreviews.authorization.k8s.io in authorization.k8s.io/v1, reading as authorization.k8s.io/__internal from storagebackend.Config{Type:"", Prefix:"587e2f37-98f6-4d4b-a365-553ce9fb180a", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0814 11:01:54.706746  110631 storage_factory.go:285] storing selfsubjectaccessreviews.authorization.k8s.io in authorization.k8s.io/v1, reading as authorization.k8s.io/__internal from storagebackend.Config{Type:"", Prefix:"587e2f37-98f6-4d4b-a365-553ce9fb180a", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0814 11:01:54.706858  110631 storage_factory.go:285] storing selfsubjectrulesreviews.authorization.k8s.io in authorization.k8s.io/v1, reading as authorization.k8s.io/__internal from storagebackend.Config{Type:"", Prefix:"587e2f37-98f6-4d4b-a365-553ce9fb180a", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0814 11:01:54.707014  110631 storage_factory.go:285] storing subjectaccessreviews.authorization.k8s.io in authorization.k8s.io/v1, reading as authorization.k8s.io/__internal from storagebackend.Config{Type:"", Prefix:"587e2f37-98f6-4d4b-a365-553ce9fb180a", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0814 11:01:54.708424  110631 storage_factory.go:285] storing horizontalpodautoscalers.autoscaling in autoscaling/v1, reading as autoscaling/__internal from storagebackend.Config{Type:"", Prefix:"587e2f37-98f6-4d4b-a365-553ce9fb180a", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0814 11:01:54.708978  110631 storage_factory.go:285] storing horizontalpodautoscalers.autoscaling in autoscaling/v1, reading as autoscaling/__internal from storagebackend.Config{Type:"", Prefix:"587e2f37-98f6-4d4b-a365-553ce9fb180a", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0814 11:01:54.710743  110631 storage_factory.go:285] storing horizontalpodautoscalers.autoscaling in autoscaling/v1, reading as autoscaling/__internal from storagebackend.Config{Type:"", Prefix:"587e2f37-98f6-4d4b-a365-553ce9fb180a", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0814 11:01:54.711205  110631 storage_factory.go:285] storing horizontalpodautoscalers.autoscaling in autoscaling/v1, reading as autoscaling/__internal from storagebackend.Config{Type:"", Prefix:"587e2f37-98f6-4d4b-a365-553ce9fb180a", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0814 11:01:54.712363  110631 storage_factory.go:285] storing horizontalpodautoscalers.autoscaling in autoscaling/v1, reading as autoscaling/__internal from storagebackend.Config{Type:"", Prefix:"587e2f37-98f6-4d4b-a365-553ce9fb180a", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0814 11:01:54.712794  110631 storage_factory.go:285] storing horizontalpodautoscalers.autoscaling in autoscaling/v1, reading as autoscaling/__internal from storagebackend.Config{Type:"", Prefix:"587e2f37-98f6-4d4b-a365-553ce9fb180a", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0814 11:01:54.713733  110631 storage_factory.go:285] storing jobs.batch in batch/v1, reading as batch/__internal from storagebackend.Config{Type:"", Prefix:"587e2f37-98f6-4d4b-a365-553ce9fb180a", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0814 11:01:54.714026  110631 storage_factory.go:285] storing jobs.batch in batch/v1, reading as batch/__internal from storagebackend.Config{Type:"", Prefix:"587e2f37-98f6-4d4b-a365-553ce9fb180a", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0814 11:01:54.715078  110631 storage_factory.go:285] storing cronjobs.batch in batch/v1beta1, reading as batch/__internal from storagebackend.Config{Type:"", Prefix:"587e2f37-98f6-4d4b-a365-553ce9fb180a", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0814 11:01:54.715440  110631 storage_factory.go:285] storing cronjobs.batch in batch/v1beta1, reading as batch/__internal from storagebackend.Config{Type:"", Prefix:"587e2f37-98f6-4d4b-a365-553ce9fb180a", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
W0814 11:01:54.715529  110631 genericapiserver.go:390] Skipping API batch/v2alpha1 because it has no resources.
I0814 11:01:54.716357  110631 storage_factory.go:285] storing certificatesigningrequests.certificates.k8s.io in certificates.k8s.io/v1beta1, reading as certificates.k8s.io/__internal from storagebackend.Config{Type:"", Prefix:"587e2f37-98f6-4d4b-a365-553ce9fb180a", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0814 11:01:54.716529  110631 storage_factory.go:285] storing certificatesigningrequests.certificates.k8s.io in certificates.k8s.io/v1beta1, reading as certificates.k8s.io/__internal from storagebackend.Config{Type:"", Prefix:"587e2f37-98f6-4d4b-a365-553ce9fb180a", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0814 11:01:54.716834  110631 storage_factory.go:285] storing certificatesigningrequests.certificates.k8s.io in certificates.k8s.io/v1beta1, reading as certificates.k8s.io/__internal from storagebackend.Config{Type:"", Prefix:"587e2f37-98f6-4d4b-a365-553ce9fb180a", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0814 11:01:54.717784  110631 storage_factory.go:285] storing leases.coordination.k8s.io in coordination.k8s.io/v1beta1, reading as coordination.k8s.io/__internal from storagebackend.Config{Type:"", Prefix:"587e2f37-98f6-4d4b-a365-553ce9fb180a", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0814 11:01:54.718617  110631 storage_factory.go:285] storing leases.coordination.k8s.io in coordination.k8s.io/v1beta1, reading as coordination.k8s.io/__internal from storagebackend.Config{Type:"", Prefix:"587e2f37-98f6-4d4b-a365-553ce9fb180a", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0814 11:01:54.719592  110631 storage_factory.go:285] storing ingresses.extensions in extensions/v1beta1, reading as extensions/__internal from storagebackend.Config{Type:"", Prefix:"587e2f37-98f6-4d4b-a365-553ce9fb180a", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0814 11:01:54.719827  110631 storage_factory.go:285] storing ingresses.extensions in extensions/v1beta1, reading as extensions/__internal from storagebackend.Config{Type:"", Prefix:"587e2f37-98f6-4d4b-a365-553ce9fb180a", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0814 11:01:54.720770  110631 storage_factory.go:285] storing networkpolicies.networking.k8s.io in networking.k8s.io/v1, reading as networking.k8s.io/__internal from storagebackend.Config{Type:"", Prefix:"587e2f37-98f6-4d4b-a365-553ce9fb180a", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0814 11:01:54.721628  110631 storage_factory.go:285] storing ingresses.networking.k8s.io in networking.k8s.io/v1beta1, reading as networking.k8s.io/__internal from storagebackend.Config{Type:"", Prefix:"587e2f37-98f6-4d4b-a365-553ce9fb180a", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0814 11:01:54.721869  110631 storage_factory.go:285] storing ingresses.networking.k8s.io in networking.k8s.io/v1beta1, reading as networking.k8s.io/__internal from storagebackend.Config{Type:"", Prefix:"587e2f37-98f6-4d4b-a365-553ce9fb180a", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0814 11:01:54.722621  110631 storage_factory.go:285] storing runtimeclasses.node.k8s.io in node.k8s.io/v1beta1, reading as node.k8s.io/__internal from storagebackend.Config{Type:"", Prefix:"587e2f37-98f6-4d4b-a365-553ce9fb180a", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
W0814 11:01:54.722689  110631 genericapiserver.go:390] Skipping API node.k8s.io/v1alpha1 because it has no resources.
I0814 11:01:54.723541  110631 storage_factory.go:285] storing poddisruptionbudgets.policy in policy/v1beta1, reading as policy/__internal from storagebackend.Config{Type:"", Prefix:"587e2f37-98f6-4d4b-a365-553ce9fb180a", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0814 11:01:54.724222  110631 storage_factory.go:285] storing poddisruptionbudgets.policy in policy/v1beta1, reading as policy/__internal from storagebackend.Config{Type:"", Prefix:"587e2f37-98f6-4d4b-a365-553ce9fb180a", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0814 11:01:54.724770  110631 storage_factory.go:285] storing podsecuritypolicies.policy in policy/v1beta1, reading as policy/__internal from storagebackend.Config{Type:"", Prefix:"587e2f37-98f6-4d4b-a365-553ce9fb180a", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0814 11:01:54.725786  110631 storage_factory.go:285] storing clusterrolebindings.rbac.authorization.k8s.io in rbac.authorization.k8s.io/v1, reading as rbac.authorization.k8s.io/__internal from storagebackend.Config{Type:"", Prefix:"587e2f37-98f6-4d4b-a365-553ce9fb180a", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0814 11:01:54.726372  110631 storage_factory.go:285] storing clusterroles.rbac.authorization.k8s.io in rbac.authorization.k8s.io/v1, reading as rbac.authorization.k8s.io/__internal from storagebackend.Config{Type:"", Prefix:"587e2f37-98f6-4d4b-a365-553ce9fb180a", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0814 11:01:54.727194  110631 storage_factory.go:285] storing rolebindings.rbac.authorization.k8s.io in rbac.authorization.k8s.io/v1, reading as rbac.authorization.k8s.io/__internal from storagebackend.Config{Type:"", Prefix:"587e2f37-98f6-4d4b-a365-553ce9fb180a", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0814 11:01:54.728226  110631 storage_factory.go:285] storing roles.rbac.authorization.k8s.io in rbac.authorization.k8s.io/v1, reading as rbac.authorization.k8s.io/__internal from storagebackend.Config{Type:"", Prefix:"587e2f37-98f6-4d4b-a365-553ce9fb180a", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0814 11:01:54.728926  110631 storage_factory.go:285] storing clusterrolebindings.rbac.authorization.k8s.io in rbac.authorization.k8s.io/v1, reading as rbac.authorization.k8s.io/__internal from storagebackend.Config{Type:"", Prefix:"587e2f37-98f6-4d4b-a365-553ce9fb180a", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0814 11:01:54.729424  110631 storage_factory.go:285] storing clusterroles.rbac.authorization.k8s.io in rbac.authorization.k8s.io/v1, reading as rbac.authorization.k8s.io/__internal from storagebackend.Config{Type:"", Prefix:"587e2f37-98f6-4d4b-a365-553ce9fb180a", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0814 11:01:54.730286  110631 storage_factory.go:285] storing rolebindings.rbac.authorization.k8s.io in rbac.authorization.k8s.io/v1, reading as rbac.authorization.k8s.io/__internal from storagebackend.Config{Type:"", Prefix:"587e2f37-98f6-4d4b-a365-553ce9fb180a", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0814 11:01:54.731011  110631 storage_factory.go:285] storing roles.rbac.authorization.k8s.io in rbac.authorization.k8s.io/v1, reading as rbac.authorization.k8s.io/__internal from storagebackend.Config{Type:"", Prefix:"587e2f37-98f6-4d4b-a365-553ce9fb180a", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
W0814 11:01:54.731083  110631 genericapiserver.go:390] Skipping API rbac.authorization.k8s.io/v1alpha1 because it has no resources.
I0814 11:01:54.731562  110631 storage_factory.go:285] storing priorityclasses.scheduling.k8s.io in scheduling.k8s.io/v1, reading as scheduling.k8s.io/__internal from storagebackend.Config{Type:"", Prefix:"587e2f37-98f6-4d4b-a365-553ce9fb180a", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0814 11:01:54.732070  110631 storage_factory.go:285] storing priorityclasses.scheduling.k8s.io in scheduling.k8s.io/v1, reading as scheduling.k8s.io/__internal from storagebackend.Config{Type:"", Prefix:"587e2f37-98f6-4d4b-a365-553ce9fb180a", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
W0814 11:01:54.732130  110631 genericapiserver.go:390] Skipping API scheduling.k8s.io/v1alpha1 because it has no resources.
I0814 11:01:54.732733  110631 storage_factory.go:285] storing storageclasses.storage.k8s.io in storage.k8s.io/v1, reading as storage.k8s.io/__internal from storagebackend.Config{Type:"", Prefix:"587e2f37-98f6-4d4b-a365-553ce9fb180a", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0814 11:01:54.733438  110631 storage_factory.go:285] storing volumeattachments.storage.k8s.io in storage.k8s.io/v1, reading as storage.k8s.io/__internal from storagebackend.Config{Type:"", Prefix:"587e2f37-98f6-4d4b-a365-553ce9fb180a", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0814 11:01:54.733670  110631 storage_factory.go:285] storing volumeattachments.storage.k8s.io in storage.k8s.io/v1, reading as storage.k8s.io/__internal from storagebackend.Config{Type:"", Prefix:"587e2f37-98f6-4d4b-a365-553ce9fb180a", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0814 11:01:54.734131  110631 storage_factory.go:285] storing csidrivers.storage.k8s.io in storage.k8s.io/v1beta1, reading as storage.k8s.io/__internal from storagebackend.Config{Type:"", Prefix:"587e2f37-98f6-4d4b-a365-553ce9fb180a", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0814 11:01:54.734630  110631 storage_factory.go:285] storing csinodes.storage.k8s.io in storage.k8s.io/v1beta1, reading as storage.k8s.io/__internal from storagebackend.Config{Type:"", Prefix:"587e2f37-98f6-4d4b-a365-553ce9fb180a", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0814 11:01:54.735025  110631 storage_factory.go:285] storing storageclasses.storage.k8s.io in storage.k8s.io/v1, reading as storage.k8s.io/__internal from storagebackend.Config{Type:"", Prefix:"587e2f37-98f6-4d4b-a365-553ce9fb180a", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0814 11:01:54.735498  110631 storage_factory.go:285] storing volumeattachments.storage.k8s.io in storage.k8s.io/v1, reading as storage.k8s.io/__internal from storagebackend.Config{Type:"", Prefix:"587e2f37-98f6-4d4b-a365-553ce9fb180a", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
W0814 11:01:54.735568  110631 genericapiserver.go:390] Skipping API storage.k8s.io/v1alpha1 because it has no resources.
I0814 11:01:54.736302  110631 storage_factory.go:285] storing controllerrevisions.apps in apps/v1, reading as apps/__internal from storagebackend.Config{Type:"", Prefix:"587e2f37-98f6-4d4b-a365-553ce9fb180a", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0814 11:01:54.736970  110631 storage_factory.go:285] storing daemonsets.apps in apps/v1, reading as apps/__internal from storagebackend.Config{Type:"", Prefix:"587e2f37-98f6-4d4b-a365-553ce9fb180a", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0814 11:01:54.737178  110631 storage_factory.go:285] storing daemonsets.apps in apps/v1, reading as apps/__internal from storagebackend.Config{Type:"", Prefix:"587e2f37-98f6-4d4b-a365-553ce9fb180a", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0814 11:01:54.737818  110631 storage_factory.go:285] storing deployments.apps in apps/v1, reading as apps/__internal from storagebackend.Config{Type:"", Prefix:"587e2f37-98f6-4d4b-a365-553ce9fb180a", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0814 11:01:54.738159  110631 storage_factory.go:285] storing deployments.apps in apps/v1, reading as apps/__internal from storagebackend.Config{Type:"", Prefix:"587e2f37-98f6-4d4b-a365-553ce9fb180a", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0814 11:01:54.738360  110631 storage_factory.go:285] storing deployments.apps in apps/v1, reading as apps/__internal from storagebackend.Config{Type:"", Prefix:"587e2f37-98f6-4d4b-a365-553ce9fb180a", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0814 11:01:54.739020  110631 storage_factory.go:285] storing replicasets.apps in apps/v1, reading as apps/__internal from storagebackend.Config{Type:"", Prefix:"587e2f37-98f6-4d4b-a365-553ce9fb180a", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0814 11:01:54.739256  110631 storage_factory.go:285] storing replicasets.apps in apps/v1, reading as apps/__internal from storagebackend.Config{Type:"", Prefix:"587e2f37-98f6-4d4b-a365-553ce9fb180a", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0814 11:01:54.739549  110631 storage_factory.go:285] storing replicasets.apps in apps/v1, reading as apps/__internal from storagebackend.Config{Type:"", Prefix:"587e2f37-98f6-4d4b-a365-553ce9fb180a", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0814 11:01:54.740233  110631 storage_factory.go:285] storing statefulsets.apps in apps/v1, reading as apps/__internal from storagebackend.Config{Type:"", Prefix:"587e2f37-98f6-4d4b-a365-553ce9fb180a", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0814 11:01:54.740450  110631 storage_factory.go:285] storing statefulsets.apps in apps/v1, reading as apps/__internal from storagebackend.Config{Type:"", Prefix:"587e2f37-98f6-4d4b-a365-553ce9fb180a", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0814 11:01:54.740685  110631 storage_factory.go:285] storing statefulsets.apps in apps/v1, reading as apps/__internal from storagebackend.Config{Type:"", Prefix:"587e2f37-98f6-4d4b-a365-553ce9fb180a", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
W0814 11:01:54.740825  110631 genericapiserver.go:390] Skipping API apps/v1beta2 because it has no resources.
W0814 11:01:54.740841  110631 genericapiserver.go:390] Skipping API apps/v1beta1 because it has no resources.
I0814 11:01:54.741492  110631 storage_factory.go:285] storing mutatingwebhookconfigurations.admissionregistration.k8s.io in admissionregistration.k8s.io/v1beta1, reading as admissionregistration.k8s.io/__internal from storagebackend.Config{Type:"", Prefix:"587e2f37-98f6-4d4b-a365-553ce9fb180a", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0814 11:01:54.741979  110631 storage_factory.go:285] storing validatingwebhookconfigurations.admissionregistration.k8s.io in admissionregistration.k8s.io/v1beta1, reading as admissionregistration.k8s.io/__internal from storagebackend.Config{Type:"", Prefix:"587e2f37-98f6-4d4b-a365-553ce9fb180a", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0814 11:01:54.742526  110631 storage_factory.go:285] storing mutatingwebhookconfigurations.admissionregistration.k8s.io in admissionregistration.k8s.io/v1beta1, reading as admissionregistration.k8s.io/__internal from storagebackend.Config{Type:"", Prefix:"587e2f37-98f6-4d4b-a365-553ce9fb180a", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0814 11:01:54.743142  110631 storage_factory.go:285] storing validatingwebhookconfigurations.admissionregistration.k8s.io in admissionregistration.k8s.io/v1beta1, reading as admissionregistration.k8s.io/__internal from storagebackend.Config{Type:"", Prefix:"587e2f37-98f6-4d4b-a365-553ce9fb180a", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0814 11:01:54.743800  110631 storage_factory.go:285] storing events.events.k8s.io in events.k8s.io/v1beta1, reading as events.k8s.io/__internal from storagebackend.Config{Type:"", Prefix:"587e2f37-98f6-4d4b-a365-553ce9fb180a", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0814 11:01:54.746138  110631 healthz.go:169] healthz check etcd failed: etcd client connection not yet established
I0814 11:01:54.746168  110631 healthz.go:169] healthz check poststarthook/bootstrap-controller failed: not finished
I0814 11:01:54.746180  110631 healthz.go:169] healthz check poststarthook/rbac/bootstrap-roles failed: not finished
I0814 11:01:54.746204  110631 healthz.go:169] healthz check poststarthook/scheduling/bootstrap-system-priority-classes failed: not finished
I0814 11:01:54.746215  110631 healthz.go:169] healthz check poststarthook/ca-registration failed: not finished
I0814 11:01:54.746222  110631 healthz.go:183] [+]ping ok
[+]log ok
[-]etcd failed: reason withheld
[+]poststarthook/generic-apiserver-start-informers ok
[-]poststarthook/bootstrap-controller failed: reason withheld
[-]poststarthook/rbac/bootstrap-roles failed: reason withheld
[-]poststarthook/scheduling/bootstrap-system-priority-classes failed: reason withheld
[-]poststarthook/ca-registration failed: reason withheld
healthz check failed
I0814 11:01:54.746252  110631 httplog.go:90] GET /healthz: (184.487µs) 0 [Go-http-client/1.1 127.0.0.1:45644]
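The [+]/[-] block above is the per-check health report the apiserver produces while its post-start hooks are still running; the same breakdown can be requested over HTTP with the verbose healthz query. A minimal Go sketch of such a probe, assuming an illustrative plain-HTTP apiserver address of 127.0.0.1:8080 (the integration test uses its own ephemeral listener, so the URL here is only a placeholder):

package main

// Fetches the verbose per-check health report from an apiserver, matching the
// "[+]ping ok" / "[-]etcd failed" lines seen in this log.
import (
	"fmt"
	"io"
	"net/http"
)

func main() {
	// "?verbose" asks the endpoint to list every registered check rather than
	// returning a bare "ok".
	resp, err := http.Get("http://127.0.0.1:8080/healthz?verbose")
	if err != nil {
		fmt.Println("healthz request failed:", err)
		return
	}
	defer resp.Body.Close()
	body, _ := io.ReadAll(resp.Body)
	fmt.Printf("status %d\n%s", resp.StatusCode, body)
}

A non-200 status together with a "[-]..." line, as in the checks above, means at least one registered check (etcd, a post-start hook, and so on) has not passed yet.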
I0814 11:01:54.747292  110631 httplog.go:90] GET /api/v1/namespaces/default/endpoints/kubernetes: (1.55115ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:45642]
I0814 11:01:54.750414  110631 httplog.go:90] GET /api/v1/services: (1.249348ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:45642]
I0814 11:01:54.754422  110631 httplog.go:90] GET /api/v1/services: (1.111796ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:45642]
I0814 11:01:54.756606  110631 healthz.go:169] healthz check etcd failed: etcd client connection not yet established
I0814 11:01:54.756637  110631 healthz.go:169] healthz check poststarthook/rbac/bootstrap-roles failed: not finished
I0814 11:01:54.756651  110631 healthz.go:169] healthz check poststarthook/scheduling/bootstrap-system-priority-classes failed: not finished
I0814 11:01:54.756662  110631 healthz.go:169] healthz check poststarthook/ca-registration failed: not finished
I0814 11:01:54.756670  110631 healthz.go:183] [+]ping ok
[+]log ok
[-]etcd failed: reason withheld
[+]poststarthook/generic-apiserver-start-informers ok
[+]poststarthook/bootstrap-controller ok
[-]poststarthook/rbac/bootstrap-roles failed: reason withheld
[-]poststarthook/scheduling/bootstrap-system-priority-classes failed: reason withheld
[-]poststarthook/ca-registration failed: reason withheld
healthz check failed
I0814 11:01:54.756695  110631 httplog.go:90] GET /healthz: (187.015µs) 0 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:45642]
I0814 11:01:54.757658  110631 httplog.go:90] GET /api/v1/namespaces/kube-system: (980.831µs) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:45644]
I0814 11:01:54.759020  110631 httplog.go:90] GET /api/v1/services: (1.62355ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:45642]
I0814 11:01:54.760158  110631 httplog.go:90] GET /api/v1/services: (2.410649ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:45648]
I0814 11:01:54.762087  110631 httplog.go:90] POST /api/v1/namespaces: (3.267051ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:45644]
I0814 11:01:54.763600  110631 httplog.go:90] GET /api/v1/namespaces/kube-public: (1.065062ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:45642]
I0814 11:01:54.765545  110631 httplog.go:90] POST /api/v1/namespaces: (1.500257ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:45642]
I0814 11:01:54.767100  110631 httplog.go:90] GET /api/v1/namespaces/kube-node-lease: (1.100595ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:45642]
I0814 11:01:54.769389  110631 httplog.go:90] POST /api/v1/namespaces: (1.673845ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:45642]
I0814 11:01:54.847054  110631 healthz.go:169] healthz check etcd failed: etcd client connection not yet established
I0814 11:01:54.847200  110631 healthz.go:169] healthz check poststarthook/rbac/bootstrap-roles failed: not finished
I0814 11:01:54.847219  110631 healthz.go:169] healthz check poststarthook/scheduling/bootstrap-system-priority-classes failed: not finished
I0814 11:01:54.847228  110631 healthz.go:169] healthz check poststarthook/ca-registration failed: not finished
I0814 11:01:54.847237  110631 healthz.go:183] [+]ping ok
[+]log ok
[-]etcd failed: reason withheld
[+]poststarthook/generic-apiserver-start-informers ok
[+]poststarthook/bootstrap-controller ok
[-]poststarthook/rbac/bootstrap-roles failed: reason withheld
[-]poststarthook/scheduling/bootstrap-system-priority-classes failed: reason withheld
[-]poststarthook/ca-registration failed: reason withheld
healthz check failed
I0814 11:01:54.847276  110631 httplog.go:90] GET /healthz: (356.691µs) 0 [Go-http-client/1.1 127.0.0.1:45642]
I0814 11:01:54.857785  110631 healthz.go:169] healthz check etcd failed: etcd client connection not yet established
I0814 11:01:54.857818  110631 healthz.go:169] healthz check poststarthook/rbac/bootstrap-roles failed: not finished
I0814 11:01:54.857829  110631 healthz.go:169] healthz check poststarthook/scheduling/bootstrap-system-priority-classes failed: not finished
I0814 11:01:54.857836  110631 healthz.go:169] healthz check poststarthook/ca-registration failed: not finished
I0814 11:01:54.857842  110631 healthz.go:183] [+]ping ok
[+]log ok
[-]etcd failed: reason withheld
[+]poststarthook/generic-apiserver-start-informers ok
[+]poststarthook/bootstrap-controller ok
[-]poststarthook/rbac/bootstrap-roles failed: reason withheld
[-]poststarthook/scheduling/bootstrap-system-priority-classes failed: reason withheld
[-]poststarthook/ca-registration failed: reason withheld
healthz check failed
I0814 11:01:54.857865  110631 httplog.go:90] GET /healthz: (198.654µs) 0 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:45642]
I0814 11:01:54.947076  110631 healthz.go:169] healthz check etcd failed: etcd client connection not yet established
I0814 11:01:54.947905  110631 healthz.go:169] healthz check poststarthook/rbac/bootstrap-roles failed: not finished
I0814 11:01:54.948084  110631 healthz.go:169] healthz check poststarthook/scheduling/bootstrap-system-priority-classes failed: not finished
I0814 11:01:54.948192  110631 healthz.go:169] healthz check poststarthook/ca-registration failed: not finished
I0814 11:01:54.948287  110631 healthz.go:183] [+]ping ok
[+]log ok
[-]etcd failed: reason withheld
[+]poststarthook/generic-apiserver-start-informers ok
[+]poststarthook/bootstrap-controller ok
[-]poststarthook/rbac/bootstrap-roles failed: reason withheld
[-]poststarthook/scheduling/bootstrap-system-priority-classes failed: reason withheld
[-]poststarthook/ca-registration failed: reason withheld
healthz check failed
I0814 11:01:54.948512  110631 httplog.go:90] GET /healthz: (1.576031ms) 0 [Go-http-client/1.1 127.0.0.1:45642]
I0814 11:01:54.958026  110631 healthz.go:169] healthz check etcd failed: etcd client connection not yet established
I0814 11:01:54.958211  110631 healthz.go:169] healthz check poststarthook/rbac/bootstrap-roles failed: not finished
I0814 11:01:54.958304  110631 healthz.go:169] healthz check poststarthook/scheduling/bootstrap-system-priority-classes failed: not finished
I0814 11:01:54.958431  110631 healthz.go:169] healthz check poststarthook/ca-registration failed: not finished
I0814 11:01:54.958596  110631 healthz.go:183] [+]ping ok
[+]log ok
[-]etcd failed: reason withheld
[+]poststarthook/generic-apiserver-start-informers ok
[+]poststarthook/bootstrap-controller ok
[-]poststarthook/rbac/bootstrap-roles failed: reason withheld
[-]poststarthook/scheduling/bootstrap-system-priority-classes failed: reason withheld
[-]poststarthook/ca-registration failed: reason withheld
healthz check failed
I0814 11:01:54.958855  110631 httplog.go:90] GET /healthz: (1.042823ms) 0 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:45642]
I0814 11:01:55.047147  110631 healthz.go:169] healthz check etcd failed: etcd client connection not yet established
I0814 11:01:55.047190  110631 healthz.go:169] healthz check poststarthook/rbac/bootstrap-roles failed: not finished
I0814 11:01:55.047204  110631 healthz.go:169] healthz check poststarthook/scheduling/bootstrap-system-priority-classes failed: not finished
I0814 11:01:55.047214  110631 healthz.go:169] healthz check poststarthook/ca-registration failed: not finished
I0814 11:01:55.047222  110631 healthz.go:183] [+]ping ok
[+]log ok
[-]etcd failed: reason withheld
[+]poststarthook/generic-apiserver-start-informers ok
[+]poststarthook/bootstrap-controller ok
[-]poststarthook/rbac/bootstrap-roles failed: reason withheld
[-]poststarthook/scheduling/bootstrap-system-priority-classes failed: reason withheld
[-]poststarthook/ca-registration failed: reason withheld
healthz check failed
I0814 11:01:55.047353  110631 httplog.go:90] GET /healthz: (330.355µs) 0 [Go-http-client/1.1 127.0.0.1:45642]
I0814 11:01:55.057917  110631 healthz.go:169] healthz check etcd failed: etcd client connection not yet established
I0814 11:01:55.057959  110631 healthz.go:169] healthz check poststarthook/rbac/bootstrap-roles failed: not finished
I0814 11:01:55.057972  110631 healthz.go:169] healthz check poststarthook/scheduling/bootstrap-system-priority-classes failed: not finished
I0814 11:01:55.057986  110631 healthz.go:169] healthz check poststarthook/ca-registration failed: not finished
I0814 11:01:55.057996  110631 healthz.go:183] [+]ping ok
[+]log ok
[-]etcd failed: reason withheld
[+]poststarthook/generic-apiserver-start-informers ok
[+]poststarthook/bootstrap-controller ok
[-]poststarthook/rbac/bootstrap-roles failed: reason withheld
[-]poststarthook/scheduling/bootstrap-system-priority-classes failed: reason withheld
[-]poststarthook/ca-registration failed: reason withheld
healthz check failed
I0814 11:01:55.058036  110631 httplog.go:90] GET /healthz: (274.128µs) 0 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:45642]
I0814 11:01:55.147127  110631 healthz.go:169] healthz check etcd failed: etcd client connection not yet established
I0814 11:01:55.147166  110631 healthz.go:169] healthz check poststarthook/rbac/bootstrap-roles failed: not finished
I0814 11:01:55.147179  110631 healthz.go:169] healthz check poststarthook/scheduling/bootstrap-system-priority-classes failed: not finished
I0814 11:01:55.147189  110631 healthz.go:169] healthz check poststarthook/ca-registration failed: not finished
I0814 11:01:55.147199  110631 healthz.go:183] [+]ping ok
[+]log ok
[-]etcd failed: reason withheld
[+]poststarthook/generic-apiserver-start-informers ok
[+]poststarthook/bootstrap-controller ok
[-]poststarthook/rbac/bootstrap-roles failed: reason withheld
[-]poststarthook/scheduling/bootstrap-system-priority-classes failed: reason withheld
[-]poststarthook/ca-registration failed: reason withheld
healthz check failed
I0814 11:01:55.147237  110631 httplog.go:90] GET /healthz: (300.159µs) 0 [Go-http-client/1.1 127.0.0.1:45642]
I0814 11:01:55.157894  110631 healthz.go:169] healthz check etcd failed: etcd client connection not yet established
I0814 11:01:55.157931  110631 healthz.go:169] healthz check poststarthook/rbac/bootstrap-roles failed: not finished
I0814 11:01:55.157945  110631 healthz.go:169] healthz check poststarthook/scheduling/bootstrap-system-priority-classes failed: not finished
I0814 11:01:55.157955  110631 healthz.go:169] healthz check poststarthook/ca-registration failed: not finished
I0814 11:01:55.157963  110631 healthz.go:183] [+]ping ok
[+]log ok
[-]etcd failed: reason withheld
[+]poststarthook/generic-apiserver-start-informers ok
[+]poststarthook/bootstrap-controller ok
[-]poststarthook/rbac/bootstrap-roles failed: reason withheld
[-]poststarthook/scheduling/bootstrap-system-priority-classes failed: reason withheld
[-]poststarthook/ca-registration failed: reason withheld
healthz check failed
I0814 11:01:55.157992  110631 httplog.go:90] GET /healthz: (251.328µs) 0 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:45642]
I0814 11:01:55.247173  110631 healthz.go:169] healthz check etcd failed: etcd client connection not yet established
I0814 11:01:55.247211  110631 healthz.go:169] healthz check poststarthook/rbac/bootstrap-roles failed: not finished
I0814 11:01:55.247224  110631 healthz.go:169] healthz check poststarthook/scheduling/bootstrap-system-priority-classes failed: not finished
I0814 11:01:55.247235  110631 healthz.go:169] healthz check poststarthook/ca-registration failed: not finished
I0814 11:01:55.247243  110631 healthz.go:183] [+]ping ok
[+]log ok
[-]etcd failed: reason withheld
[+]poststarthook/generic-apiserver-start-informers ok
[+]poststarthook/bootstrap-controller ok
[-]poststarthook/rbac/bootstrap-roles failed: reason withheld
[-]poststarthook/scheduling/bootstrap-system-priority-classes failed: reason withheld
[-]poststarthook/ca-registration failed: reason withheld
healthz check failed
I0814 11:01:55.247282  110631 httplog.go:90] GET /healthz: (265.37µs) 0 [Go-http-client/1.1 127.0.0.1:45642]
I0814 11:01:55.257820  110631 healthz.go:169] healthz check etcd failed: etcd client connection not yet established
I0814 11:01:55.257868  110631 healthz.go:169] healthz check poststarthook/rbac/bootstrap-roles failed: not finished
I0814 11:01:55.257896  110631 healthz.go:169] healthz check poststarthook/scheduling/bootstrap-system-priority-classes failed: not finished
I0814 11:01:55.257907  110631 healthz.go:169] healthz check poststarthook/ca-registration failed: not finished
I0814 11:01:55.257917  110631 healthz.go:183] [+]ping ok
[+]log ok
[-]etcd failed: reason withheld
[+]poststarthook/generic-apiserver-start-informers ok
[+]poststarthook/bootstrap-controller ok
[-]poststarthook/rbac/bootstrap-roles failed: reason withheld
[-]poststarthook/scheduling/bootstrap-system-priority-classes failed: reason withheld
[-]poststarthook/ca-registration failed: reason withheld
healthz check failed
I0814 11:01:55.257959  110631 httplog.go:90] GET /healthz: (292.532µs) 0 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:45642]
I0814 11:01:55.347081  110631 healthz.go:169] healthz check etcd failed: etcd client connection not yet established
I0814 11:01:55.347129  110631 healthz.go:169] healthz check poststarthook/rbac/bootstrap-roles failed: not finished
I0814 11:01:55.347143  110631 healthz.go:169] healthz check poststarthook/scheduling/bootstrap-system-priority-classes failed: not finished
I0814 11:01:55.347154  110631 healthz.go:169] healthz check poststarthook/ca-registration failed: not finished
I0814 11:01:55.347163  110631 healthz.go:183] [+]ping ok
[+]log ok
[-]etcd failed: reason withheld
[+]poststarthook/generic-apiserver-start-informers ok
[+]poststarthook/bootstrap-controller ok
[-]poststarthook/rbac/bootstrap-roles failed: reason withheld
[-]poststarthook/scheduling/bootstrap-system-priority-classes failed: reason withheld
[-]poststarthook/ca-registration failed: reason withheld
healthz check failed
I0814 11:01:55.347194  110631 httplog.go:90] GET /healthz: (254.427µs) 0 [Go-http-client/1.1 127.0.0.1:45642]
I0814 11:01:55.357912  110631 healthz.go:169] healthz check etcd failed: etcd client connection not yet established
I0814 11:01:55.357965  110631 healthz.go:169] healthz check poststarthook/rbac/bootstrap-roles failed: not finished
I0814 11:01:55.357979  110631 healthz.go:169] healthz check poststarthook/scheduling/bootstrap-system-priority-classes failed: not finished
I0814 11:01:55.357991  110631 healthz.go:169] healthz check poststarthook/ca-registration failed: not finished
I0814 11:01:55.358001  110631 healthz.go:183] [+]ping ok
[+]log ok
[-]etcd failed: reason withheld
[+]poststarthook/generic-apiserver-start-informers ok
[+]poststarthook/bootstrap-controller ok
[-]poststarthook/rbac/bootstrap-roles failed: reason withheld
[-]poststarthook/scheduling/bootstrap-system-priority-classes failed: reason withheld
[-]poststarthook/ca-registration failed: reason withheld
healthz check failed
I0814 11:01:55.358034  110631 httplog.go:90] GET /healthz: (279.862µs) 0 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:45642]
I0814 11:01:55.447128  110631 healthz.go:169] healthz check etcd failed: etcd client connection not yet established
I0814 11:01:55.447175  110631 healthz.go:169] healthz check poststarthook/rbac/bootstrap-roles failed: not finished
I0814 11:01:55.447190  110631 healthz.go:169] healthz check poststarthook/scheduling/bootstrap-system-priority-classes failed: not finished
I0814 11:01:55.447203  110631 healthz.go:169] healthz check poststarthook/ca-registration failed: not finished
I0814 11:01:55.447213  110631 healthz.go:183] [+]ping ok
[+]log ok
[-]etcd failed: reason withheld
[+]poststarthook/generic-apiserver-start-informers ok
[+]poststarthook/bootstrap-controller ok
[-]poststarthook/rbac/bootstrap-roles failed: reason withheld
[-]poststarthook/scheduling/bootstrap-system-priority-classes failed: reason withheld
[-]poststarthook/ca-registration failed: reason withheld
healthz check failed
I0814 11:01:55.447258  110631 httplog.go:90] GET /healthz: (309.759µs) 0 [Go-http-client/1.1 127.0.0.1:45642]
I0814 11:01:55.457890  110631 healthz.go:169] healthz check etcd failed: etcd client connection not yet established
I0814 11:01:55.457933  110631 healthz.go:169] healthz check poststarthook/rbac/bootstrap-roles failed: not finished
I0814 11:01:55.457947  110631 healthz.go:169] healthz check poststarthook/scheduling/bootstrap-system-priority-classes failed: not finished
I0814 11:01:55.457958  110631 healthz.go:169] healthz check poststarthook/ca-registration failed: not finished
I0814 11:01:55.457966  110631 healthz.go:183] [+]ping ok
[+]log ok
[-]etcd failed: reason withheld
[+]poststarthook/generic-apiserver-start-informers ok
[+]poststarthook/bootstrap-controller ok
[-]poststarthook/rbac/bootstrap-roles failed: reason withheld
[-]poststarthook/scheduling/bootstrap-system-priority-classes failed: reason withheld
[-]poststarthook/ca-registration failed: reason withheld
healthz check failed
I0814 11:01:55.457998  110631 httplog.go:90] GET /healthz: (250.717µs) 0 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:45642]
I0814 11:01:55.547060  110631 healthz.go:169] healthz check etcd failed: etcd client connection not yet established
I0814 11:01:55.547105  110631 healthz.go:169] healthz check poststarthook/rbac/bootstrap-roles failed: not finished
I0814 11:01:55.547119  110631 healthz.go:169] healthz check poststarthook/scheduling/bootstrap-system-priority-classes failed: not finished
I0814 11:01:55.547130  110631 healthz.go:169] healthz check poststarthook/ca-registration failed: not finished
I0814 11:01:55.547150  110631 healthz.go:183] [+]ping ok
[+]log ok
[-]etcd failed: reason withheld
[+]poststarthook/generic-apiserver-start-informers ok
[+]poststarthook/bootstrap-controller ok
[-]poststarthook/rbac/bootstrap-roles failed: reason withheld
[-]poststarthook/scheduling/bootstrap-system-priority-classes failed: reason withheld
[-]poststarthook/ca-registration failed: reason withheld
healthz check failed
I0814 11:01:55.547189  110631 httplog.go:90] GET /healthz: (287.112µs) 0 [Go-http-client/1.1 127.0.0.1:45642]
I0814 11:01:55.557921  110631 healthz.go:169] healthz check etcd failed: etcd client connection not yet established
I0814 11:01:55.557965  110631 healthz.go:169] healthz check poststarthook/rbac/bootstrap-roles failed: not finished
I0814 11:01:55.557978  110631 healthz.go:169] healthz check poststarthook/scheduling/bootstrap-system-priority-classes failed: not finished
I0814 11:01:55.557988  110631 healthz.go:169] healthz check poststarthook/ca-registration failed: not finished
I0814 11:01:55.557997  110631 healthz.go:183] [+]ping ok
[+]log ok
[-]etcd failed: reason withheld
[+]poststarthook/generic-apiserver-start-informers ok
[+]poststarthook/bootstrap-controller ok
[-]poststarthook/rbac/bootstrap-roles failed: reason withheld
[-]poststarthook/scheduling/bootstrap-system-priority-classes failed: reason withheld
[-]poststarthook/ca-registration failed: reason withheld
healthz check failed
I0814 11:01:55.558025  110631 httplog.go:90] GET /healthz: (261.754µs) 0 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:45642]
I0814 11:01:55.608518  110631 client.go:354] parsed scheme: ""
I0814 11:01:55.608558  110631 client.go:354] scheme "" not registered, fallback to default scheme
I0814 11:01:55.608617  110631 asm_amd64.s:1337] ccResolverWrapper: sending new addresses to cc: [{127.0.0.1:2379 0  <nil>}]
I0814 11:01:55.608697  110631 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0814 11:01:55.609328  110631 balancer_conn_wrappers.go:131] clientv3/balancer: pin "127.0.0.1:2379"
I0814 11:01:55.609398  110631 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0814 11:01:55.665939  110631 healthz.go:169] healthz check poststarthook/rbac/bootstrap-roles failed: not finished
I0814 11:01:55.665974  110631 healthz.go:169] healthz check poststarthook/scheduling/bootstrap-system-priority-classes failed: not finished
I0814 11:01:55.665985  110631 healthz.go:169] healthz check poststarthook/ca-registration failed: not finished
I0814 11:01:55.665984  110631 healthz.go:169] healthz check poststarthook/rbac/bootstrap-roles failed: not finished
I0814 11:01:55.665997  110631 healthz.go:183] [+]ping ok
[+]log ok
[+]etcd ok
[+]poststarthook/generic-apiserver-start-informers ok
[+]poststarthook/bootstrap-controller ok
[-]poststarthook/rbac/bootstrap-roles failed: reason withheld
[-]poststarthook/scheduling/bootstrap-system-priority-classes failed: reason withheld
[-]poststarthook/ca-registration failed: reason withheld
healthz check failed
I0814 11:01:55.666010  110631 healthz.go:169] healthz check poststarthook/scheduling/bootstrap-system-priority-classes failed: not finished
I0814 11:01:55.666021  110631 healthz.go:169] healthz check poststarthook/ca-registration failed: not finished
I0814 11:01:55.666029  110631 healthz.go:183] [+]ping ok
[+]log ok
[+]etcd ok
[+]poststarthook/generic-apiserver-start-informers ok
[+]poststarthook/bootstrap-controller ok
[-]poststarthook/rbac/bootstrap-roles failed: reason withheld
[-]poststarthook/scheduling/bootstrap-system-priority-classes failed: reason withheld
[-]poststarthook/ca-registration failed: reason withheld
healthz check failed
I0814 11:01:55.666047  110631 httplog.go:90] GET /healthz: (9.343533ms) 0 [Go-http-client/1.1 127.0.0.1:45642]
I0814 11:01:55.666071  110631 httplog.go:90] GET /healthz: (8.39145ms) 0 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:45646]
I0814 11:01:55.747102  110631 httplog.go:90] GET /apis/scheduling.k8s.io/v1beta1/priorityclasses/system-node-critical: (1.286561ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:45646]
I0814 11:01:55.747358  110631 httplog.go:90] GET /apis/rbac.authorization.k8s.io/v1/clusterroles: (1.545506ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:45642]
I0814 11:01:55.748695  110631 healthz.go:169] healthz check poststarthook/rbac/bootstrap-roles failed: not finished
I0814 11:01:55.748720  110631 healthz.go:169] healthz check poststarthook/scheduling/bootstrap-system-priority-classes failed: not finished
I0814 11:01:55.748731  110631 healthz.go:169] healthz check poststarthook/ca-registration failed: not finished
I0814 11:01:55.748739  110631 healthz.go:183] [+]ping ok
[+]log ok
[+]etcd ok
[+]poststarthook/generic-apiserver-start-informers ok
[+]poststarthook/bootstrap-controller ok
[-]poststarthook/rbac/bootstrap-roles failed: reason withheld
[-]poststarthook/scheduling/bootstrap-system-priority-classes failed: reason withheld
[-]poststarthook/ca-registration failed: reason withheld
healthz check failed
I0814 11:01:55.748770  110631 httplog.go:90] GET /healthz: (1.739462ms) 0 [Go-http-client/1.1 127.0.0.1:45664]
I0814 11:01:55.749501  110631 httplog.go:90] GET /apis/rbac.authorization.k8s.io/v1/clusterrolebindings: (1.668952ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:45642]
I0814 11:01:55.751103  110631 httplog.go:90] GET /api/v1/namespaces/kube-system: (2.140653ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:45666]
I0814 11:01:55.751630  110631 httplog.go:90] GET /apis/rbac.authorization.k8s.io/v1/clusterroles/system:aggregate-to-view: (1.677134ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:45642]
I0814 11:01:55.752019  110631 httplog.go:90] POST /apis/scheduling.k8s.io/v1beta1/priorityclasses: (1.667505ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:45646]
I0814 11:01:55.752179  110631 storage_scheduling.go:139] created PriorityClass system-node-critical with value 2000001000
I0814 11:01:55.753519  110631 httplog.go:90] GET /apis/scheduling.k8s.io/v1beta1/priorityclasses/system-cluster-critical: (850.267µs) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:45646]
I0814 11:01:55.753543  110631 httplog.go:90] GET /apis/rbac.authorization.k8s.io/v1/clusterroles/view: (1.407366ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:45642]
I0814 11:01:55.754678  110631 httplog.go:90] GET /apis/rbac.authorization.k8s.io/v1/clusterroles/system:aggregate-to-admin: (775.095µs) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:45664]
I0814 11:01:55.755392  110631 httplog.go:90] POST /apis/scheduling.k8s.io/v1beta1/priorityclasses: (1.307635ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:45646]
I0814 11:01:55.755569  110631 storage_scheduling.go:139] created PriorityClass system-cluster-critical with value 2000000000
I0814 11:01:55.755585  110631 storage_scheduling.go:148] all system priority classes are created successfully or already exist.
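At this point the scheduling bootstrap hook has ensured both built-in priority classes (system-node-critical at 2000001000 and system-cluster-critical at 2000000000, created just above). A short client-go sketch that lists them back, assuming a recent client-go and a placeholder kubeconfig path; the bootstrap in this log posts to the scheduling.k8s.io/v1beta1 endpoint, while v1 (also served by this apiserver, per the earlier storage lines) exposes the same objects:

package main

// Prints every PriorityClass known to the cluster, including the two system
// classes bootstrapped in the log above.
import (
	"context"
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	// The kubeconfig path is a placeholder; the integration test builds its
	// client configuration in-process instead.
	cfg, err := clientcmd.BuildConfigFromFlags("", "/path/to/kubeconfig")
	if err != nil {
		panic(err)
	}
	cs, err := kubernetes.NewForConfig(cfg)
	if err != nil {
		panic(err)
	}
	pcs, err := cs.SchedulingV1().PriorityClasses().List(context.TODO(), metav1.ListOptions{})
	if err != nil {
		panic(err)
	}
	for _, pc := range pcs.Items {
		fmt.Printf("%s\t%d\n", pc.Name, pc.Value)
	}
}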
I0814 11:01:55.755722  110631 httplog.go:90] GET /apis/rbac.authorization.k8s.io/v1/clusterroles/admin: (672.023µs) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:45664]
I0814 11:01:55.756667  110631 httplog.go:90] GET /api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication: (4.804071ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:45666]
I0814 11:01:55.756764  110631 httplog.go:90] GET /apis/rbac.authorization.k8s.io/v1/clusterroles/system:aggregate-to-edit: (767.422µs) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:45664]
I0814 11:01:55.758555  110631 httplog.go:90] GET /apis/rbac.authorization.k8s.io/v1/clusterroles/edit: (1.253937ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:45646]
I0814 11:01:55.758957  110631 httplog.go:90] POST /api/v1/namespaces/kube-system/configmaps: (1.694916ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:45666]
I0814 11:01:55.759399  110631 healthz.go:169] healthz check poststarthook/rbac/bootstrap-roles failed: not finished
I0814 11:01:55.759426  110631 healthz.go:183] [+]ping ok
[+]log ok
[+]etcd ok
[+]poststarthook/generic-apiserver-start-informers ok
[+]poststarthook/bootstrap-controller ok
[-]poststarthook/rbac/bootstrap-roles failed: reason withheld
[+]poststarthook/scheduling/bootstrap-system-priority-classes ok
[+]poststarthook/ca-registration ok
healthz check failed
I0814 11:01:55.759604  110631 httplog.go:90] GET /apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system:discovery: (719.223µs) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:45646]
I0814 11:01:55.759658  110631 httplog.go:90] GET /healthz: (1.600865ms) 0 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:45668]
I0814 11:01:55.760758  110631 httplog.go:90] GET /apis/rbac.authorization.k8s.io/v1/clusterroles/cluster-admin: (865.854µs) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:45646]
I0814 11:01:55.762830  110631 httplog.go:90] POST /apis/rbac.authorization.k8s.io/v1/clusterroles: (1.672524ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:45668]
I0814 11:01:55.763131  110631 storage_rbac.go:219] created clusterrole.rbac.authorization.k8s.io/cluster-admin
I0814 11:01:55.764153  110631 httplog.go:90] GET /apis/rbac.authorization.k8s.io/v1/clusterroles/system:discovery: (846.498µs) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:45668]
I0814 11:01:55.766277  110631 httplog.go:90] POST /apis/rbac.authorization.k8s.io/v1/clusterroles: (1.748138ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:45668]
I0814 11:01:55.766513  110631 storage_rbac.go:219] created clusterrole.rbac.authorization.k8s.io/system:discovery
I0814 11:01:55.767732  110631 httplog.go:90] GET /apis/rbac.authorization.k8s.io/v1/clusterroles/system:basic-user: (1.031653ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:45668]
I0814 11:01:55.769543  110631 httplog.go:90] POST /apis/rbac.authorization.k8s.io/v1/clusterroles: (1.328749ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:45668]
I0814 11:01:55.769908  110631 storage_rbac.go:219] created clusterrole.rbac.authorization.k8s.io/system:basic-user
I0814 11:01:55.771240  110631 httplog.go:90] GET /apis/rbac.authorization.k8s.io/v1/clusterroles/system:public-info-viewer: (856.007µs) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:45668]
I0814 11:01:55.773589  110631 httplog.go:90] POST /apis/rbac.authorization.k8s.io/v1/clusterroles: (1.746078ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:45668]
I0814 11:01:55.773919  110631 storage_rbac.go:219] created clusterrole.rbac.authorization.k8s.io/system:public-info-viewer
I0814 11:01:55.774956  110631 httplog.go:90] GET /apis/rbac.authorization.k8s.io/v1/clusterroles/admin: (881.293µs) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:45668]
I0814 11:01:55.776951  110631 httplog.go:90] POST /apis/rbac.authorization.k8s.io/v1/clusterroles: (1.53249ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:45668]
I0814 11:01:55.777413  110631 storage_rbac.go:219] created clusterrole.rbac.authorization.k8s.io/admin
I0814 11:01:55.778762  110631 httplog.go:90] GET /apis/rbac.authorization.k8s.io/v1/clusterroles/edit: (1.088533ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:45668]
I0814 11:01:55.783724  110631 httplog.go:90] POST /apis/rbac.authorization.k8s.io/v1/clusterroles: (4.487714ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:45668]
I0814 11:01:55.784037  110631 storage_rbac.go:219] created clusterrole.rbac.authorization.k8s.io/edit
I0814 11:01:55.785171  110631 httplog.go:90] GET /apis/rbac.authorization.k8s.io/v1/clusterroles/view: (908.97µs) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:45668]
I0814 11:01:55.787186  110631 httplog.go:90] POST /apis/rbac.authorization.k8s.io/v1/clusterroles: (1.55137ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:45668]
I0814 11:01:55.787564  110631 storage_rbac.go:219] created clusterrole.rbac.authorization.k8s.io/view
I0814 11:01:55.788671  110631 httplog.go:90] GET /apis/rbac.authorization.k8s.io/v1/clusterroles/system:aggregate-to-admin: (842.389µs) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:45668]
I0814 11:01:55.790864  110631 httplog.go:90] POST /apis/rbac.authorization.k8s.io/v1/clusterroles: (1.694064ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:45668]
I0814 11:01:55.791162  110631 storage_rbac.go:219] created clusterrole.rbac.authorization.k8s.io/system:aggregate-to-admin
I0814 11:01:55.792704  110631 httplog.go:90] GET /apis/rbac.authorization.k8s.io/v1/clusterroles/system:aggregate-to-edit: (1.248294ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:45668]
I0814 11:01:55.795002  110631 httplog.go:90] POST /apis/rbac.authorization.k8s.io/v1/clusterroles: (1.786011ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:45668]
I0814 11:01:55.795551  110631 storage_rbac.go:219] created clusterrole.rbac.authorization.k8s.io/system:aggregate-to-edit
I0814 11:01:55.796705  110631 httplog.go:90] GET /apis/rbac.authorization.k8s.io/v1/clusterroles/system:aggregate-to-view: (829.838µs) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:45668]
I0814 11:01:55.799614  110631 httplog.go:90] POST /apis/rbac.authorization.k8s.io/v1/clusterroles: (2.413983ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:45668]
I0814 11:01:55.799876  110631 storage_rbac.go:219] created clusterrole.rbac.authorization.k8s.io/system:aggregate-to-view
I0814 11:01:55.801262  110631 httplog.go:90] GET /apis/rbac.authorization.k8s.io/v1/clusterroles/system:heapster: (952.502µs) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:45668]
I0814 11:01:55.803367  110631 httplog.go:90] POST /apis/rbac.authorization.k8s.io/v1/clusterroles: (1.584139ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:45668]
I0814 11:01:55.803703  110631 storage_rbac.go:219] created clusterrole.rbac.authorization.k8s.io/system:heapster
I0814 11:01:55.805082  110631 httplog.go:90] GET /apis/rbac.authorization.k8s.io/v1/clusterroles/system:node: (1.047001ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:45668]
I0814 11:01:55.807870  110631 httplog.go:90] POST /apis/rbac.authorization.k8s.io/v1/clusterroles: (2.309911ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:45668]
I0814 11:01:55.808308  110631 storage_rbac.go:219] created clusterrole.rbac.authorization.k8s.io/system:node
I0814 11:01:55.809689  110631 httplog.go:90] GET /apis/rbac.authorization.k8s.io/v1/clusterroles/system:node-problem-detector: (1.157868ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:45668]
I0814 11:01:55.811898  110631 httplog.go:90] POST /apis/rbac.authorization.k8s.io/v1/clusterroles: (1.839253ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:45668]
I0814 11:01:55.812068  110631 storage_rbac.go:219] created clusterrole.rbac.authorization.k8s.io/system:node-problem-detector
I0814 11:01:55.813323  110631 httplog.go:90] GET /apis/rbac.authorization.k8s.io/v1/clusterroles/system:node-proxier: (1.083913ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:45668]
I0814 11:01:55.815256  110631 httplog.go:90] POST /apis/rbac.authorization.k8s.io/v1/clusterroles: (1.446425ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:45668]
I0814 11:01:55.815450  110631 storage_rbac.go:219] created clusterrole.rbac.authorization.k8s.io/system:node-proxier
I0814 11:01:55.816576  110631 httplog.go:90] GET /apis/rbac.authorization.k8s.io/v1/clusterroles/system:kubelet-api-admin: (922.443µs) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:45668]
I0814 11:01:55.818580  110631 httplog.go:90] POST /apis/rbac.authorization.k8s.io/v1/clusterroles: (1.517142ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:45668]
I0814 11:01:55.818913  110631 storage_rbac.go:219] created clusterrole.rbac.authorization.k8s.io/system:kubelet-api-admin
I0814 11:01:55.820188  110631 httplog.go:90] GET /apis/rbac.authorization.k8s.io/v1/clusterroles/system:node-bootstrapper: (1.065905ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:45668]
I0814 11:01:55.822524  110631 httplog.go:90] POST /apis/rbac.authorization.k8s.io/v1/clusterroles: (1.942772ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:45668]
I0814 11:01:55.822720  110631 storage_rbac.go:219] created clusterrole.rbac.authorization.k8s.io/system:node-bootstrapper
I0814 11:01:55.824018  110631 httplog.go:90] GET /apis/rbac.authorization.k8s.io/v1/clusterroles/system:auth-delegator: (1.118698ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:45668]
I0814 11:01:55.827201  110631 httplog.go:90] POST /apis/rbac.authorization.k8s.io/v1/clusterroles: (2.669742ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:45668]
I0814 11:01:55.827434  110631 storage_rbac.go:219] created clusterrole.rbac.authorization.k8s.io/system:auth-delegator
I0814 11:01:55.829210  110631 httplog.go:90] GET /apis/rbac.authorization.k8s.io/v1/clusterroles/system:kube-aggregator: (1.462125ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:45668]
I0814 11:01:55.830735  110631 httplog.go:90] POST /apis/rbac.authorization.k8s.io/v1/clusterroles: (1.136258ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:45668]
I0814 11:01:55.830894  110631 storage_rbac.go:219] created clusterrole.rbac.authorization.k8s.io/system:kube-aggregator
I0814 11:01:55.831910  110631 httplog.go:90] GET /apis/rbac.authorization.k8s.io/v1/clusterroles/system:kube-controller-manager: (808.078µs) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:45668]
I0814 11:01:55.833629  110631 httplog.go:90] POST /apis/rbac.authorization.k8s.io/v1/clusterroles: (1.354205ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:45668]
I0814 11:01:55.834020  110631 storage_rbac.go:219] created clusterrole.rbac.authorization.k8s.io/system:kube-controller-manager
I0814 11:01:55.835546  110631 httplog.go:90] GET /apis/rbac.authorization.k8s.io/v1/clusterroles/system:kube-dns: (1.267649ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:45668]
I0814 11:01:55.837220  110631 httplog.go:90] POST /apis/rbac.authorization.k8s.io/v1/clusterroles: (1.242533ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:45668]
I0814 11:01:55.837586  110631 storage_rbac.go:219] created clusterrole.rbac.authorization.k8s.io/system:kube-dns
I0814 11:01:55.839095  110631 httplog.go:90] GET /apis/rbac.authorization.k8s.io/v1/clusterroles/system:persistent-volume-provisioner: (1.286074ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:45668]
I0814 11:01:55.841518  110631 httplog.go:90] POST /apis/rbac.authorization.k8s.io/v1/clusterroles: (1.894069ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:45668]
I0814 11:01:55.841785  110631 storage_rbac.go:219] created clusterrole.rbac.authorization.k8s.io/system:persistent-volume-provisioner
I0814 11:01:55.843044  110631 httplog.go:90] GET /apis/rbac.authorization.k8s.io/v1/clusterroles/system:csi-external-attacher: (1.023199ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:45668]
I0814 11:01:55.845662  110631 httplog.go:90] POST /apis/rbac.authorization.k8s.io/v1/clusterroles: (2.12203ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:45668]
I0814 11:01:55.845871  110631 storage_rbac.go:219] created clusterrole.rbac.authorization.k8s.io/system:csi-external-attacher
I0814 11:01:55.847401  110631 httplog.go:90] GET /apis/rbac.authorization.k8s.io/v1/clusterroles/system:certificates.k8s.io:certificatesigningrequests:nodeclient: (1.369266ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:45668]
I0814 11:01:55.847960  110631 healthz.go:169] healthz check poststarthook/rbac/bootstrap-roles failed: not finished
I0814 11:01:55.848003  110631 healthz.go:183] [+]ping ok
[+]log ok
[+]etcd ok
[+]poststarthook/generic-apiserver-start-informers ok
[+]poststarthook/bootstrap-controller ok
[-]poststarthook/rbac/bootstrap-roles failed: reason withheld
[+]poststarthook/scheduling/bootstrap-system-priority-classes ok
[+]poststarthook/ca-registration ok
healthz check failed
I0814 11:01:55.848031  110631 httplog.go:90] GET /healthz: (951.117µs) 0 [Go-http-client/1.1 127.0.0.1:45666]
I0814 11:01:55.849529  110631 httplog.go:90] POST /apis/rbac.authorization.k8s.io/v1/clusterroles: (1.551129ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:45668]
I0814 11:01:55.849910  110631 storage_rbac.go:219] created clusterrole.rbac.authorization.k8s.io/system:certificates.k8s.io:certificatesigningrequests:nodeclient
I0814 11:01:55.851595  110631 httplog.go:90] GET /apis/rbac.authorization.k8s.io/v1/clusterroles/system:certificates.k8s.io:certificatesigningrequests:selfnodeclient: (1.393886ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:45668]
I0814 11:01:55.853969  110631 httplog.go:90] POST /apis/rbac.authorization.k8s.io/v1/clusterroles: (1.91472ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:45668]
I0814 11:01:55.854317  110631 storage_rbac.go:219] created clusterrole.rbac.authorization.k8s.io/system:certificates.k8s.io:certificatesigningrequests:selfnodeclient
I0814 11:01:55.855874  110631 httplog.go:90] GET /apis/rbac.authorization.k8s.io/v1/clusterroles/system:volume-scheduler: (1.18389ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:45668]
I0814 11:01:55.857830  110631 httplog.go:90] POST /apis/rbac.authorization.k8s.io/v1/clusterroles: (1.589595ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:45668]
I0814 11:01:55.858800  110631 healthz.go:169] healthz check poststarthook/rbac/bootstrap-roles failed: not finished
I0814 11:01:55.859039  110631 healthz.go:183] [+]ping ok
[+]log ok
[+]etcd ok
[+]poststarthook/generic-apiserver-start-informers ok
[+]poststarthook/bootstrap-controller ok
[-]poststarthook/rbac/bootstrap-roles failed: reason withheld
[+]poststarthook/scheduling/bootstrap-system-priority-classes ok
[+]poststarthook/ca-registration ok
healthz check failed
I0814 11:01:55.859349  110631 httplog.go:90] GET /healthz: (1.615426ms) 0 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:45666]
I0814 11:01:55.858875  110631 storage_rbac.go:219] created clusterrole.rbac.authorization.k8s.io/system:volume-scheduler
I0814 11:01:55.861099  110631 httplog.go:90] GET /apis/rbac.authorization.k8s.io/v1/clusterroles/system:kube-scheduler: (1.17481ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:45668]
I0814 11:01:55.863883  110631 httplog.go:90] POST /apis/rbac.authorization.k8s.io/v1/clusterroles: (2.065577ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:45668]
I0814 11:01:55.864131  110631 storage_rbac.go:219] created clusterrole.rbac.authorization.k8s.io/system:kube-scheduler
I0814 11:01:55.865715  110631 httplog.go:90] GET /apis/rbac.authorization.k8s.io/v1/clusterroles/system:csi-external-provisioner: (1.420696ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:45668]
I0814 11:01:55.868915  110631 httplog.go:90] POST /apis/rbac.authorization.k8s.io/v1/clusterroles: (1.739674ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:45668]
I0814 11:01:55.869341  110631 storage_rbac.go:219] created clusterrole.rbac.authorization.k8s.io/system:csi-external-provisioner
I0814 11:01:55.870563  110631 httplog.go:90] GET /apis/rbac.authorization.k8s.io/v1/clusterroles/system:controller:attachdetach-controller: (903.468µs) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:45668]
I0814 11:01:55.872916  110631 httplog.go:90] POST /apis/rbac.authorization.k8s.io/v1/clusterroles: (1.79233ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:45668]
I0814 11:01:55.873110  110631 storage_rbac.go:219] created clusterrole.rbac.authorization.k8s.io/system:controller:attachdetach-controller
I0814 11:01:55.874495  110631 httplog.go:90] GET /apis/rbac.authorization.k8s.io/v1/clusterroles/system:controller:clusterrole-aggregation-controller: (1.089128ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:45668]
I0814 11:01:55.876176  110631 httplog.go:90] POST /apis/rbac.authorization.k8s.io/v1/clusterroles: (1.298832ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:45668]
I0814 11:01:55.876337  110631 storage_rbac.go:219] created clusterrole.rbac.authorization.k8s.io/system:controller:clusterrole-aggregation-controller
I0814 11:01:55.877370  110631 httplog.go:90] GET /apis/rbac.authorization.k8s.io/v1/clusterroles/system:controller:cronjob-controller: (914.918µs) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:45668]
I0814 11:01:55.879214  110631 httplog.go:90] POST /apis/rbac.authorization.k8s.io/v1/clusterroles: (1.397796ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:45668]
I0814 11:01:55.879489  110631 storage_rbac.go:219] created clusterrole.rbac.authorization.k8s.io/system:controller:cronjob-controller
I0814 11:01:55.880624  110631 httplog.go:90] GET /apis/rbac.authorization.k8s.io/v1/clusterroles/system:controller:daemon-set-controller: (963.083µs) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:45668]
I0814 11:01:55.882973  110631 httplog.go:90] POST /apis/rbac.authorization.k8s.io/v1/clusterroles: (1.88987ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:45668]
I0814 11:01:55.883314  110631 storage_rbac.go:219] created clusterrole.rbac.authorization.k8s.io/system:controller:daemon-set-controller
I0814 11:01:55.884505  110631 httplog.go:90] GET /apis/rbac.authorization.k8s.io/v1/clusterroles/system:controller:deployment-controller: (863.873µs) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:45668]
I0814 11:01:55.886282  110631 httplog.go:90] POST /apis/rbac.authorization.k8s.io/v1/clusterroles: (1.286415ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:45668]
I0814 11:01:55.886661  110631 storage_rbac.go:219] created clusterrole.rbac.authorization.k8s.io/system:controller:deployment-controller
I0814 11:01:55.888118  110631 httplog.go:90] GET /apis/rbac.authorization.k8s.io/v1/clusterroles/system:controller:disruption-controller: (1.245465ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:45668]
I0814 11:01:55.891133  110631 httplog.go:90] POST /apis/rbac.authorization.k8s.io/v1/clusterroles: (2.506134ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:45668]
I0814 11:01:55.891527  110631 storage_rbac.go:219] created clusterrole.rbac.authorization.k8s.io/system:controller:disruption-controller
I0814 11:01:55.892932  110631 httplog.go:90] GET /apis/rbac.authorization.k8s.io/v1/clusterroles/system:controller:endpoint-controller: (1.183466ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:45668]
I0814 11:01:55.895131  110631 httplog.go:90] POST /apis/rbac.authorization.k8s.io/v1/clusterroles: (1.660945ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:45668]
I0814 11:01:55.895501  110631 storage_rbac.go:219] created clusterrole.rbac.authorization.k8s.io/system:controller:endpoint-controller
I0814 11:01:55.896736  110631 httplog.go:90] GET /apis/rbac.authorization.k8s.io/v1/clusterroles/system:controller:expand-controller: (870.907µs) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:45668]
I0814 11:01:55.899017  110631 httplog.go:90] POST /apis/rbac.authorization.k8s.io/v1/clusterroles: (1.92419ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:45668]
I0814 11:01:55.899602  110631 storage_rbac.go:219] created clusterrole.rbac.authorization.k8s.io/system:controller:expand-controller
I0814 11:01:55.900450  110631 httplog.go:90] GET /apis/rbac.authorization.k8s.io/v1/clusterroles/system:controller:generic-garbage-collector: (669.269µs) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:45668]
I0814 11:01:55.901980  110631 httplog.go:90] POST /apis/rbac.authorization.k8s.io/v1/clusterroles: (1.216063ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:45668]
I0814 11:01:55.902164  110631 storage_rbac.go:219] created clusterrole.rbac.authorization.k8s.io/system:controller:generic-garbage-collector
I0814 11:01:55.903029  110631 httplog.go:90] GET /apis/rbac.authorization.k8s.io/v1/clusterroles/system:controller:horizontal-pod-autoscaler: (696.817µs) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:45668]
I0814 11:01:55.904724  110631 httplog.go:90] POST /apis/rbac.authorization.k8s.io/v1/clusterroles: (1.337082ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:45668]
I0814 11:01:55.905055  110631 storage_rbac.go:219] created clusterrole.rbac.authorization.k8s.io/system:controller:horizontal-pod-autoscaler
I0814 11:01:55.906080  110631 httplog.go:90] GET /apis/rbac.authorization.k8s.io/v1/clusterroles/system:controller:job-controller: (723.149µs) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:45668]
I0814 11:01:55.907682  110631 httplog.go:90] POST /apis/rbac.authorization.k8s.io/v1/clusterroles: (1.127251ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:45668]
I0814 11:01:55.908038  110631 storage_rbac.go:219] created clusterrole.rbac.authorization.k8s.io/system:controller:job-controller
I0814 11:01:55.909187  110631 httplog.go:90] GET /apis/rbac.authorization.k8s.io/v1/clusterroles/system:controller:namespace-controller: (857.913µs) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:45668]
I0814 11:01:55.912584  110631 httplog.go:90] POST /apis/rbac.authorization.k8s.io/v1/clusterroles: (1.596117ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:45668]
I0814 11:01:55.913054  110631 storage_rbac.go:219] created clusterrole.rbac.authorization.k8s.io/system:controller:namespace-controller
I0814 11:01:55.914092  110631 httplog.go:90] GET /apis/rbac.authorization.k8s.io/v1/clusterroles/system:controller:node-controller: (835.988µs) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:45668]
I0814 11:01:55.915895  110631 httplog.go:90] POST /apis/rbac.authorization.k8s.io/v1/clusterroles: (1.275829ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:45668]
I0814 11:01:55.916163  110631 storage_rbac.go:219] created clusterrole.rbac.authorization.k8s.io/system:controller:node-controller
I0814 11:01:55.917406  110631 httplog.go:90] GET /apis/rbac.authorization.k8s.io/v1/clusterroles/system:controller:persistent-volume-binder: (975.879µs) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:45668]
I0814 11:01:55.919086  110631 httplog.go:90] POST /apis/rbac.authorization.k8s.io/v1/clusterroles: (1.175757ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:45668]
I0814 11:01:55.919454  110631 storage_rbac.go:219] created clusterrole.rbac.authorization.k8s.io/system:controller:persistent-volume-binder
I0814 11:01:55.920321  110631 httplog.go:90] GET /apis/rbac.authorization.k8s.io/v1/clusterroles/system:controller:pod-garbage-collector: (683.716µs) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:45668]
I0814 11:01:55.922133  110631 httplog.go:90] POST /apis/rbac.authorization.k8s.io/v1/clusterroles: (1.37973ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:45668]
I0814 11:01:55.922394  110631 storage_rbac.go:219] created clusterrole.rbac.authorization.k8s.io/system:controller:pod-garbage-collector
I0814 11:01:55.923320  110631 httplog.go:90] GET /apis/rbac.authorization.k8s.io/v1/clusterroles/system:controller:replicaset-controller: (753.078µs) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:45668]
I0814 11:01:55.925248  110631 httplog.go:90] POST /apis/rbac.authorization.k8s.io/v1/clusterroles: (1.599302ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:45668]
I0814 11:01:55.925618  110631 storage_rbac.go:219] created clusterrole.rbac.authorization.k8s.io/system:controller:replicaset-controller
I0814 11:01:55.926762  110631 httplog.go:90] GET /apis/rbac.authorization.k8s.io/v1/clusterroles/system:controller:replication-controller: (894.12µs) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:45668]
I0814 11:01:55.928785  110631 httplog.go:90] POST /apis/rbac.authorization.k8s.io/v1/clusterroles: (1.63045ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:45668]
I0814 11:01:55.929010  110631 storage_rbac.go:219] created clusterrole.rbac.authorization.k8s.io/system:controller:replication-controller
I0814 11:01:55.930442  110631 httplog.go:90] GET /apis/rbac.authorization.k8s.io/v1/clusterroles/system:controller:resourcequota-controller: (1.148505ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:45668]
I0814 11:01:55.932110  110631 httplog.go:90] POST /apis/rbac.authorization.k8s.io/v1/clusterroles: (1.130783ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:45668]
I0814 11:01:55.932382  110631 storage_rbac.go:219] created clusterrole.rbac.authorization.k8s.io/system:controller:resourcequota-controller
I0814 11:01:55.933711  110631 httplog.go:90] GET /apis/rbac.authorization.k8s.io/v1/clusterroles/system:controller:route-controller: (1.056179ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:45668]
I0814 11:01:55.936001  110631 httplog.go:90] POST /apis/rbac.authorization.k8s.io/v1/clusterroles: (1.673089ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:45668]
I0814 11:01:55.936359  110631 storage_rbac.go:219] created clusterrole.rbac.authorization.k8s.io/system:controller:route-controller
I0814 11:01:55.937727  110631 httplog.go:90] GET /apis/rbac.authorization.k8s.io/v1/clusterroles/system:controller:service-account-controller: (1.118185ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:45668]
I0814 11:01:55.939631  110631 httplog.go:90] POST /apis/rbac.authorization.k8s.io/v1/clusterroles: (1.38799ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:45668]
I0814 11:01:55.939918  110631 storage_rbac.go:219] created clusterrole.rbac.authorization.k8s.io/system:controller:service-account-controller
I0814 11:01:55.941078  110631 httplog.go:90] GET /apis/rbac.authorization.k8s.io/v1/clusterroles/system:controller:service-controller: (879.947µs) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:45668]
I0814 11:01:55.943046  110631 httplog.go:90] POST /apis/rbac.authorization.k8s.io/v1/clusterroles: (1.61318ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:45668]
I0814 11:01:55.943439  110631 storage_rbac.go:219] created clusterrole.rbac.authorization.k8s.io/system:controller:service-controller
I0814 11:01:55.944414  110631 httplog.go:90] GET /apis/rbac.authorization.k8s.io/v1/clusterroles/system:controller:statefulset-controller: (716.275µs) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:45668]
I0814 11:01:55.946842  110631 httplog.go:90] POST /apis/rbac.authorization.k8s.io/v1/clusterroles: (1.901218ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:45668]
I0814 11:01:55.947047  110631 storage_rbac.go:219] created clusterrole.rbac.authorization.k8s.io/system:controller:statefulset-controller
I0814 11:01:55.947495  110631 healthz.go:169] healthz check poststarthook/rbac/bootstrap-roles failed: not finished
I0814 11:01:55.947677  110631 healthz.go:183] [+]ping ok
[+]log ok
[+]etcd ok
[+]poststarthook/generic-apiserver-start-informers ok
[+]poststarthook/bootstrap-controller ok
[-]poststarthook/rbac/bootstrap-roles failed: reason withheld
[+]poststarthook/scheduling/bootstrap-system-priority-classes ok
[+]poststarthook/ca-registration ok
healthz check failed
I0814 11:01:55.947942  110631 httplog.go:90] GET /healthz: (1.138972ms) 0 [Go-http-client/1.1 127.0.0.1:45666]
I0814 11:01:55.947992  110631 httplog.go:90] GET /apis/rbac.authorization.k8s.io/v1/clusterroles/system:controller:ttl-controller: (795.603µs) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:45668]
I0814 11:01:55.950866  110631 httplog.go:90] POST /apis/rbac.authorization.k8s.io/v1/clusterroles: (1.964214ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:45668]
I0814 11:01:55.951093  110631 storage_rbac.go:219] created clusterrole.rbac.authorization.k8s.io/system:controller:ttl-controller
I0814 11:01:55.952427  110631 httplog.go:90] GET /apis/rbac.authorization.k8s.io/v1/clusterroles/system:controller:certificate-controller: (1.16943ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:45668]
I0814 11:01:55.958898  110631 healthz.go:169] healthz check poststarthook/rbac/bootstrap-roles failed: not finished
I0814 11:01:55.958962  110631 healthz.go:183] [+]ping ok
[+]log ok
[+]etcd ok
[+]poststarthook/generic-apiserver-start-informers ok
[+]poststarthook/bootstrap-controller ok
[-]poststarthook/rbac/bootstrap-roles failed: reason withheld
[+]poststarthook/scheduling/bootstrap-system-priority-classes ok
[+]poststarthook/ca-registration ok
healthz check failed
I0814 11:01:55.959027  110631 httplog.go:90] GET /healthz: (1.162017ms) 0 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:45668]
I0814 11:01:55.979839  110631 httplog.go:90] POST /apis/rbac.authorization.k8s.io/v1/clusterroles: (2.876333ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:45668]
I0814 11:01:55.980336  110631 storage_rbac.go:219] created clusterrole.rbac.authorization.k8s.io/system:controller:certificate-controller
I0814 11:01:55.987361  110631 httplog.go:90] GET /apis/rbac.authorization.k8s.io/v1/clusterroles/system:controller:pvc-protection-controller: (1.418946ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:45668]
I0814 11:01:56.028040  110631 httplog.go:90] POST /apis/rbac.authorization.k8s.io/v1/clusterroles: (3.163132ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:45668]
I0814 11:01:56.028624  110631 storage_rbac.go:219] created clusterrole.rbac.authorization.k8s.io/system:controller:pvc-protection-controller
I0814 11:01:56.029993  110631 httplog.go:90] GET /apis/rbac.authorization.k8s.io/v1/clusterroles/system:controller:pv-protection-controller: (1.040417ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:45668]
I0814 11:01:56.049218  110631 httplog.go:90] POST /apis/rbac.authorization.k8s.io/v1/clusterroles: (3.234769ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:45668]
I0814 11:01:56.049506  110631 storage_rbac.go:219] created clusterrole.rbac.authorization.k8s.io/system:controller:pv-protection-controller
I0814 11:01:56.050212  110631 healthz.go:169] healthz check poststarthook/rbac/bootstrap-roles failed: not finished
I0814 11:01:56.050240  110631 healthz.go:183] [+]ping ok
[+]log ok
[+]etcd ok
[+]poststarthook/generic-apiserver-start-informers ok
[+]poststarthook/bootstrap-controller ok
[-]poststarthook/rbac/bootstrap-roles failed: reason withheld
[+]poststarthook/scheduling/bootstrap-system-priority-classes ok
[+]poststarthook/ca-registration ok
healthz check failed
I0814 11:01:56.050284  110631 httplog.go:90] GET /healthz: (3.347558ms) 0 [Go-http-client/1.1 127.0.0.1:45666]
I0814 11:01:56.058866  110631 healthz.go:169] healthz check poststarthook/rbac/bootstrap-roles failed: not finished
I0814 11:01:56.058904  110631 healthz.go:183] [+]ping ok
[+]log ok
[+]etcd ok
[+]poststarthook/generic-apiserver-start-informers ok
[+]poststarthook/bootstrap-controller ok
[-]poststarthook/rbac/bootstrap-roles failed: reason withheld
[+]poststarthook/scheduling/bootstrap-system-priority-classes ok
[+]poststarthook/ca-registration ok
healthz check failed
I0814 11:01:56.058951  110631 httplog.go:90] GET /healthz: (1.258629ms) 0 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:45666]
I0814 11:01:56.070325  110631 httplog.go:90] GET /apis/rbac.authorization.k8s.io/v1/clusterrolebindings/cluster-admin: (1.09087ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:45666]
I0814 11:01:56.091003  110631 httplog.go:90] POST /apis/rbac.authorization.k8s.io/v1/clusterrolebindings: (4.316724ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:45666]
I0814 11:01:56.093665  110631 storage_rbac.go:247] created clusterrolebinding.rbac.authorization.k8s.io/cluster-admin
I0814 11:01:56.107151  110631 httplog.go:90] GET /apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system:discovery: (1.17647ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:45666]
I0814 11:01:56.136167  110631 httplog.go:90] POST /apis/rbac.authorization.k8s.io/v1/clusterrolebindings: (10.204842ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:45666]
I0814 11:01:56.136439  110631 storage_rbac.go:247] created clusterrolebinding.rbac.authorization.k8s.io/system:discovery
I0814 11:01:56.148750  110631 healthz.go:169] healthz check poststarthook/rbac/bootstrap-roles failed: not finished
I0814 11:01:56.148781  110631 healthz.go:183] [+]ping ok
[+]log ok
[+]etcd ok
[+]poststarthook/generic-apiserver-start-informers ok
[+]poststarthook/bootstrap-controller ok
[-]poststarthook/rbac/bootstrap-roles failed: reason withheld
[+]poststarthook/scheduling/bootstrap-system-priority-classes ok
[+]poststarthook/ca-registration ok
healthz check failed
I0814 11:01:56.148828  110631 httplog.go:90] GET /healthz: (1.082106ms) 0 [Go-http-client/1.1 127.0.0.1:45666]
I0814 11:01:56.150177  110631 httplog.go:90] GET /apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system:basic-user: (2.423937ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:45668]
I0814 11:01:56.158520  110631 healthz.go:169] healthz check poststarthook/rbac/bootstrap-roles failed: not finished
I0814 11:01:56.158550  110631 healthz.go:183] [+]ping ok
[+]log ok
[+]etcd ok
[+]poststarthook/generic-apiserver-start-informers ok
[+]poststarthook/bootstrap-controller ok
[-]poststarthook/rbac/bootstrap-roles failed: reason withheld
[+]poststarthook/scheduling/bootstrap-system-priority-classes ok
[+]poststarthook/ca-registration ok
healthz check failed
I0814 11:01:56.158582  110631 httplog.go:90] GET /healthz: (959.803µs) 0 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:45668]
I0814 11:01:56.167854  110631 httplog.go:90] POST /apis/rbac.authorization.k8s.io/v1/clusterrolebindings: (1.913963ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:45668]
I0814 11:01:56.168489  110631 storage_rbac.go:247] created clusterrolebinding.rbac.authorization.k8s.io/system:basic-user
I0814 11:01:56.187492  110631 httplog.go:90] GET /apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system:public-info-viewer: (1.465699ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:45668]
I0814 11:01:56.207819  110631 httplog.go:90] POST /apis/rbac.authorization.k8s.io/v1/clusterrolebindings: (1.803592ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:45668]
I0814 11:01:56.208058  110631 storage_rbac.go:247] created clusterrolebinding.rbac.authorization.k8s.io/system:public-info-viewer
I0814 11:01:56.227350  110631 httplog.go:90] GET /apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system:node-proxier: (1.393962ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:45668]
I0814 11:01:56.249423  110631 healthz.go:169] healthz check poststarthook/rbac/bootstrap-roles failed: not finished
I0814 11:01:56.249479  110631 healthz.go:183] [+]ping ok
[+]log ok
[+]etcd ok
[+]poststarthook/generic-apiserver-start-informers ok
[+]poststarthook/bootstrap-controller ok
[-]poststarthook/rbac/bootstrap-roles failed: reason withheld
[+]poststarthook/scheduling/bootstrap-system-priority-classes ok
[+]poststarthook/ca-registration ok
healthz check failed
I0814 11:01:56.249522  110631 httplog.go:90] GET /healthz: (2.660402ms) 0 [Go-http-client/1.1 127.0.0.1:45666]
I0814 11:01:56.249972  110631 httplog.go:90] POST /apis/rbac.authorization.k8s.io/v1/clusterrolebindings: (4.024614ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:45668]
I0814 11:01:56.250193  110631 storage_rbac.go:247] created clusterrolebinding.rbac.authorization.k8s.io/system:node-proxier
I0814 11:01:56.259145  110631 healthz.go:169] healthz check poststarthook/rbac/bootstrap-roles failed: not finished
I0814 11:01:56.259172  110631 healthz.go:183] [+]ping ok
[+]log ok
[+]etcd ok
[+]poststarthook/generic-apiserver-start-informers ok
[+]poststarthook/bootstrap-controller ok
[-]poststarthook/rbac/bootstrap-roles failed: reason withheld
[+]poststarthook/scheduling/bootstrap-system-priority-classes ok
[+]poststarthook/ca-registration ok
healthz check failed
I0814 11:01:56.259209  110631 httplog.go:90] GET /healthz: (1.570611ms) 0 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:45668]
I0814 11:01:56.267283  110631 httplog.go:90] GET /apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system:kube-controller-manager: (1.318123ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:45668]
I0814 11:01:56.288608  110631 httplog.go:90] POST /apis/rbac.authorization.k8s.io/v1/clusterrolebindings: (2.476512ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:45668]
I0814 11:01:56.288925  110631 storage_rbac.go:247] created clusterrolebinding.rbac.authorization.k8s.io/system:kube-controller-manager
I0814 11:01:56.307060  110631 httplog.go:90] GET /apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system:kube-dns: (1.053405ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:45668]
I0814 11:01:56.327942  110631 httplog.go:90] POST /apis/rbac.authorization.k8s.io/v1/clusterrolebindings: (1.900382ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:45668]
I0814 11:01:56.328213  110631 storage_rbac.go:247] created clusterrolebinding.rbac.authorization.k8s.io/system:kube-dns
I0814 11:01:56.348253  110631 healthz.go:169] healthz check poststarthook/rbac/bootstrap-roles failed: not finished
I0814 11:01:56.348302  110631 healthz.go:183] [+]ping ok
[+]log ok
[+]etcd ok
[+]poststarthook/generic-apiserver-start-informers ok
[+]poststarthook/bootstrap-controller ok
[-]poststarthook/rbac/bootstrap-roles failed: reason withheld
[+]poststarthook/scheduling/bootstrap-system-priority-classes ok
[+]poststarthook/ca-registration ok
healthz check failed
I0814 11:01:56.348342  110631 httplog.go:90] GET /healthz: (1.368388ms) 0 [Go-http-client/1.1 127.0.0.1:45666]
I0814 11:01:56.348804  110631 httplog.go:90] GET /apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system:kube-scheduler: (2.785256ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:45668]
I0814 11:01:56.359599  110631 healthz.go:169] healthz check poststarthook/rbac/bootstrap-roles failed: not finished
I0814 11:01:56.359632  110631 healthz.go:183] [+]ping ok
[+]log ok
[+]etcd ok
[+]poststarthook/generic-apiserver-start-informers ok
[+]poststarthook/bootstrap-controller ok
[-]poststarthook/rbac/bootstrap-roles failed: reason withheld
[+]poststarthook/scheduling/bootstrap-system-priority-classes ok
[+]poststarthook/ca-registration ok
healthz check failed
I0814 11:01:56.359682  110631 httplog.go:90] GET /healthz: (1.775417ms) 0 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:45668]
I0814 11:01:56.367924  110631 httplog.go:90] POST /apis/rbac.authorization.k8s.io/v1/clusterrolebindings: (2.00467ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:45668]
I0814 11:01:56.368169  110631 storage_rbac.go:247] created clusterrolebinding.rbac.authorization.k8s.io/system:kube-scheduler
I0814 11:01:56.387372  110631 httplog.go:90] GET /apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system:volume-scheduler: (1.306717ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:45668]
I0814 11:01:56.407746  110631 httplog.go:90] POST /apis/rbac.authorization.k8s.io/v1/clusterrolebindings: (1.758825ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:45668]
I0814 11:01:56.408048  110631 storage_rbac.go:247] created clusterrolebinding.rbac.authorization.k8s.io/system:volume-scheduler
I0814 11:01:56.427803  110631 httplog.go:90] GET /apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system:node: (1.784998ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:45668]
I0814 11:01:56.448137  110631 healthz.go:169] healthz check poststarthook/rbac/bootstrap-roles failed: not finished
I0814 11:01:56.448180  110631 healthz.go:183] [+]ping ok
[+]log ok
[+]etcd ok
[+]poststarthook/generic-apiserver-start-informers ok
[+]poststarthook/bootstrap-controller ok
[-]poststarthook/rbac/bootstrap-roles failed: reason withheld
[+]poststarthook/scheduling/bootstrap-system-priority-classes ok
[+]poststarthook/ca-registration ok
healthz check failed
I0814 11:01:56.448148  110631 httplog.go:90] POST /apis/rbac.authorization.k8s.io/v1/clusterrolebindings: (2.153946ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:45668]
I0814 11:01:56.448217  110631 httplog.go:90] GET /healthz: (1.120589ms) 0 [Go-http-client/1.1 127.0.0.1:45666]
I0814 11:01:56.448778  110631 storage_rbac.go:247] created clusterrolebinding.rbac.authorization.k8s.io/system:node
I0814 11:01:56.459106  110631 healthz.go:169] healthz check poststarthook/rbac/bootstrap-roles failed: not finished
I0814 11:01:56.459143  110631 healthz.go:183] [+]ping ok
[+]log ok
[+]etcd ok
[+]poststarthook/generic-apiserver-start-informers ok
[+]poststarthook/bootstrap-controller ok
[-]poststarthook/rbac/bootstrap-roles failed: reason withheld
[+]poststarthook/scheduling/bootstrap-system-priority-classes ok
[+]poststarthook/ca-registration ok
healthz check failed
I0814 11:01:56.459213  110631 httplog.go:90] GET /healthz: (1.506592ms) 0 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:45668]
I0814 11:01:56.467183  110631 httplog.go:90] GET /apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system:controller:attachdetach-controller: (1.234787ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:45668]
I0814 11:01:56.488227  110631 httplog.go:90] POST /apis/rbac.authorization.k8s.io/v1/clusterrolebindings: (2.184352ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:45668]
I0814 11:01:56.488519  110631 storage_rbac.go:247] created clusterrolebinding.rbac.authorization.k8s.io/system:controller:attachdetach-controller
I0814 11:01:56.507292  110631 httplog.go:90] GET /apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system:controller:clusterrole-aggregation-controller: (1.254247ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:45668]
I0814 11:01:56.528297  110631 httplog.go:90] POST /apis/rbac.authorization.k8s.io/v1/clusterrolebindings: (2.223773ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:45668]
I0814 11:01:56.528573  110631 storage_rbac.go:247] created clusterrolebinding.rbac.authorization.k8s.io/system:controller:clusterrole-aggregation-controller
I0814 11:01:56.547693  110631 healthz.go:169] healthz check poststarthook/rbac/bootstrap-roles failed: not finished
I0814 11:01:56.547727  110631 healthz.go:183] [+]ping ok
[+]log ok
[+]etcd ok
[+]poststarthook/generic-apiserver-start-informers ok
[+]poststarthook/bootstrap-controller ok
[-]poststarthook/rbac/bootstrap-roles failed: reason withheld
[+]poststarthook/scheduling/bootstrap-system-priority-classes ok
[+]poststarthook/ca-registration ok
healthz check failed
I0814 11:01:56.547759  110631 httplog.go:90] GET /healthz: (875.75µs) 0 [Go-http-client/1.1 127.0.0.1:45666]
I0814 11:01:56.547760  110631 httplog.go:90] GET /apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system:controller:cronjob-controller: (1.787236ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:45668]
I0814 11:01:56.559103  110631 healthz.go:169] healthz check poststarthook/rbac/bootstrap-roles failed: not finished
I0814 11:01:56.559137  110631 healthz.go:183] [+]ping ok
[+]log ok
[+]etcd ok
[+]poststarthook/generic-apiserver-start-informers ok
[+]poststarthook/bootstrap-controller ok
[-]poststarthook/rbac/bootstrap-roles failed: reason withheld
[+]poststarthook/scheduling/bootstrap-system-priority-classes ok
[+]poststarthook/ca-registration ok
healthz check failed
I0814 11:01:56.559172  110631 httplog.go:90] GET /healthz: (1.41094ms) 0 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:45666]
I0814 11:01:56.568219  110631 httplog.go:90] POST /apis/rbac.authorization.k8s.io/v1/clusterrolebindings: (2.190518ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:45666]
I0814 11:01:56.568633  110631 storage_rbac.go:247] created clusterrolebinding.rbac.authorization.k8s.io/system:controller:cronjob-controller
I0814 11:01:56.587571  110631 httplog.go:90] GET /apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system:controller:daemon-set-controller: (1.52312ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:45666]
I0814 11:01:56.608644  110631 httplog.go:90] POST /apis/rbac.authorization.k8s.io/v1/clusterrolebindings: (2.563614ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:45666]
I0814 11:01:56.608916  110631 storage_rbac.go:247] created clusterrolebinding.rbac.authorization.k8s.io/system:controller:daemon-set-controller
I0814 11:01:56.627741  110631 httplog.go:90] GET /apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system:controller:deployment-controller: (1.69725ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:45666]
I0814 11:01:56.648225  110631 httplog.go:90] POST /apis/rbac.authorization.k8s.io/v1/clusterrolebindings: (2.203359ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:45666]
I0814 11:01:56.648622  110631 storage_rbac.go:247] created clusterrolebinding.rbac.authorization.k8s.io/system:controller:deployment-controller
I0814 11:01:56.648625  110631 healthz.go:169] healthz check poststarthook/rbac/bootstrap-roles failed: not finished
I0814 11:01:56.648666  110631 healthz.go:183] [+]ping ok
[+]log ok
[+]etcd ok
[+]poststarthook/generic-apiserver-start-informers ok
[+]poststarthook/bootstrap-controller ok
[-]poststarthook/rbac/bootstrap-roles failed: reason withheld
[+]poststarthook/scheduling/bootstrap-system-priority-classes ok
[+]poststarthook/ca-registration ok
healthz check failed
I0814 11:01:56.648712  110631 httplog.go:90] GET /healthz: (1.775755ms) 0 [Go-http-client/1.1 127.0.0.1:45668]
I0814 11:01:56.658887  110631 healthz.go:169] healthz check poststarthook/rbac/bootstrap-roles failed: not finished
I0814 11:01:56.658923  110631 healthz.go:183] [+]ping ok
[+]log ok
[+]etcd ok
[+]poststarthook/generic-apiserver-start-informers ok
[+]poststarthook/bootstrap-controller ok
[-]poststarthook/rbac/bootstrap-roles failed: reason withheld
[+]poststarthook/scheduling/bootstrap-system-priority-classes ok
[+]poststarthook/ca-registration ok
healthz check failed
I0814 11:01:56.658961  110631 httplog.go:90] GET /healthz: (1.24886ms) 0 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:45668]
I0814 11:01:56.667240  110631 httplog.go:90] GET /apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system:controller:disruption-controller: (1.254796ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:45668]
I0814 11:01:56.688533  110631 httplog.go:90] POST /apis/rbac.authorization.k8s.io/v1/clusterrolebindings: (2.390153ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:45668]
I0814 11:01:56.689265  110631 storage_rbac.go:247] created clusterrolebinding.rbac.authorization.k8s.io/system:controller:disruption-controller
I0814 11:01:56.707525  110631 httplog.go:90] GET /apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system:controller:endpoint-controller: (1.511846ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:45668]
I0814 11:01:56.729896  110631 httplog.go:90] POST /apis/rbac.authorization.k8s.io/v1/clusterrolebindings: (2.564741ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:45668]
I0814 11:01:56.730330  110631 storage_rbac.go:247] created clusterrolebinding.rbac.authorization.k8s.io/system:controller:endpoint-controller
I0814 11:01:56.747767  110631 httplog.go:90] GET /apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system:controller:expand-controller: (1.77585ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:45668]
I0814 11:01:56.748257  110631 healthz.go:169] healthz check poststarthook/rbac/bootstrap-roles failed: not finished
I0814 11:01:56.748456  110631 healthz.go:183] [+]ping ok
[+]log ok
[+]etcd ok
[+]poststarthook/generic-apiserver-start-informers ok
[+]poststarthook/bootstrap-controller ok
[-]poststarthook/rbac/bootstrap-roles failed: reason withheld
[+]poststarthook/scheduling/bootstrap-system-priority-classes ok
[+]poststarthook/ca-registration ok
healthz check failed
I0814 11:01:56.748577  110631 httplog.go:90] GET /healthz: (1.509287ms) 0 [Go-http-client/1.1 127.0.0.1:45666]
I0814 11:01:56.758794  110631 healthz.go:169] healthz check poststarthook/rbac/bootstrap-roles failed: not finished
I0814 11:01:56.758835  110631 healthz.go:183] [+]ping ok
[+]log ok
[+]etcd ok
[+]poststarthook/generic-apiserver-start-informers ok
[+]poststarthook/bootstrap-controller ok
[-]poststarthook/rbac/bootstrap-roles failed: reason withheld
[+]poststarthook/scheduling/bootstrap-system-priority-classes ok
[+]poststarthook/ca-registration ok
healthz check failed
I0814 11:01:56.758877  110631 httplog.go:90] GET /healthz: (1.088801ms) 0 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:45666]
I0814 11:01:56.768510  110631 httplog.go:90] POST /apis/rbac.authorization.k8s.io/v1/clusterrolebindings: (2.576317ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:45666]
I0814 11:01:56.768704  110631 storage_rbac.go:247] created clusterrolebinding.rbac.authorization.k8s.io/system:controller:expand-controller
I0814 11:01:56.787402  110631 httplog.go:90] GET /apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system:controller:generic-garbage-collector: (1.446896ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:45666]
I0814 11:01:56.808868  110631 httplog.go:90] POST /apis/rbac.authorization.k8s.io/v1/clusterrolebindings: (2.815867ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:45666]
I0814 11:01:56.809128  110631 storage_rbac.go:247] created clusterrolebinding.rbac.authorization.k8s.io/system:controller:generic-garbage-collector
I0814 11:01:56.828450  110631 httplog.go:90] GET /apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system:controller:horizontal-pod-autoscaler: (1.668411ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:45666]
I0814 11:01:56.849281  110631 httplog.go:90] POST /apis/rbac.authorization.k8s.io/v1/clusterrolebindings: (3.095343ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:45666]
I0814 11:01:56.849573  110631 storage_rbac.go:247] created clusterrolebinding.rbac.authorization.k8s.io/system:controller:horizontal-pod-autoscaler
I0814 11:01:56.850326  110631 healthz.go:169] healthz check poststarthook/rbac/bootstrap-roles failed: not finished
I0814 11:01:56.850590  110631 healthz.go:183] [+]ping ok
[+]log ok
[+]etcd ok
[+]poststarthook/generic-apiserver-start-informers ok
[+]poststarthook/bootstrap-controller ok
[-]poststarthook/rbac/bootstrap-roles failed: reason withheld
[+]poststarthook/scheduling/bootstrap-system-priority-classes ok
[+]poststarthook/ca-registration ok
healthz check failed
I0814 11:01:56.851037  110631 httplog.go:90] GET /healthz: (3.940847ms) 0 [Go-http-client/1.1 127.0.0.1:45668]
I0814 11:01:56.859517  110631 healthz.go:169] healthz check poststarthook/rbac/bootstrap-roles failed: not finished
I0814 11:01:56.859555  110631 healthz.go:183] [+]ping ok
[+]log ok
[+]etcd ok
[+]poststarthook/generic-apiserver-start-informers ok
[+]poststarthook/bootstrap-controller ok
[-]poststarthook/rbac/bootstrap-roles failed: reason withheld
[+]poststarthook/scheduling/bootstrap-system-priority-classes ok
[+]poststarthook/ca-registration ok
healthz check failed
I0814 11:01:56.859594  110631 httplog.go:90] GET /healthz: (1.467726ms) 0 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:45668]
I0814 11:01:56.867608  110631 httplog.go:90] GET /apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system:controller:job-controller: (1.589815ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:45668]
I0814 11:01:56.888182  110631 httplog.go:90] POST /apis/rbac.authorization.k8s.io/v1/clusterrolebindings: (2.131482ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:45668]
I0814 11:01:56.888512  110631 storage_rbac.go:247] created clusterrolebinding.rbac.authorization.k8s.io/system:controller:job-controller
I0814 11:01:56.910118  110631 httplog.go:90] GET /apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system:controller:namespace-controller: (1.412198ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:45668]
I0814 11:01:56.928417  110631 httplog.go:90] POST /apis/rbac.authorization.k8s.io/v1/clusterrolebindings: (2.365628ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:45668]
I0814 11:01:56.928711  110631 storage_rbac.go:247] created clusterrolebinding.rbac.authorization.k8s.io/system:controller:namespace-controller
I0814 11:01:56.947909  110631 httplog.go:90] GET /apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system:controller:node-controller: (1.933653ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:45668]
I0814 11:01:56.948855  110631 healthz.go:169] healthz check poststarthook/rbac/bootstrap-roles failed: not finished
I0814 11:01:56.948889  110631 healthz.go:183] [+]ping ok
[+]log ok
[+]etcd ok
[+]poststarthook/generic-apiserver-start-informers ok
[+]poststarthook/bootstrap-controller ok
[-]poststarthook/rbac/bootstrap-roles failed: reason withheld
[+]poststarthook/scheduling/bootstrap-system-priority-classes ok
[+]poststarthook/ca-registration ok
healthz check failed
I0814 11:01:56.948919  110631 httplog.go:90] GET /healthz: (1.557293ms) 0 [Go-http-client/1.1 127.0.0.1:45666]
I0814 11:01:56.958971  110631 healthz.go:169] healthz check poststarthook/rbac/bootstrap-roles failed: not finished
I0814 11:01:56.959016  110631 healthz.go:183] [+]ping ok
[+]log ok
[+]etcd ok
[+]poststarthook/generic-apiserver-start-informers ok
[+]poststarthook/bootstrap-controller ok
[-]poststarthook/rbac/bootstrap-roles failed: reason withheld
[+]poststarthook/scheduling/bootstrap-system-priority-classes ok
[+]poststarthook/ca-registration ok
healthz check failed
I0814 11:01:56.959068  110631 httplog.go:90] GET /healthz: (1.327918ms) 0 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:45666]
I0814 11:01:56.968686  110631 httplog.go:90] POST /apis/rbac.authorization.k8s.io/v1/clusterrolebindings: (2.668787ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:45666]
I0814 11:01:56.969057  110631 storage_rbac.go:247] created clusterrolebinding.rbac.authorization.k8s.io/system:controller:node-controller
I0814 11:01:56.991234  110631 httplog.go:90] GET /apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system:controller:persistent-volume-binder: (4.477843ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:45666]
I0814 11:01:57.008719  110631 httplog.go:90] POST /apis/rbac.authorization.k8s.io/v1/clusterrolebindings: (2.715907ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:45666]
I0814 11:01:57.009010  110631 storage_rbac.go:247] created clusterrolebinding.rbac.authorization.k8s.io/system:controller:persistent-volume-binder
I0814 11:01:57.027362  110631 httplog.go:90] GET /apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system:controller:pod-garbage-collector: (1.33232ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:45666]
I0814 11:01:57.049613  110631 healthz.go:169] healthz check poststarthook/rbac/bootstrap-roles failed: not finished
I0814 11:01:57.049651  110631 healthz.go:183] [+]ping ok
[+]log ok
[+]etcd ok
[+]poststarthook/generic-apiserver-start-informers ok
[+]poststarthook/bootstrap-controller ok
[-]poststarthook/rbac/bootstrap-roles failed: reason withheld
[+]poststarthook/scheduling/bootstrap-system-priority-classes ok
[+]poststarthook/ca-registration ok
healthz check failed
I0814 11:01:57.049689  110631 httplog.go:90] GET /healthz: (1.844112ms) 0 [Go-http-client/1.1 127.0.0.1:45666]
I0814 11:01:57.050081  110631 httplog.go:90] POST /apis/rbac.authorization.k8s.io/v1/clusterrolebindings: (2.231424ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:45668]
I0814 11:01:57.050323  110631 storage_rbac.go:247] created clusterrolebinding.rbac.authorization.k8s.io/system:controller:pod-garbage-collector
I0814 11:01:57.059296  110631 healthz.go:169] healthz check poststarthook/rbac/bootstrap-roles failed: not finished
I0814 11:01:57.059352  110631 healthz.go:183] [+]ping ok
[+]log ok
[+]etcd ok
[+]poststarthook/generic-apiserver-start-informers ok
[+]poststarthook/bootstrap-controller ok
[-]poststarthook/rbac/bootstrap-roles failed: reason withheld
[+]poststarthook/scheduling/bootstrap-system-priority-classes ok
[+]poststarthook/ca-registration ok
healthz check failed
I0814 11:01:57.059393  110631 httplog.go:90] GET /healthz: (1.340818ms) 0 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:45666]
I0814 11:01:57.067553  110631 httplog.go:90] GET /apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system:controller:replicaset-controller: (1.559902ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:45666]
I0814 11:01:57.088556  110631 httplog.go:90] POST /apis/rbac.authorization.k8s.io/v1/clusterrolebindings: (2.454685ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:45666]
I0814 11:01:57.089044  110631 storage_rbac.go:247] created clusterrolebinding.rbac.authorization.k8s.io/system:controller:replicaset-controller
I0814 11:01:57.107483  110631 httplog.go:90] GET /apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system:controller:replication-controller: (1.449455ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:45666]
I0814 11:01:57.127930  110631 httplog.go:90] POST /apis/rbac.authorization.k8s.io/v1/clusterrolebindings: (1.928998ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:45666]
I0814 11:01:57.128184  110631 storage_rbac.go:247] created clusterrolebinding.rbac.authorization.k8s.io/system:controller:replication-controller
I0814 11:01:57.147600  110631 httplog.go:90] GET /apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system:controller:resourcequota-controller: (1.515893ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:45666]
I0814 11:01:57.148651  110631 healthz.go:169] healthz check poststarthook/rbac/bootstrap-roles failed: not finished
I0814 11:01:57.148700  110631 healthz.go:183] [+]ping ok
[+]log ok
[+]etcd ok
[+]poststarthook/generic-apiserver-start-informers ok
[+]poststarthook/bootstrap-controller ok
[-]poststarthook/rbac/bootstrap-roles failed: reason withheld
[+]poststarthook/scheduling/bootstrap-system-priority-classes ok
[+]poststarthook/ca-registration ok
healthz check failed
I0814 11:01:57.148741  110631 httplog.go:90] GET /healthz: (1.008742ms) 0 [Go-http-client/1.1 127.0.0.1:45668]
I0814 11:01:57.159031  110631 healthz.go:169] healthz check poststarthook/rbac/bootstrap-roles failed: not finished
I0814 11:01:57.159068  110631 healthz.go:183] [+]ping ok
[+]log ok
[+]etcd ok
[+]poststarthook/generic-apiserver-start-informers ok
[+]poststarthook/bootstrap-controller ok
[-]poststarthook/rbac/bootstrap-roles failed: reason withheld
[+]poststarthook/scheduling/bootstrap-system-priority-classes ok
[+]poststarthook/ca-registration ok
healthz check failed
I0814 11:01:57.159113  110631 httplog.go:90] GET /healthz: (1.32198ms) 0 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:45668]
I0814 11:01:57.168259  110631 httplog.go:90] POST /apis/rbac.authorization.k8s.io/v1/clusterrolebindings: (2.265937ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:45668]
I0814 11:01:57.168709  110631 storage_rbac.go:247] created clusterrolebinding.rbac.authorization.k8s.io/system:controller:resourcequota-controller
I0814 11:01:57.187549  110631 httplog.go:90] GET /apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system:controller:route-controller: (1.557427ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:45668]
I0814 11:01:57.208926  110631 httplog.go:90] POST /apis/rbac.authorization.k8s.io/v1/clusterrolebindings: (1.980198ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:45668]
I0814 11:01:57.209196  110631 storage_rbac.go:247] created clusterrolebinding.rbac.authorization.k8s.io/system:controller:route-controller
I0814 11:01:57.228364  110631 httplog.go:90] GET /apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system:controller:service-account-controller: (1.530462ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:45668]
I0814 11:01:57.247933  110631 httplog.go:90] POST /apis/rbac.authorization.k8s.io/v1/clusterrolebindings: (1.8952ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:45668]
I0814 11:01:57.248178  110631 storage_rbac.go:247] created clusterrolebinding.rbac.authorization.k8s.io/system:controller:service-account-controller
I0814 11:01:57.248382  110631 healthz.go:169] healthz check poststarthook/rbac/bootstrap-roles failed: not finished
I0814 11:01:57.248399  110631 healthz.go:183] [+]ping ok
[+]log ok
[+]etcd ok
[+]poststarthook/generic-apiserver-start-informers ok
[+]poststarthook/bootstrap-controller ok
[-]poststarthook/rbac/bootstrap-roles failed: reason withheld
[+]poststarthook/scheduling/bootstrap-system-priority-classes ok
[+]poststarthook/ca-registration ok
healthz check failed
I0814 11:01:57.248424  110631 httplog.go:90] GET /healthz: (1.396518ms) 0 [Go-http-client/1.1 127.0.0.1:45666]
I0814 11:01:57.258933  110631 healthz.go:169] healthz check poststarthook/rbac/bootstrap-roles failed: not finished
I0814 11:01:57.258970  110631 healthz.go:183] [+]ping ok
[+]log ok
[+]etcd ok
[+]poststarthook/generic-apiserver-start-informers ok
[+]poststarthook/bootstrap-controller ok
[-]poststarthook/rbac/bootstrap-roles failed: reason withheld
[+]poststarthook/scheduling/bootstrap-system-priority-classes ok
[+]poststarthook/ca-registration ok
healthz check failed
I0814 11:01:57.259021  110631 httplog.go:90] GET /healthz: (1.268877ms) 0 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:45666]
I0814 11:01:57.267890  110631 httplog.go:90] GET /apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system:controller:service-controller: (1.9083ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:45666]
I0814 11:01:57.288364  110631 httplog.go:90] POST /apis/rbac.authorization.k8s.io/v1/clusterrolebindings: (2.263473ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:45666]
I0814 11:01:57.289093  110631 storage_rbac.go:247] created clusterrolebinding.rbac.authorization.k8s.io/system:controller:service-controller
I0814 11:01:57.307158  110631 httplog.go:90] GET /apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system:controller:statefulset-controller: (1.189499ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:45666]
I0814 11:01:57.328257  110631 httplog.go:90] POST /apis/rbac.authorization.k8s.io/v1/clusterrolebindings: (2.230551ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:45666]
I0814 11:01:57.328530  110631 storage_rbac.go:247] created clusterrolebinding.rbac.authorization.k8s.io/system:controller:statefulset-controller
I0814 11:01:57.347421  110631 httplog.go:90] GET /apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system:controller:ttl-controller: (1.4821ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:45666]
I0814 11:01:57.354522  110631 healthz.go:169] healthz check poststarthook/rbac/bootstrap-roles failed: not finished
I0814 11:01:57.354560  110631 healthz.go:183] [+]ping ok
[+]log ok
[+]etcd ok
[+]poststarthook/generic-apiserver-start-informers ok
[+]poststarthook/bootstrap-controller ok
[-]poststarthook/rbac/bootstrap-roles failed: reason withheld
[+]poststarthook/scheduling/bootstrap-system-priority-classes ok
[+]poststarthook/ca-registration ok
healthz check failed
I0814 11:01:57.354604  110631 httplog.go:90] GET /healthz: (7.083119ms) 0 [Go-http-client/1.1 127.0.0.1:45668]
I0814 11:01:57.359153  110631 healthz.go:169] healthz check poststarthook/rbac/bootstrap-roles failed: not finished
I0814 11:01:57.359185  110631 healthz.go:183] [+]ping ok
[+]log ok
[+]etcd ok
[+]poststarthook/generic-apiserver-start-informers ok
[+]poststarthook/bootstrap-controller ok
[-]poststarthook/rbac/bootstrap-roles failed: reason withheld
[+]poststarthook/scheduling/bootstrap-system-priority-classes ok
[+]poststarthook/ca-registration ok
healthz check failed
I0814 11:01:57.359222  110631 httplog.go:90] GET /healthz: (1.451418ms) 0 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:45668]
I0814 11:01:57.368123  110631 httplog.go:90] POST /apis/rbac.authorization.k8s.io/v1/clusterrolebindings: (2.146781ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:45668]
I0814 11:01:57.368368  110631 storage_rbac.go:247] created clusterrolebinding.rbac.authorization.k8s.io/system:controller:ttl-controller
I0814 11:01:57.387668  110631 httplog.go:90] GET /apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system:controller:certificate-controller: (1.66671ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:45668]
I0814 11:01:57.408963  110631 httplog.go:90] POST /apis/rbac.authorization.k8s.io/v1/clusterrolebindings: (2.915461ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:45668]
I0814 11:01:57.409230  110631 storage_rbac.go:247] created clusterrolebinding.rbac.authorization.k8s.io/system:controller:certificate-controller
I0814 11:01:57.427196  110631 httplog.go:90] GET /apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system:controller:pvc-protection-controller: (1.188697ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:45668]
I0814 11:01:57.448914  110631 httplog.go:90] POST /apis/rbac.authorization.k8s.io/v1/clusterrolebindings: (2.940103ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:45668]
I0814 11:01:57.449329  110631 storage_rbac.go:247] created clusterrolebinding.rbac.authorization.k8s.io/system:controller:pvc-protection-controller
I0814 11:01:57.450326  110631 healthz.go:169] healthz check poststarthook/rbac/bootstrap-roles failed: not finished
I0814 11:01:57.450404  110631 healthz.go:183] [+]ping ok
[+]log ok
[+]etcd ok
[+]poststarthook/generic-apiserver-start-informers ok
[+]poststarthook/bootstrap-controller ok
[-]poststarthook/rbac/bootstrap-roles failed: reason withheld
[+]poststarthook/scheduling/bootstrap-system-priority-classes ok
[+]poststarthook/ca-registration ok
healthz check failed
I0814 11:01:57.450479  110631 httplog.go:90] GET /healthz: (3.613747ms) 0 [Go-http-client/1.1 127.0.0.1:45666]
I0814 11:01:57.458589  110631 healthz.go:169] healthz check poststarthook/rbac/bootstrap-roles failed: not finished
I0814 11:01:57.458619  110631 healthz.go:183] [+]ping ok
[+]log ok
[+]etcd ok
[+]poststarthook/generic-apiserver-start-informers ok
[+]poststarthook/bootstrap-controller ok
[-]poststarthook/rbac/bootstrap-roles failed: reason withheld
[+]poststarthook/scheduling/bootstrap-system-priority-classes ok
[+]poststarthook/ca-registration ok
healthz check failed
I0814 11:01:57.458657  110631 httplog.go:90] GET /healthz: (995.958µs) 0 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:45666]
I0814 11:01:57.467518  110631 httplog.go:90] GET /apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system:controller:pv-protection-controller: (1.553501ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:45666]
I0814 11:01:57.489188  110631 httplog.go:90] POST /apis/rbac.authorization.k8s.io/v1/clusterrolebindings: (2.774918ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:45666]
I0814 11:01:57.489675  110631 storage_rbac.go:247] created clusterrolebinding.rbac.authorization.k8s.io/system:controller:pv-protection-controller
I0814 11:01:57.509648  110631 httplog.go:90] GET /apis/rbac.authorization.k8s.io/v1/namespaces/kube-system/roles/extension-apiserver-authentication-reader: (2.773239ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:45666]
I0814 11:01:57.511366  110631 httplog.go:90] GET /api/v1/namespaces/kube-system: (1.309679ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:45666]
I0814 11:01:57.528285  110631 httplog.go:90] POST /apis/rbac.authorization.k8s.io/v1/namespaces/kube-system/roles: (2.126824ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:45666]
I0814 11:01:57.528538  110631 storage_rbac.go:278] created role.rbac.authorization.k8s.io/extension-apiserver-authentication-reader in kube-system
I0814 11:01:57.548099  110631 healthz.go:169] healthz check poststarthook/rbac/bootstrap-roles failed: not finished
I0814 11:01:57.548135  110631 healthz.go:183] [+]ping ok
[+]log ok
[+]etcd ok
[+]poststarthook/generic-apiserver-start-informers ok
[+]poststarthook/bootstrap-controller ok
[-]poststarthook/rbac/bootstrap-roles failed: reason withheld
[+]poststarthook/scheduling/bootstrap-system-priority-classes ok
[+]poststarthook/ca-registration ok
healthz check failed
I0814 11:01:57.548178  110631 httplog.go:90] GET /healthz: (1.385227ms) 0 [Go-http-client/1.1 127.0.0.1:45668]
I0814 11:01:57.548210  110631 httplog.go:90] GET /apis/rbac.authorization.k8s.io/v1/namespaces/kube-system/roles/system:controller:bootstrap-signer: (2.183609ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:45666]
I0814 11:01:57.549782  110631 httplog.go:90] GET /api/v1/namespaces/kube-system: (1.153452ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:45668]
I0814 11:01:57.558653  110631 healthz.go:169] healthz check poststarthook/rbac/bootstrap-roles failed: not finished
I0814 11:01:57.558690  110631 healthz.go:183] [+]ping ok
[+]log ok
[+]etcd ok
[+]poststarthook/generic-apiserver-start-informers ok
[+]poststarthook/bootstrap-controller ok
[-]poststarthook/rbac/bootstrap-roles failed: reason withheld
[+]poststarthook/scheduling/bootstrap-system-priority-classes ok
[+]poststarthook/ca-registration ok
healthz check failed
I0814 11:01:57.558739  110631 httplog.go:90] GET /healthz: (994.769µs) 0 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:45668]
I0814 11:01:57.567895  110631 httplog.go:90] POST /apis/rbac.authorization.k8s.io/v1/namespaces/kube-system/roles: (1.903989ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:45668]
I0814 11:01:57.568179  110631 storage_rbac.go:278] created role.rbac.authorization.k8s.io/system:controller:bootstrap-signer in kube-system
I0814 11:01:57.587202  110631 httplog.go:90] GET /apis/rbac.authorization.k8s.io/v1/namespaces/kube-system/roles/system:controller:cloud-provider: (1.175567ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:45668]
I0814 11:01:57.589156  110631 httplog.go:90] GET /api/v1/namespaces/kube-system: (1.452323ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:45668]
I0814 11:01:57.608190  110631 httplog.go:90] POST /apis/rbac.authorization.k8s.io/v1/namespaces/kube-system/roles: (2.089837ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:45668]
I0814 11:01:57.608489  110631 storage_rbac.go:278] created role.rbac.authorization.k8s.io/system:controller:cloud-provider in kube-system
I0814 11:01:57.627743  110631 httplog.go:90] GET /apis/rbac.authorization.k8s.io/v1/namespaces/kube-system/roles/system:controller:token-cleaner: (1.673465ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:45668]
I0814 11:01:57.629729  110631 httplog.go:90] GET /api/v1/namespaces/kube-system: (1.520287ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:45668]
I0814 11:01:57.649185  110631 healthz.go:169] healthz check poststarthook/rbac/bootstrap-roles failed: not finished
I0814 11:01:57.649223  110631 healthz.go:183] [+]ping ok
[+]log ok
[+]etcd ok
[+]poststarthook/generic-apiserver-start-informers ok
[+]poststarthook/bootstrap-controller ok
[-]poststarthook/rbac/bootstrap-roles failed: reason withheld
[+]poststarthook/scheduling/bootstrap-system-priority-classes ok
[+]poststarthook/ca-registration ok
healthz check failed
I0814 11:01:57.649269  110631 httplog.go:90] GET /healthz: (1.619139ms) 0 [Go-http-client/1.1 127.0.0.1:45666]
I0814 11:01:57.650412  110631 httplog.go:90] POST /apis/rbac.authorization.k8s.io/v1/namespaces/kube-system/roles: (2.753236ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:45668]
I0814 11:01:57.650900  110631 storage_rbac.go:278] created role.rbac.authorization.k8s.io/system:controller:token-cleaner in kube-system
I0814 11:01:57.659028  110631 healthz.go:169] healthz check poststarthook/rbac/bootstrap-roles failed: not finished
I0814 11:01:57.659067  110631 healthz.go:183] [+]ping ok
[+]log ok
[+]etcd ok
[+]poststarthook/generic-apiserver-start-informers ok
[+]poststarthook/bootstrap-controller ok
[-]poststarthook/rbac/bootstrap-roles failed: reason withheld
[+]poststarthook/scheduling/bootstrap-system-priority-classes ok
[+]poststarthook/ca-registration ok
healthz check failed
I0814 11:01:57.659110  110631 httplog.go:90] GET /healthz: (1.37126ms) 0 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:45668]
I0814 11:01:57.667872  110631 httplog.go:90] GET /apis/rbac.authorization.k8s.io/v1/namespaces/kube-system/roles/system::leader-locking-kube-controller-manager: (1.888525ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:45668]
I0814 11:01:57.669969  110631 httplog.go:90] GET /api/v1/namespaces/kube-system: (1.493394ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:45668]
I0814 11:01:57.689215  110631 httplog.go:90] POST /apis/rbac.authorization.k8s.io/v1/namespaces/kube-system/roles: (3.153166ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:45668]
I0814 11:01:57.689524  110631 storage_rbac.go:278] created role.rbac.authorization.k8s.io/system::leader-locking-kube-controller-manager in kube-system
I0814 11:01:57.707261  110631 httplog.go:90] GET /apis/rbac.authorization.k8s.io/v1/namespaces/kube-system/roles/system::leader-locking-kube-scheduler: (1.225172ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:45668]
I0814 11:01:57.709369  110631 httplog.go:90] GET /api/v1/namespaces/kube-system: (1.476814ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:45668]
I0814 11:01:57.727983  110631 httplog.go:90] POST /apis/rbac.authorization.k8s.io/v1/namespaces/kube-system/roles: (1.980142ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:45668]
I0814 11:01:57.728480  110631 storage_rbac.go:278] created role.rbac.authorization.k8s.io/system::leader-locking-kube-scheduler in kube-system
I0814 11:01:57.747687  110631 httplog.go:90] GET /apis/rbac.authorization.k8s.io/v1/namespaces/kube-public/roles/system:controller:bootstrap-signer: (1.635198ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:45668]
I0814 11:01:57.747690  110631 healthz.go:169] healthz check poststarthook/rbac/bootstrap-roles failed: not finished
I0814 11:01:57.747730  110631 healthz.go:183] [+]ping ok
[+]log ok
[+]etcd ok
[+]poststarthook/generic-apiserver-start-informers ok
[+]poststarthook/bootstrap-controller ok
[-]poststarthook/rbac/bootstrap-roles failed: reason withheld
[+]poststarthook/scheduling/bootstrap-system-priority-classes ok
[+]poststarthook/ca-registration ok
healthz check failed
I0814 11:01:57.747767  110631 httplog.go:90] GET /healthz: (940.358µs) 0 [Go-http-client/1.1 127.0.0.1:45666]
I0814 11:01:57.749735  110631 httplog.go:90] GET /api/v1/namespaces/kube-public: (1.367653ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:45668]
I0814 11:01:57.758780  110631 healthz.go:169] healthz check poststarthook/rbac/bootstrap-roles failed: not finished
I0814 11:01:57.758815  110631 healthz.go:183] [+]ping ok
[+]log ok
[+]etcd ok
[+]poststarthook/generic-apiserver-start-informers ok
[+]poststarthook/bootstrap-controller ok
[-]poststarthook/rbac/bootstrap-roles failed: reason withheld
[+]poststarthook/scheduling/bootstrap-system-priority-classes ok
[+]poststarthook/ca-registration ok
healthz check failed
I0814 11:01:57.758875  110631 httplog.go:90] GET /healthz: (1.180074ms) 0 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:45668]
I0814 11:01:57.767995  110631 httplog.go:90] POST /apis/rbac.authorization.k8s.io/v1/namespaces/kube-public/roles: (2.025676ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:45668]
I0814 11:01:57.768346  110631 storage_rbac.go:278] created role.rbac.authorization.k8s.io/system:controller:bootstrap-signer in kube-public
I0814 11:01:57.787609  110631 httplog.go:90] GET /apis/rbac.authorization.k8s.io/v1/namespaces/kube-system/rolebindings/system::extension-apiserver-authentication-reader: (1.488724ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:45668]
I0814 11:01:57.789702  110631 httplog.go:90] GET /api/v1/namespaces/kube-system: (1.531171ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:45668]
I0814 11:01:57.808311  110631 httplog.go:90] POST /apis/rbac.authorization.k8s.io/v1/namespaces/kube-system/rolebindings: (1.991002ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:45668]
I0814 11:01:57.808653  110631 storage_rbac.go:308] created rolebinding.rbac.authorization.k8s.io/system::extension-apiserver-authentication-reader in kube-system
I0814 11:01:57.828332  110631 httplog.go:90] GET /apis/rbac.authorization.k8s.io/v1/namespaces/kube-system/rolebindings/system::leader-locking-kube-controller-manager: (1.381085ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:45668]
I0814 11:01:57.830304  110631 httplog.go:90] GET /api/v1/namespaces/kube-system: (1.423112ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:45668]
I0814 11:01:57.847814  110631 healthz.go:169] healthz check poststarthook/rbac/bootstrap-roles failed: not finished
I0814 11:01:57.847853  110631 healthz.go:183] [+]ping ok
[+]log ok
[+]etcd ok
[+]poststarthook/generic-apiserver-start-informers ok
[+]poststarthook/bootstrap-controller ok
[-]poststarthook/rbac/bootstrap-roles failed: reason withheld
[+]poststarthook/scheduling/bootstrap-system-priority-classes ok
[+]poststarthook/ca-registration ok
healthz check failed
I0814 11:01:57.847901  110631 httplog.go:90] GET /healthz: (1.106603ms) 0 [Go-http-client/1.1 127.0.0.1:45666]
I0814 11:01:57.848748  110631 httplog.go:90] POST /apis/rbac.authorization.k8s.io/v1/namespaces/kube-system/rolebindings: (2.616823ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:45668]
I0814 11:01:57.849100  110631 storage_rbac.go:308] created rolebinding.rbac.authorization.k8s.io/system::leader-locking-kube-controller-manager in kube-system
I0814 11:01:57.863870  110631 healthz.go:169] healthz check poststarthook/rbac/bootstrap-roles failed: not finished
I0814 11:01:57.863908  110631 healthz.go:183] [+]ping ok
[+]log ok
[+]etcd ok
[+]poststarthook/generic-apiserver-start-informers ok
[+]poststarthook/bootstrap-controller ok
[-]poststarthook/rbac/bootstrap-roles failed: reason withheld
[+]poststarthook/scheduling/bootstrap-system-priority-classes ok
[+]poststarthook/ca-registration ok
healthz check failed
I0814 11:01:57.863959  110631 httplog.go:90] GET /healthz: (1.295725ms) 0 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:45668]
I0814 11:01:57.867339  110631 httplog.go:90] GET /apis/rbac.authorization.k8s.io/v1/namespaces/kube-system/rolebindings/system::leader-locking-kube-scheduler: (1.357991ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:45668]
I0814 11:01:57.869444  110631 httplog.go:90] GET /api/v1/namespaces/kube-system: (1.485034ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:45668]
I0814 11:01:57.888876  110631 httplog.go:90] POST /apis/rbac.authorization.k8s.io/v1/namespaces/kube-system/rolebindings: (2.778468ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:45668]
I0814 11:01:57.889588  110631 storage_rbac.go:308] created rolebinding.rbac.authorization.k8s.io/system::leader-locking-kube-scheduler in kube-system
I0814 11:01:57.907319  110631 httplog.go:90] GET /apis/rbac.authorization.k8s.io/v1/namespaces/kube-system/rolebindings/system:controller:bootstrap-signer: (1.318097ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:45668]
I0814 11:01:57.909426  110631 httplog.go:90] GET /api/v1/namespaces/kube-system: (1.534345ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:45668]
I0814 11:01:57.928387  110631 httplog.go:90] POST /apis/rbac.authorization.k8s.io/v1/namespaces/kube-system/rolebindings: (2.28649ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:45668]
I0814 11:01:57.928663  110631 storage_rbac.go:308] created rolebinding.rbac.authorization.k8s.io/system:controller:bootstrap-signer in kube-system
I0814 11:01:57.947502  110631 httplog.go:90] GET /apis/rbac.authorization.k8s.io/v1/namespaces/kube-system/rolebindings/system:controller:cloud-provider: (1.526038ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:45668]
I0814 11:01:57.947723  110631 healthz.go:169] healthz check poststarthook/rbac/bootstrap-roles failed: not finished
I0814 11:01:57.947753  110631 healthz.go:183] [+]ping ok
[+]log ok
[+]etcd ok
[+]poststarthook/generic-apiserver-start-informers ok
[+]poststarthook/bootstrap-controller ok
[-]poststarthook/rbac/bootstrap-roles failed: reason withheld
[+]poststarthook/scheduling/bootstrap-system-priority-classes ok
[+]poststarthook/ca-registration ok
healthz check failed
I0814 11:01:57.947793  110631 httplog.go:90] GET /healthz: (995.645µs) 0 [Go-http-client/1.1 127.0.0.1:45666]
I0814 11:01:57.949224  110631 httplog.go:90] GET /api/v1/namespaces/kube-system: (1.32136ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:45668]
I0814 11:01:57.958888  110631 healthz.go:169] healthz check poststarthook/rbac/bootstrap-roles failed: not finished
I0814 11:01:57.958921  110631 healthz.go:183] [+]ping ok
[+]log ok
[+]etcd ok
[+]poststarthook/generic-apiserver-start-informers ok
[+]poststarthook/bootstrap-controller ok
[-]poststarthook/rbac/bootstrap-roles failed: reason withheld
[+]poststarthook/scheduling/bootstrap-system-priority-classes ok
[+]poststarthook/ca-registration ok
healthz check failed
I0814 11:01:57.958958  110631 httplog.go:90] GET /healthz: (1.163965ms) 0 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:45668]
I0814 11:01:57.968338  110631 httplog.go:90] POST /apis/rbac.authorization.k8s.io/v1/namespaces/kube-system/rolebindings: (2.337324ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:45668]
I0814 11:01:57.968582  110631 storage_rbac.go:308] created rolebinding.rbac.authorization.k8s.io/system:controller:cloud-provider in kube-system
I0814 11:01:57.987558  110631 httplog.go:90] GET /apis/rbac.authorization.k8s.io/v1/namespaces/kube-system/rolebindings/system:controller:token-cleaner: (1.532063ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:45668]
I0814 11:01:57.989575  110631 httplog.go:90] GET /api/v1/namespaces/kube-system: (1.410209ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:45668]
I0814 11:01:58.008217  110631 httplog.go:90] POST /apis/rbac.authorization.k8s.io/v1/namespaces/kube-system/rolebindings: (2.186703ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:45668]
I0814 11:01:58.008602  110631 storage_rbac.go:308] created rolebinding.rbac.authorization.k8s.io/system:controller:token-cleaner in kube-system
I0814 11:01:58.027371  110631 httplog.go:90] GET /apis/rbac.authorization.k8s.io/v1/namespaces/kube-public/rolebindings/system:controller:bootstrap-signer: (1.295682ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:45668]
I0814 11:01:58.029151  110631 httplog.go:90] GET /api/v1/namespaces/kube-public: (1.332714ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:45668]
I0814 11:01:58.048196  110631 healthz.go:169] healthz check poststarthook/rbac/bootstrap-roles failed: not finished
I0814 11:01:58.048230  110631 healthz.go:183] [+]ping ok
[+]log ok
[+]etcd ok
[+]poststarthook/generic-apiserver-start-informers ok
[+]poststarthook/bootstrap-controller ok
[-]poststarthook/rbac/bootstrap-roles failed: reason withheld
[+]poststarthook/scheduling/bootstrap-system-priority-classes ok
[+]poststarthook/ca-registration ok
healthz check failed
I0814 11:01:58.048274  110631 httplog.go:90] GET /healthz: (1.49286ms) 0 [Go-http-client/1.1 127.0.0.1:45666]
I0814 11:01:58.048281  110631 httplog.go:90] POST /apis/rbac.authorization.k8s.io/v1/namespaces/kube-public/rolebindings: (2.284552ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:45668]
I0814 11:01:58.048534  110631 storage_rbac.go:308] created rolebinding.rbac.authorization.k8s.io/system:controller:bootstrap-signer in kube-public
I0814 11:01:58.058771  110631 httplog.go:90] GET /healthz: (1.068015ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:45666]
I0814 11:01:58.060671  110631 httplog.go:90] GET /api/v1/namespaces/default: (1.325232ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:45666]
I0814 11:01:58.063095  110631 httplog.go:90] POST /api/v1/namespaces: (1.911687ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:45666]
I0814 11:01:58.065148  110631 httplog.go:90] GET /api/v1/namespaces/default/services/kubernetes: (1.513908ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:45666]
I0814 11:01:58.068837  110631 httplog.go:90] POST /api/v1/namespaces/default/services: (3.213968ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:45666]
I0814 11:01:58.070340  110631 httplog.go:90] GET /api/v1/namespaces/default/endpoints/kubernetes: (1.158463ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:45666]
I0814 11:01:58.072195  110631 httplog.go:90] POST /api/v1/namespaces/default/endpoints: (1.448538ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:45666]
I0814 11:01:58.148479  110631 httplog.go:90] GET /healthz: (1.529339ms) 200 [Go-http-client/1.1 127.0.0.1:45666]
W0814 11:01:58.149416  110631 mutation_detector.go:50] Mutation detector is enabled, this will result in memory leakage.
W0814 11:01:58.149444  110631 mutation_detector.go:50] Mutation detector is enabled, this will result in memory leakage.
W0814 11:01:58.149514  110631 mutation_detector.go:50] Mutation detector is enabled, this will result in memory leakage.
W0814 11:01:58.149526  110631 mutation_detector.go:50] Mutation detector is enabled, this will result in memory leakage.
W0814 11:01:58.149537  110631 mutation_detector.go:50] Mutation detector is enabled, this will result in memory leakage.
W0814 11:01:58.149546  110631 mutation_detector.go:50] Mutation detector is enabled, this will result in memory leakage.
W0814 11:01:58.149585  110631 mutation_detector.go:50] Mutation detector is enabled, this will result in memory leakage.
W0814 11:01:58.149597  110631 mutation_detector.go:50] Mutation detector is enabled, this will result in memory leakage.
W0814 11:01:58.149611  110631 mutation_detector.go:50] Mutation detector is enabled, this will result in memory leakage.
W0814 11:01:58.149661  110631 mutation_detector.go:50] Mutation detector is enabled, this will result in memory leakage.
W0814 11:01:58.149675  110631 mutation_detector.go:50] Mutation detector is enabled, this will result in memory leakage.
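The repeated mutation detector warnings come from client-go's informer cache: when the KUBE_CACHE_MUTATION_DETECTOR environment variable is set, as it is for these integration tests, every informer keeps defensive copies of cached objects and panics if a consumer mutates them, at the cost of the extra memory the warning mentions. A minimal sketch of switching it on for a test process; the environment variable name is the real client-go switch, while enableMutationDetector is a hypothetical helper.

package mutationsketch

import "os"

// enableMutationDetector sets the flag that client-go's cache package reads at
// informer construction time; with it set, accidental mutations of cached
// objects panic instead of silently corrupting the shared cache.
func enableMutationDetector() {
	os.Setenv("KUBE_CACHE_MUTATION_DETECTOR", "true")
}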
I0814 11:01:58.149698  110631 factory.go:294] Creating scheduler from algorithm provider 'DefaultProvider'
I0814 11:01:58.149708  110631 factory.go:382] Creating scheduler with fit predicates 'map[CheckNodeCondition:{} CheckNodeDiskPressure:{} CheckNodeMemoryPressure:{} CheckNodePIDPressure:{} CheckVolumeBinding:{} GeneralPredicates:{} MatchInterPodAffinity:{} MaxAzureDiskVolumeCount:{} MaxCSIVolumeCountPred:{} MaxEBSVolumeCount:{} MaxGCEPDVolumeCount:{} NoDiskConflict:{} NoVolumeZoneConflict:{} PodToleratesNodeTaints:{}]' and priority functions 'map[BalancedResourceAllocation:{} ImageLocalityPriority:{} InterPodAffinityPriority:{} LeastRequestedPriority:{} NodeAffinityPriority:{} NodePreferAvoidPodsPriority:{} SelectorSpreadPriority:{} TaintTolerationPriority:{}]'
I0814 11:01:58.150221  110631 reflector.go:122] Starting reflector *v1.Pod (1s) from k8s.io/client-go/informers/factory.go:133
I0814 11:01:58.151119  110631 reflector.go:160] Listing and watching *v1.Pod from k8s.io/client-go/informers/factory.go:133
I0814 11:01:58.150497  110631 reflector.go:122] Starting reflector *v1.StatefulSet (1s) from k8s.io/client-go/informers/factory.go:133
I0814 11:01:58.151493  110631 reflector.go:160] Listing and watching *v1.StatefulSet from k8s.io/client-go/informers/factory.go:133
I0814 11:01:58.150506  110631 reflector.go:122] Starting reflector *v1.StorageClass (1s) from k8s.io/client-go/informers/factory.go:133
I0814 11:01:58.150513  110631 reflector.go:122] Starting reflector *v1beta1.CSINode (1s) from k8s.io/client-go/informers/factory.go:133
I0814 11:01:58.150578  110631 reflector.go:122] Starting reflector *v1.ReplicationController (1s) from k8s.io/client-go/informers/factory.go:133
I0814 11:01:58.150605  110631 reflector.go:122] Starting reflector *v1.PersistentVolume (1s) from k8s.io/client-go/informers/factory.go:133
I0814 11:01:58.150658  110631 reflector.go:122] Starting reflector *v1.Node (1s) from k8s.io/client-go/informers/factory.go:133
I0814 11:01:58.150720  110631 reflector.go:122] Starting reflector *v1.PersistentVolumeClaim (1s) from k8s.io/client-go/informers/factory.go:133
I0814 11:01:58.150745  110631 reflector.go:122] Starting reflector *v1.ReplicaSet (1s) from k8s.io/client-go/informers/factory.go:133
I0814 11:01:58.150769  110631 reflector.go:122] Starting reflector *v1beta1.PodDisruptionBudget (1s) from k8s.io/client-go/informers/factory.go:133
I0814 11:01:58.150997  110631 reflector.go:122] Starting reflector *v1.Service (1s) from k8s.io/client-go/informers/factory.go:133
I0814 11:01:58.151848  110631 reflector.go:160] Listing and watching *v1.Service from k8s.io/client-go/informers/factory.go:133
I0814 11:01:58.152420  110631 httplog.go:90] GET /apis/apps/v1/statefulsets?limit=500&resourceVersion=0: (573.175µs) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:45668]
I0814 11:01:58.152618  110631 httplog.go:90] GET /api/v1/pods?limit=500&resourceVersion=0: (1.117309ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:45666]
I0814 11:01:58.151775  110631 reflector.go:160] Listing and watching *v1.StorageClass from k8s.io/client-go/informers/factory.go:133
I0814 11:01:58.151786  110631 reflector.go:160] Listing and watching *v1beta1.CSINode from k8s.io/client-go/informers/factory.go:133
I0814 11:01:58.151796  110631 reflector.go:160] Listing and watching *v1.ReplicationController from k8s.io/client-go/informers/factory.go:133
I0814 11:01:58.151805  110631 reflector.go:160] Listing and watching *v1.PersistentVolume from k8s.io/client-go/informers/factory.go:133
I0814 11:01:58.151814  110631 reflector.go:160] Listing and watching *v1.Node from k8s.io/client-go/informers/factory.go:133
I0814 11:01:58.151823  110631 reflector.go:160] Listing and watching *v1.PersistentVolumeClaim from k8s.io/client-go/informers/factory.go:133
I0814 11:01:58.153450  110631 httplog.go:90] GET /api/v1/services?limit=500&resourceVersion=0: (650.982µs) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:46000]
I0814 11:01:58.153630  110631 httplog.go:90] GET /apis/storage.k8s.io/v1/storageclasses?limit=500&resourceVersion=0: (504.753µs) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:45666]
I0814 11:01:58.153695  110631 httplog.go:90] GET /apis/storage.k8s.io/v1beta1/csinodes?limit=500&resourceVersion=0: (457.769µs) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:46002]
I0814 11:01:58.151830  110631 reflector.go:160] Listing and watching *v1.ReplicaSet from k8s.io/client-go/informers/factory.go:133
I0814 11:01:58.153769  110631 get.go:250] Starting watch for /api/v1/pods, rv=29418 labels= fields= timeout=5m48s
I0814 11:01:58.151839  110631 reflector.go:160] Listing and watching *v1beta1.PodDisruptionBudget from k8s.io/client-go/informers/factory.go:133
I0814 11:01:58.153970  110631 httplog.go:90] GET /api/v1/replicationcontrollers?limit=500&resourceVersion=0: (443.873µs) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:46008]
I0814 11:01:58.154573  110631 get.go:250] Starting watch for /apis/storage.k8s.io/v1/storageclasses, rv=29420 labels= fields= timeout=8m9s
I0814 11:01:58.154594  110631 get.go:250] Starting watch for /api/v1/services, rv=29645 labels= fields= timeout=8m15s
I0814 11:01:58.154690  110631 httplog.go:90] GET /api/v1/persistentvolumeclaims?limit=500&resourceVersion=0: (411.113µs) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:46010]
I0814 11:01:58.154715  110631 httplog.go:90] GET /api/v1/nodes?limit=500&resourceVersion=0: (454.939µs) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:46000]
I0814 11:01:58.154767  110631 httplog.go:90] GET /api/v1/persistentvolumes?limit=500&resourceVersion=0: (349.504µs) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:45666]
I0814 11:01:58.155108  110631 httplog.go:90] GET /apis/apps/v1/replicasets?limit=500&resourceVersion=0: (335.337µs) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:46008]
I0814 11:01:58.155238  110631 get.go:250] Starting watch for /api/v1/replicationcontrollers, rv=29418 labels= fields= timeout=9m19s
I0814 11:01:58.155377  110631 get.go:250] Starting watch for /api/v1/nodes, rv=29418 labels= fields= timeout=7m32s
I0814 11:01:58.155419  110631 get.go:250] Starting watch for /api/v1/persistentvolumeclaims, rv=29418 labels= fields= timeout=7m52s
I0814 11:01:58.155502  110631 get.go:250] Starting watch for /apis/storage.k8s.io/v1beta1/csinodes, rv=29419 labels= fields= timeout=6m39s
I0814 11:01:58.155811  110631 get.go:250] Starting watch for /api/v1/persistentvolumes, rv=29418 labels= fields= timeout=9m2s
I0814 11:01:58.155997  110631 get.go:250] Starting watch for /apis/apps/v1/replicasets, rv=29420 labels= fields= timeout=8m54s
I0814 11:01:58.156402  110631 httplog.go:90] GET /apis/policy/v1beta1/poddisruptionbudgets?limit=500&resourceVersion=0: (355.021µs) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:46022]
I0814 11:01:58.156432  110631 get.go:250] Starting watch for /apis/apps/v1/statefulsets, rv=29420 labels= fields= timeout=5m44s
I0814 11:01:58.157002  110631 get.go:250] Starting watch for /apis/policy/v1beta1/poddisruptionbudgets, rv=29418 labels= fields= timeout=6m48s
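Each "Starting reflector" / "Listing and watching" / "Starting watch" triple above is one shared informer coming up: a LIST of the resource followed by a WATCH, with a 1s resync period. A minimal sketch, assuming a kubernetes.Interface client is already constructed, of the same pattern with client-go's shared informer factory; the subset of informers chosen here is illustrative rather than the scheduler's exact set.

package informersketch

import (
	"time"

	"k8s.io/client-go/informers"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/cache"
)

// startInformers starts a factory with a 1s resync period (hence the periodic
// "forcing resync" lines later in the log) and waits until every cache has
// completed its initial LIST, which is what "caches populated" reports.
func startInformers(client kubernetes.Interface, stopCh <-chan struct{}) bool {
	factory := informers.NewSharedInformerFactory(client, 1*time.Second)

	podsSynced := factory.Core().V1().Pods().Informer().HasSynced
	nodesSynced := factory.Core().V1().Nodes().Informer().HasSynced
	servicesSynced := factory.Core().V1().Services().Informer().HasSynced

	factory.Start(stopCh) // each informer starts a reflector: LIST, then WATCH

	return cache.WaitForCacheSync(stopCh, podsSynced, nodesSynced, servicesSynced)
}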
I0814 11:01:58.250173  110631 shared_informer.go:211] caches populated
I0814 11:01:58.350472  110631 shared_informer.go:211] caches populated
I0814 11:01:58.451430  110631 shared_informer.go:211] caches populated
I0814 11:01:58.552058  110631 shared_informer.go:211] caches populated
I0814 11:01:58.652296  110631 shared_informer.go:211] caches populated
I0814 11:01:58.752579  110631 shared_informer.go:211] caches populated
I0814 11:01:58.853417  110631 shared_informer.go:211] caches populated
I0814 11:01:58.953613  110631 shared_informer.go:211] caches populated
I0814 11:01:59.053848  110631 shared_informer.go:211] caches populated
I0814 11:01:59.153260  110631 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0814 11:01:59.154039  110631 shared_informer.go:211] caches populated
I0814 11:01:59.154121  110631 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0814 11:01:59.154345  110631 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0814 11:01:59.154482  110631 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0814 11:01:59.155213  110631 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0814 11:01:59.155228  110631 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0814 11:01:59.155660  110631 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0814 11:01:59.254246  110631 shared_informer.go:211] caches populated
I0814 11:01:59.354453  110631 shared_informer.go:211] caches populated
I0814 11:01:59.357029  110631 httplog.go:90] POST /api/v1/nodes: (1.773568ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:46112]
I0814 11:01:59.357299  110631 node_tree.go:93] Added node "test-node-0" in group "" to NodeTree
I0814 11:01:59.359338  110631 httplog.go:90] POST /api/v1/namespaces/preempt-with-permit-plugin4946b26a-944d-4220-869d-bf3f1e62fd02/pods: (1.791884ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:46112]
I0814 11:01:59.360109  110631 scheduling_queue.go:830] About to try and schedule pod preempt-with-permit-plugin4946b26a-944d-4220-869d-bf3f1e62fd02/waiting-pod
I0814 11:01:59.360123  110631 scheduler.go:477] Attempting to schedule pod: preempt-with-permit-plugin4946b26a-944d-4220-869d-bf3f1e62fd02/waiting-pod
I0814 11:01:59.360272  110631 scheduler_binder.go:256] AssumePodVolumes for pod "preempt-with-permit-plugin4946b26a-944d-4220-869d-bf3f1e62fd02/waiting-pod", node "test-node-0"
I0814 11:01:59.360287  110631 scheduler_binder.go:266] AssumePodVolumes for pod "preempt-with-permit-plugin4946b26a-944d-4220-869d-bf3f1e62fd02/waiting-pod", node "test-node-0": all PVCs bound and nothing to do
I0814 11:01:59.360339  110631 framework.go:562] waiting for 30s for pod "waiting-pod" at permit
I0814 11:01:59.362756  110631 factory.go:615] Attempting to bind signalling-pod to test-node-0
I0814 11:01:59.363081  110631 factory.go:615] Attempting to bind waiting-pod to test-node-0
I0814 11:01:59.364136  110631 scheduler.go:447] Failed to bind pod: permit-plugin4a2b06ee-ca1a-4c9e-9d0b-fa335a6948fa/signalling-pod
E0814 11:01:59.364153  110631 scheduler.go:449] scheduler cache ForgetPod failed: pod 09eb4e73-010e-4ea5-a7c6-ac2e588a9865 wasn't assumed so cannot be forgotten
E0814 11:01:59.364170  110631 scheduler.go:605] error binding pod: Post http://127.0.0.1:33467/api/v1/namespaces/permit-plugin4a2b06ee-ca1a-4c9e-9d0b-fa335a6948fa/pods/signalling-pod/binding: dial tcp 127.0.0.1:33467: connect: connection refused
E0814 11:01:59.364195  110631 factory.go:566] Error scheduling permit-plugin4a2b06ee-ca1a-4c9e-9d0b-fa335a6948fa/signalling-pod: Post http://127.0.0.1:33467/api/v1/namespaces/permit-plugin4a2b06ee-ca1a-4c9e-9d0b-fa335a6948fa/pods/signalling-pod/binding: dial tcp 127.0.0.1:33467: connect: connection refused; retrying
I0814 11:01:59.364228  110631 factory.go:624] Updating pod condition for permit-plugin4a2b06ee-ca1a-4c9e-9d0b-fa335a6948fa/signalling-pod to (PodScheduled==False, Reason=SchedulerError)
E0814 11:01:59.364919  110631 event_broadcaster.go:242] Unable to write event: 'Post http://127.0.0.1:33467/apis/events.k8s.io/v1beta1/namespaces/permit-plugin4a2b06ee-ca1a-4c9e-9d0b-fa335a6948fa/events: dial tcp 127.0.0.1:33467: connect: connection refused' (may retry after sleeping)
E0814 11:01:59.364985  110631 scheduler.go:280] Error updating the condition of the pod permit-plugin4a2b06ee-ca1a-4c9e-9d0b-fa335a6948fa/signalling-pod: Put http://127.0.0.1:33467/api/v1/namespaces/permit-plugin4a2b06ee-ca1a-4c9e-9d0b-fa335a6948fa/pods/signalling-pod/status: dial tcp 127.0.0.1:33467: connect: connection refused
E0814 11:01:59.365032  110631 factory.go:599] Error getting pod permit-plugin4a2b06ee-ca1a-4c9e-9d0b-fa335a6948fa/signalling-pod for retry: Get http://127.0.0.1:33467/api/v1/namespaces/permit-plugin4a2b06ee-ca1a-4c9e-9d0b-fa335a6948fa/pods/signalling-pod: dial tcp 127.0.0.1:33467: connect: connection refused; retrying...
I0814 11:01:59.369776  110631 httplog.go:90] POST /api/v1/namespaces/preempt-with-permit-plugin4946b26a-944d-4220-869d-bf3f1e62fd02/pods/waiting-pod/binding: (6.41238ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:46112]
I0814 11:01:59.370400  110631 scheduler.go:614] pod preempt-with-permit-plugin4946b26a-944d-4220-869d-bf3f1e62fd02/waiting-pod is bound successfully on node "test-node-0", 1 nodes evaluated, 1 nodes were found feasible. Bound node resource: "Capacity: CPU<500m>|Memory<500>|Pods<32>|StorageEphemeral<0>; Allocatable: CPU<500m>|Memory<500>|Pods<32>|StorageEphemeral<0>.".
I0814 11:01:59.372320  110631 httplog.go:90] POST /apis/events.k8s.io/v1beta1/namespaces/preempt-with-permit-plugin4946b26a-944d-4220-869d-bf3f1e62fd02/events: (1.584835ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:46112]
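The 'waiting for 30s for pod "waiting-pod" at permit' line reflects the scheduling framework's Permit extension point: the plugin returns a Wait status with a timeout and the pod is parked after binding is prepared, until something approves or rejects it (in this test, the preempting signalling-pod is meant to provide that signal). A minimal sketch of a Permit plugin against the present-day framework package; the interface at this 2019 commit differed slightly (it took a PluginContext rather than a context plus CycleState), and the plugin below is illustrative, not the test's plugin.

package permitsketch

import (
	"context"
	"time"

	v1 "k8s.io/api/core/v1"
	framework "k8s.io/kubernetes/pkg/scheduler/framework"
)

type waitingPermitPlugin struct {
	permitAll bool
}

func (pl *waitingPermitPlugin) Name() string { return "waiting-permit-sketch" }

// Permit either lets the pod through immediately or parks it at the permit
// stage for up to 30s, mirroring the "waiting for 30s ... at permit" line.
func (pl *waitingPermitPlugin) Permit(ctx context.Context, state *framework.CycleState, p *v1.Pod, nodeName string) (*framework.Status, time.Duration) {
	if pl.permitAll {
		return framework.NewStatus(framework.Success, ""), 0
	}
	return framework.NewStatus(framework.Wait, "waiting for a signal"), 30 * time.Second
}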
E0814 11:01:59.565589  110631 factory.go:599] Error getting pod permit-plugin4a2b06ee-ca1a-4c9e-9d0b-fa335a6948fa/signalling-pod for retry: Get http://127.0.0.1:33467/api/v1/namespaces/permit-plugin4a2b06ee-ca1a-4c9e-9d0b-fa335a6948fa/pods/signalling-pod: dial tcp 127.0.0.1:33467: connect: connection refused; retrying...
E0814 11:01:59.966274  110631 factory.go:599] Error getting pod permit-plugin4a2b06ee-ca1a-4c9e-9d0b-fa335a6948fa/signalling-pod for retry: Get http://127.0.0.1:33467/api/v1/namespaces/permit-plugin4a2b06ee-ca1a-4c9e-9d0b-fa335a6948fa/pods/signalling-pod: dial tcp 127.0.0.1:33467: connect: connection refused; retrying...
I0814 11:02:00.153473  110631 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0814 11:02:00.154624  110631 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0814 11:02:00.155757  110631 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0814 11:02:00.155790  110631 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0814 11:02:00.155804  110631 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0814 11:02:00.155883  110631 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0814 11:02:00.157045  110631 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
E0814 11:02:00.766884  110631 factory.go:599] Error getting pod permit-plugin4a2b06ee-ca1a-4c9e-9d0b-fa335a6948fa/signalling-pod for retry: Get http://127.0.0.1:33467/api/v1/namespaces/permit-plugin4a2b06ee-ca1a-4c9e-9d0b-fa335a6948fa/pods/signalling-pod: dial tcp 127.0.0.1:33467: connect: connection refused; retrying...
I0814 11:02:01.153767  110631 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0814 11:02:01.154813  110631 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0814 11:02:01.155881  110631 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0814 11:02:01.155918  110631 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0814 11:02:01.155946  110631 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0814 11:02:01.155968  110631 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0814 11:02:01.157240  110631 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0814 11:02:02.154013  110631 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0814 11:02:02.155010  110631 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0814 11:02:02.156019  110631 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0814 11:02:02.156118  110631 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0814 11:02:02.156157  110631 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0814 11:02:02.156181  110631 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0814 11:02:02.157388  110631 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
E0814 11:02:02.367619  110631 factory.go:599] Error getting pod permit-plugin4a2b06ee-ca1a-4c9e-9d0b-fa335a6948fa/signalling-pod for retry: Get http://127.0.0.1:33467/api/v1/namespaces/permit-plugin4a2b06ee-ca1a-4c9e-9d0b-fa335a6948fa/pods/signalling-pod: dial tcp 127.0.0.1:33467: connect: connection refused; retrying...
I0814 11:02:03.154344  110631 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0814 11:02:03.155183  110631 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0814 11:02:03.156198  110631 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0814 11:02:03.156262  110631 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0814 11:02:03.156409  110631 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0814 11:02:03.156759  110631 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0814 11:02:03.157542  110631 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0814 11:02:04.154602  110631 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0814 11:02:04.155365  110631 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0814 11:02:04.156326  110631 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0814 11:02:04.156448  110631 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0814 11:02:04.156543  110631 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0814 11:02:04.156915  110631 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0814 11:02:04.157690  110631 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0814 11:02:05.154839  110631 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0814 11:02:05.155545  110631 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0814 11:02:05.156598  110631 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0814 11:02:05.156611  110631 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0814 11:02:05.156700  110631 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0814 11:02:05.157381  110631 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0814 11:02:05.157839  110631 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
E0814 11:02:05.568302  110631 factory.go:599] Error getting pod permit-plugin4a2b06ee-ca1a-4c9e-9d0b-fa335a6948fa/signalling-pod for retry: Get http://127.0.0.1:33467/api/v1/namespaces/permit-plugin4a2b06ee-ca1a-4c9e-9d0b-fa335a6948fa/pods/signalling-pod: dial tcp 127.0.0.1:33467: connect: connection refused; retrying...
I0814 11:02:06.155068  110631 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0814 11:02:06.155718  110631 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0814 11:02:06.156743  110631 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0814 11:02:06.156787  110631 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0814 11:02:06.156891  110631 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0814 11:02:06.157510  110631 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0814 11:02:06.157986  110631 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0814 11:02:07.155281  110631 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0814 11:02:07.155889  110631 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0814 11:02:07.156924  110631 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0814 11:02:07.156926  110631 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0814 11:02:07.157070  110631 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0814 11:02:07.157684  110631 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0814 11:02:07.158214  110631 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0814 11:02:08.061231  110631 httplog.go:90] GET /api/v1/namespaces/default: (1.548968ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:46112]
I0814 11:02:08.063141  110631 httplog.go:90] GET /api/v1/namespaces/default/services/kubernetes: (1.50366ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:46112]
I0814 11:02:08.064868  110631 httplog.go:90] GET /api/v1/namespaces/default/endpoints/kubernetes: (1.17262ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:46112]
I0814 11:02:08.157081  110631 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0814 11:02:08.157132  110631 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0814 11:02:08.157254  110631 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0814 11:02:08.157339  110631 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0814 11:02:08.157383  110631 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0814 11:02:08.157854  110631 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0814 11:02:08.158375  110631 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0814 11:02:09.157246  110631 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0814 11:02:09.157435  110631 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0814 11:02:09.157514  110631 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0814 11:02:09.157585  110631 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0814 11:02:09.158008  110631 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0814 11:02:09.158128  110631 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0814 11:02:09.158533  110631 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0814 11:02:10.157481  110631 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0814 11:02:10.157681  110631 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0814 11:02:10.157709  110631 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0814 11:02:10.157735  110631 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0814 11:02:10.158100  110631 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0814 11:02:10.158641  110631 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0814 11:02:10.158667  110631 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
E0814 11:02:11.101802  110631 event_broadcaster.go:242] Unable to write event: 'Post http://127.0.0.1:33467/apis/events.k8s.io/v1beta1/namespaces/permit-plugin4a2b06ee-ca1a-4c9e-9d0b-fa335a6948fa/events: dial tcp 127.0.0.1:33467: connect: connection refused' (may retry after sleeping)
I0814 11:02:11.157684  110631 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0814 11:02:11.157863  110631 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0814 11:02:11.157913  110631 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0814 11:02:11.157989  110631 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0814 11:02:11.158722  110631 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0814 11:02:11.158786  110631 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0814 11:02:11.158797  110631 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
E0814 11:02:11.968956  110631 factory.go:599] Error getting pod permit-plugin4a2b06ee-ca1a-4c9e-9d0b-fa335a6948fa/signalling-pod for retry: Get http://127.0.0.1:33467/api/v1/namespaces/permit-plugin4a2b06ee-ca1a-4c9e-9d0b-fa335a6948fa/pods/signalling-pod: dial tcp 127.0.0.1:33467: connect: connection refused; retrying...
I0814 11:02:12.158054  110631 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0814 11:02:12.158054  110631 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0814 11:02:12.158054  110631 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0814 11:02:12.158839  110631 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0814 11:02:12.159063  110631 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0814 11:02:12.159267  110631 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0814 11:02:12.159274  110631 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0814 11:02:13.158251  110631 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0814 11:02:13.158299  110631 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0814 11:02:13.158418  110631 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0814 11:02:13.159019  110631 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0814 11:02:13.159267  110631 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0814 11:02:13.159329  110631 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0814 11:02:13.159405  110631 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0814 11:02:14.158490  110631 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0814 11:02:14.158498  110631 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0814 11:02:14.158523  110631 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0814 11:02:14.159167  110631 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0814 11:02:14.159435  110631 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0814 11:02:14.159480  110631 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0814 11:02:14.159677  110631 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0814 11:02:15.158684  110631 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0814 11:02:15.158681  110631 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0814 11:02:15.158766  110631 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0814 11:02:15.159318  110631 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0814 11:02:15.159688  110631 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0814 11:02:15.159962  110631 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0814 11:02:15.159964  110631 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0814 11:02:16.158920  110631 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0814 11:02:16.158945  110631 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0814 11:02:16.159046  110631 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0814 11:02:16.159444  110631 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0814 11:02:16.159821  110631 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0814 11:02:16.160649  110631 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0814 11:02:16.160725  110631 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0814 11:02:17.159152  110631 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0814 11:02:17.159202  110631 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0814 11:02:17.159172  110631 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0814 11:02:17.159643  110631 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0814 11:02:17.160127  110631 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0814 11:02:17.160929  110631 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0814 11:02:17.161129  110631 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0814 11:02:18.062903  110631 httplog.go:90] GET /api/v1/namespaces/default: (2.971679ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:46112]
I0814 11:02:18.065724  110631 httplog.go:90] GET /api/v1/namespaces/default/services/kubernetes: (2.314489ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:46112]
I0814 11:02:18.068251  110631 httplog.go:90] GET /api/v1/namespaces/default/endpoints/kubernetes: (1.870887ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:46112]
I0814 11:02:18.159296  110631 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0814 11:02:18.159397  110631 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0814 11:02:18.159429  110631 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0814 11:02:18.160205  110631 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0814 11:02:18.160527  110631 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0814 11:02:18.161316  110631 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0814 11:02:18.161410  110631 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0814 11:02:19.159543  110631 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0814 11:02:19.159610  110631 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0814 11:02:19.159666  110631 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0814 11:02:19.160372  110631 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0814 11:02:19.160669  110631 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0814 11:02:19.161515  110631 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0814 11:02:19.161646  110631 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0814 11:02:20.159706  110631 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0814 11:02:20.159790  110631 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0814 11:02:20.160452  110631 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0814 11:02:20.160547  110631 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0814 11:02:20.160818  110631 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0814 11:02:20.161685  110631 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0814 11:02:20.161855  110631 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0814 11:02:21.159848  110631 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0814 11:02:21.159938  110631 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0814 11:02:21.160658  110631 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0814 11:02:21.160671  110631 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0814 11:02:21.160938  110631 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0814 11:02:21.161843  110631 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0814 11:02:21.162020  110631 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0814 11:02:22.160008  110631 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0814 11:02:22.160106  110631 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0814 11:02:22.160825  110631 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0814 11:02:22.160826  110631 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0814 11:02:22.161079  110631 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0814 11:02:22.161974  110631 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0814 11:02:22.162213  110631 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
E0814 11:02:22.791156  110631 event_broadcaster.go:242] Unable to write event: 'Post http://127.0.0.1:33467/apis/events.k8s.io/v1beta1/namespaces/permit-plugin4a2b06ee-ca1a-4c9e-9d0b-fa335a6948fa/events: dial tcp 127.0.0.1:33467: connect: connection refused' (may retry after sleeping)
I0814 11:02:23.160162  110631 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0814 11:02:23.160364  110631 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0814 11:02:23.160962  110631 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0814 11:02:23.160970  110631 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0814 11:02:23.161181  110631 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0814 11:02:23.162125  110631 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0814 11:02:23.162369  110631 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0814 11:02:24.160360  110631 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0814 11:02:24.160724  110631 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0814 11:02:24.161150  110631 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0814 11:02:24.161151  110631 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0814 11:02:24.161356  110631 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0814 11:02:24.162290  110631 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0814 11:02:24.162536  110631 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
E0814 11:02:24.769687  110631 factory.go:599] Error getting pod permit-plugin4a2b06ee-ca1a-4c9e-9d0b-fa335a6948fa/signalling-pod for retry: Get http://127.0.0.1:33467/api/v1/namespaces/permit-plugin4a2b06ee-ca1a-4c9e-9d0b-fa335a6948fa/pods/signalling-pod: dial tcp 127.0.0.1:33467: connect: connection refused; retrying...
I0814 11:02:25.160535  110631 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0814 11:02:25.160853  110631 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0814 11:02:25.161306  110631 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0814 11:02:25.161331  110631 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0814 11:02:25.161517  110631 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0814 11:02:25.162446  110631 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0814 11:02:25.162707  110631 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0814 11:02:26.160678  110631 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0814 11:02:26.161025  110631 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0814 11:02:26.161482  110631 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0814 11:02:26.161511  110631 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0814 11:02:26.161753  110631 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0814 11:02:26.162630  110631 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0814 11:02:26.162868  110631 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0814 11:02:27.160912  110631 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0814 11:02:27.161177  110631 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0814 11:02:27.161615  110631 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0814 11:02:27.161690  110631 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0814 11:02:27.161858  110631 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0814 11:02:27.162756  110631 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0814 11:02:27.163032  110631 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0814 11:02:28.061598  110631 httplog.go:90] GET /api/v1/namespaces/default: (1.630734ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:46112]
I0814 11:02:28.063189  110631 httplog.go:90] GET /api/v1/namespaces/default/services/kubernetes: (1.155849ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:46112]
I0814 11:02:28.064866  110631 httplog.go:90] GET /api/v1/namespaces/default/endpoints/kubernetes: (1.188669ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:46112]
I0814 11:02:28.161139  110631 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0814 11:02:28.161377  110631 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0814 11:02:28.161768  110631 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0814 11:02:28.161907  110631 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0814 11:02:28.161995  110631 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0814 11:02:28.162938  110631 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0814 11:02:28.163639  110631 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0814 11:02:29.161293  110631 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0814 11:02:29.161498  110631 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0814 11:02:29.161920  110631 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0814 11:02:29.162037  110631 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0814 11:02:29.162176  110631 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0814 11:02:29.163126  110631 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0814 11:02:29.163818  110631 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0814 11:02:29.364312  110631 scheduling_queue.go:830] About to try and schedule pod preempt-with-permit-plugin4946b26a-944d-4220-869d-bf3f1e62fd02/preemptor-pod
I0814 11:02:29.364355  110631 scheduler.go:477] Attempting to schedule pod: preempt-with-permit-plugin4946b26a-944d-4220-869d-bf3f1e62fd02/preemptor-pod
I0814 11:02:29.364360  110631 httplog.go:90] POST /api/v1/namespaces/preempt-with-permit-plugin4946b26a-944d-4220-869d-bf3f1e62fd02/pods: (3.372382ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:46112]
I0814 11:02:29.364490  110631 factory.go:550] Unable to schedule preempt-with-permit-plugin4946b26a-944d-4220-869d-bf3f1e62fd02/preemptor-pod: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0814 11:02:29.364554  110631 factory.go:624] Updating pod condition for preempt-with-permit-plugin4946b26a-944d-4220-869d-bf3f1e62fd02/preemptor-pod to (PodScheduled==False, Reason=Unschedulable)
I0814 11:02:29.367591  110631 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugin4946b26a-944d-4220-869d-bf3f1e62fd02/pods/preemptor-pod: (2.458852ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:50574]
I0814 11:02:29.367610  110631 httplog.go:90] PUT /api/v1/namespaces/preempt-with-permit-plugin4946b26a-944d-4220-869d-bf3f1e62fd02/pods/preemptor-pod/status: (2.566123ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:46112]
I0814 11:02:29.368370  110631 httplog.go:90] POST /apis/events.k8s.io/v1beta1/namespaces/preempt-with-permit-plugin4946b26a-944d-4220-869d-bf3f1e62fd02/events: (3.011242ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:50576]
I0814 11:02:29.372638  110631 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugin4946b26a-944d-4220-869d-bf3f1e62fd02/pods/preemptor-pod: (4.530268ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:50574]
I0814 11:02:29.373044  110631 generic_scheduler.go:1191] Node test-node-0 is a potential node for preemption.
I0814 11:02:29.376278  110631 httplog.go:90] PUT /api/v1/namespaces/preempt-with-permit-plugin4946b26a-944d-4220-869d-bf3f1e62fd02/pods/preemptor-pod/status: (2.745202ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:50576]
I0814 11:02:29.379709  110631 httplog.go:90] DELETE /api/v1/namespaces/preempt-with-permit-plugin4946b26a-944d-4220-869d-bf3f1e62fd02/pods/waiting-pod: (2.870586ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:50576]
I0814 11:02:29.382393  110631 httplog.go:90] POST /apis/events.k8s.io/v1beta1/namespaces/preempt-with-permit-plugin4946b26a-944d-4220-869d-bf3f1e62fd02/events: (1.880958ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:50576]
I0814 11:02:29.467657  110631 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugin4946b26a-944d-4220-869d-bf3f1e62fd02/pods/preemptor-pod: (2.293171ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:50576]
I0814 11:02:29.567243  110631 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugin4946b26a-944d-4220-869d-bf3f1e62fd02/pods/preemptor-pod: (1.964831ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:50576]
I0814 11:02:29.666846  110631 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugin4946b26a-944d-4220-869d-bf3f1e62fd02/pods/preemptor-pod: (1.686087ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:50576]
I0814 11:02:29.767011  110631 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugin4946b26a-944d-4220-869d-bf3f1e62fd02/pods/preemptor-pod: (1.845524ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:50576]
I0814 11:02:29.867061  110631 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugin4946b26a-944d-4220-869d-bf3f1e62fd02/pods/preemptor-pod: (1.813237ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:50576]
I0814 11:02:29.967058  110631 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugin4946b26a-944d-4220-869d-bf3f1e62fd02/pods/preemptor-pod: (1.88771ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:50576]
I0814 11:02:30.066738  110631 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugin4946b26a-944d-4220-869d-bf3f1e62fd02/pods/preemptor-pod: (1.576827ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:50576]
I0814 11:02:30.161584  110631 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0814 11:02:30.161659  110631 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0814 11:02:30.162081  110631 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0814 11:02:30.162166  110631 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0814 11:02:30.162341  110631 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0814 11:02:30.163365  110631 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0814 11:02:30.163967  110631 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0814 11:02:30.164085  110631 scheduling_queue.go:830] About to try and schedule pod preempt-with-permit-plugin4946b26a-944d-4220-869d-bf3f1e62fd02/preemptor-pod
I0814 11:02:30.164099  110631 scheduler.go:477] Attempting to schedule pod: preempt-with-permit-plugin4946b26a-944d-4220-869d-bf3f1e62fd02/preemptor-pod
I0814 11:02:30.164284  110631 factory.go:550] Unable to schedule preempt-with-permit-plugin4946b26a-944d-4220-869d-bf3f1e62fd02/preemptor-pod: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0814 11:02:30.164353  110631 factory.go:624] Updating pod condition for preempt-with-permit-plugin4946b26a-944d-4220-869d-bf3f1e62fd02/preemptor-pod to (PodScheduled==False, Reason=Unschedulable)
I0814 11:02:30.167158  110631 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugin4946b26a-944d-4220-869d-bf3f1e62fd02/pods/preemptor-pod: (2.473028ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:50576]
I0814 11:02:30.167417  110631 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugin4946b26a-944d-4220-869d-bf3f1e62fd02/pods/preemptor-pod: (2.643961ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:46112]
I0814 11:02:30.167433  110631 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugin4946b26a-944d-4220-869d-bf3f1e62fd02/pods/preemptor-pod: (1.881356ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:50584]
I0814 11:02:30.168226  110631 httplog.go:90] POST /apis/events.k8s.io/v1beta1/namespaces/preempt-with-permit-plugin4946b26a-944d-4220-869d-bf3f1e62fd02/events: (2.82299ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:50582]
I0814 11:02:30.274037  110631 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugin4946b26a-944d-4220-869d-bf3f1e62fd02/pods/preemptor-pod: (8.806321ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:50584]
I0814 11:02:30.367831  110631 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugin4946b26a-944d-4220-869d-bf3f1e62fd02/pods/preemptor-pod: (2.609118ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:50584]
I0814 11:02:30.467960  110631 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugin4946b26a-944d-4220-869d-bf3f1e62fd02/pods/preemptor-pod: (2.881519ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:50584]
I0814 11:02:30.566713  110631 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugin4946b26a-944d-4220-869d-bf3f1e62fd02/pods/preemptor-pod: (1.609577ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:50584]
I0814 11:02:30.667240  110631 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugin4946b26a-944d-4220-869d-bf3f1e62fd02/pods/preemptor-pod: (1.969502ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:50584]
I0814 11:02:30.766937  110631 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugin4946b26a-944d-4220-869d-bf3f1e62fd02/pods/preemptor-pod: (1.814768ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:50584]
I0814 11:02:30.866979  110631 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugin4946b26a-944d-4220-869d-bf3f1e62fd02/pods/preemptor-pod: (1.902907ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:50584]
I0814 11:02:30.973763  110631 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugin4946b26a-944d-4220-869d-bf3f1e62fd02/pods/preemptor-pod: (8.567761ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:50584]
I0814 11:02:31.067589  110631 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugin4946b26a-944d-4220-869d-bf3f1e62fd02/pods/preemptor-pod: (2.081015ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:50584]
I0814 11:02:31.154587  110631 scheduling_queue.go:830] About to try and schedule pod preempt-with-permit-plugin4946b26a-944d-4220-869d-bf3f1e62fd02/preemptor-pod
I0814 11:02:31.154617  110631 scheduler.go:477] Attempting to schedule pod: preempt-with-permit-plugin4946b26a-944d-4220-869d-bf3f1e62fd02/preemptor-pod
I0814 11:02:31.154840  110631 factory.go:550] Unable to schedule preempt-with-permit-plugin4946b26a-944d-4220-869d-bf3f1e62fd02/preemptor-pod: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0814 11:02:31.154907  110631 factory.go:624] Updating pod condition for preempt-with-permit-plugin4946b26a-944d-4220-869d-bf3f1e62fd02/preemptor-pod to (PodScheduled==False, Reason=Unschedulable)
I0814 11:02:31.156923  110631 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugin4946b26a-944d-4220-869d-bf3f1e62fd02/pods/preemptor-pod: (1.677161ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:50584]
I0814 11:02:31.157529  110631 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugin4946b26a-944d-4220-869d-bf3f1e62fd02/pods/preemptor-pod: (2.285708ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:50576]
I0814 11:02:31.158797  110631 httplog.go:90] PATCH /apis/events.k8s.io/v1beta1/namespaces/preempt-with-permit-plugin4946b26a-944d-4220-869d-bf3f1e62fd02/events/preemptor-pod.15bac51d50c28286: (2.819325ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:50650]
I0814 11:02:31.161732  110631 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0814 11:02:31.161740  110631 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0814 11:02:31.162215  110631 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0814 11:02:31.162338  110631 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0814 11:02:31.162476  110631 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0814 11:02:31.163526  110631 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0814 11:02:31.164118  110631 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0814 11:02:31.164274  110631 scheduling_queue.go:830] About to try and schedule pod preempt-with-permit-plugin4946b26a-944d-4220-869d-bf3f1e62fd02/preemptor-pod
I0814 11:02:31.164286  110631 scheduler.go:477] Attempting to schedule pod: preempt-with-permit-plugin4946b26a-944d-4220-869d-bf3f1e62fd02/preemptor-pod
I0814 11:02:31.164398  110631 factory.go:550] Unable to schedule preempt-with-permit-plugin4946b26a-944d-4220-869d-bf3f1e62fd02/preemptor-pod: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0814 11:02:31.164445  110631 factory.go:624] Updating pod condition for preempt-with-permit-plugin4946b26a-944d-4220-869d-bf3f1e62fd02/preemptor-pod to (PodScheduled==False, Reason=Unschedulable)
I0814 11:02:31.166424  110631 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugin4946b26a-944d-4220-869d-bf3f1e62fd02/pods/preemptor-pod: (1.611247ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:50584]
I0814 11:02:31.167181  110631 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugin4946b26a-944d-4220-869d-bf3f1e62fd02/pods/preemptor-pod: (2.46743ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:50576]
I0814 11:02:31.167495  110631 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugin4946b26a-944d-4220-869d-bf3f1e62fd02/pods/preemptor-pod: (2.209413ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:50652]
I0814 11:02:31.267043  110631 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugin4946b26a-944d-4220-869d-bf3f1e62fd02/pods/preemptor-pod: (1.91829ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:50576]
I0814 11:02:31.367455  110631 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugin4946b26a-944d-4220-869d-bf3f1e62fd02/pods/preemptor-pod: (2.119927ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:50576]
I0814 11:02:31.467267  110631 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugin4946b26a-944d-4220-869d-bf3f1e62fd02/pods/preemptor-pod: (2.051234ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:50576]
I0814 11:02:31.567973  110631 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugin4946b26a-944d-4220-869d-bf3f1e62fd02/pods/preemptor-pod: (1.847001ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:50576]
I0814 11:02:31.666930  110631 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugin4946b26a-944d-4220-869d-bf3f1e62fd02/pods/preemptor-pod: (1.78748ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:50576]
I0814 11:02:31.767478  110631 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugin4946b26a-944d-4220-869d-bf3f1e62fd02/pods/preemptor-pod: (2.16542ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:50576]
I0814 11:02:31.867044  110631 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugin4946b26a-944d-4220-869d-bf3f1e62fd02/pods/preemptor-pod: (1.730036ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:50576]
I0814 11:02:31.967155  110631 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugin4946b26a-944d-4220-869d-bf3f1e62fd02/pods/preemptor-pod: (1.85066ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:50576]
I0814 11:02:32.070188  110631 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugin4946b26a-944d-4220-869d-bf3f1e62fd02/pods/preemptor-pod: (4.918103ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:50576]
I0814 11:02:32.161876  110631 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0814 11:02:32.161924  110631 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0814 11:02:32.162389  110631 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0814 11:02:32.162426  110631 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0814 11:02:32.162590  110631 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0814 11:02:32.163671  110631 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0814 11:02:32.164256  110631 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0814 11:02:32.164423  110631 scheduling_queue.go:830] About to try and schedule pod preempt-with-permit-plugin4946b26a-944d-4220-869d-bf3f1e62fd02/preemptor-pod
I0814 11:02:32.164445  110631 scheduler.go:477] Attempting to schedule pod: preempt-with-permit-plugin4946b26a-944d-4220-869d-bf3f1e62fd02/preemptor-pod
I0814 11:02:32.164642  110631 factory.go:550] Unable to schedule preempt-with-permit-plugin4946b26a-944d-4220-869d-bf3f1e62fd02/preemptor-pod: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0814 11:02:32.164721  110631 factory.go:624] Updating pod condition for preempt-with-permit-plugin4946b26a-944d-4220-869d-bf3f1e62fd02/preemptor-pod to (PodScheduled==False, Reason=Unschedulable)
I0814 11:02:32.167070  110631 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugin4946b26a-944d-4220-869d-bf3f1e62fd02/pods/preemptor-pod: (1.747846ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:50584]
I0814 11:02:32.167087  110631 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugin4946b26a-944d-4220-869d-bf3f1e62fd02/pods/preemptor-pod: (1.720899ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:50576]
I0814 11:02:32.168216  110631 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugin4946b26a-944d-4220-869d-bf3f1e62fd02/pods/preemptor-pod: (2.049306ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:50778]
I0814 11:02:32.266931  110631 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugin4946b26a-944d-4220-869d-bf3f1e62fd02/pods/preemptor-pod: (1.796221ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:50584]
I0814 11:02:32.366919  110631 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugin4946b26a-944d-4220-869d-bf3f1e62fd02/pods/preemptor-pod: (1.645402ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:50584]
I0814 11:02:32.468707  110631 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugin4946b26a-944d-4220-869d-bf3f1e62fd02/pods/preemptor-pod: (1.921645ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:50584]
I0814 11:02:32.567142  110631 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugin4946b26a-944d-4220-869d-bf3f1e62fd02/pods/preemptor-pod: (1.980933ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:50584]
I0814 11:02:32.667017  110631 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugin4946b26a-944d-4220-869d-bf3f1e62fd02/pods/preemptor-pod: (1.894688ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:50584]
I0814 11:02:32.767180  110631 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugin4946b26a-944d-4220-869d-bf3f1e62fd02/pods/preemptor-pod: (2.052427ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:50584]
I0814 11:02:32.867360  110631 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugin4946b26a-944d-4220-869d-bf3f1e62fd02/pods/preemptor-pod: (2.176922ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:50584]
I0814 11:02:32.967219  110631 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugin4946b26a-944d-4220-869d-bf3f1e62fd02/pods/preemptor-pod: (2.041138ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:50584]
I0814 11:02:33.067091  110631 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugin4946b26a-944d-4220-869d-bf3f1e62fd02/pods/preemptor-pod: (1.975699ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:50584]
I0814 11:02:33.162611  110631 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0814 11:02:33.162672  110631 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0814 11:02:33.163224  110631 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0814 11:02:33.164085  110631 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0814 11:02:33.164435  110631 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0814 11:02:33.164591  110631 scheduling_queue.go:830] About to try and schedule pod preempt-with-permit-plugin4946b26a-944d-4220-869d-bf3f1e62fd02/preemptor-pod
I0814 11:02:33.164601  110631 scheduler.go:477] Attempting to schedule pod: preempt-with-permit-plugin4946b26a-944d-4220-869d-bf3f1e62fd02/preemptor-pod
I0814 11:02:33.164803  110631 factory.go:550] Unable to schedule preempt-with-permit-plugin4946b26a-944d-4220-869d-bf3f1e62fd02/preemptor-pod: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0814 11:02:33.164847  110631 factory.go:624] Updating pod condition for preempt-with-permit-plugin4946b26a-944d-4220-869d-bf3f1e62fd02/preemptor-pod to (PodScheduled==False, Reason=Unschedulable)
I0814 11:02:33.164955  110631 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0814 11:02:33.165074  110631 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0814 11:02:33.167388  110631 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugin4946b26a-944d-4220-869d-bf3f1e62fd02/pods/preemptor-pod: (2.248084ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:50576]
I0814 11:02:33.168209  110631 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugin4946b26a-944d-4220-869d-bf3f1e62fd02/pods/preemptor-pod: (2.110212ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:50820]
I0814 11:02:33.168564  110631 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugin4946b26a-944d-4220-869d-bf3f1e62fd02/pods/preemptor-pod: (3.3882ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:50584]
I0814 11:02:33.267352  110631 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugin4946b26a-944d-4220-869d-bf3f1e62fd02/pods/preemptor-pod: (2.154442ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:50820]
I0814 11:02:33.366971  110631 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugin4946b26a-944d-4220-869d-bf3f1e62fd02/pods/preemptor-pod: (1.84332ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:50820]
I0814 11:02:33.467256  110631 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugin4946b26a-944d-4220-869d-bf3f1e62fd02/pods/preemptor-pod: (2.033473ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:50820]
E0814 11:02:33.486801  110631 event_broadcaster.go:242] Unable to write event: 'Post http://127.0.0.1:33467/apis/events.k8s.io/v1beta1/namespaces/permit-plugin4a2b06ee-ca1a-4c9e-9d0b-fa335a6948fa/events: dial tcp 127.0.0.1:33467: connect: connection refused' (may retry after sleeping)
I0814 11:02:33.566953  110631 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugin4946b26a-944d-4220-869d-bf3f1e62fd02/pods/preemptor-pod: (1.810864ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:50820]
I0814 11:02:33.666946  110631 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugin4946b26a-944d-4220-869d-bf3f1e62fd02/pods/preemptor-pod: (1.754888ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:50820]
I0814 11:02:33.768027  110631 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugin4946b26a-944d-4220-869d-bf3f1e62fd02/pods/preemptor-pod: (2.615049ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:50820]
I0814 11:02:33.866992  110631 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugin4946b26a-944d-4220-869d-bf3f1e62fd02/pods/preemptor-pod: (1.781388ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:50820]
I0814 11:02:33.967292  110631 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugin4946b26a-944d-4220-869d-bf3f1e62fd02/pods/preemptor-pod: (1.93964ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:50820]
I0814 11:02:34.067318  110631 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugin4946b26a-944d-4220-869d-bf3f1e62fd02/pods/preemptor-pod: (1.859038ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:50820]
I0814 11:02:34.162864  110631 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0814 11:02:34.162964  110631 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0814 11:02:34.163412  110631 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0814 11:02:34.164247  110631 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0814 11:02:34.164606  110631 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0814 11:02:34.164761  110631 scheduling_queue.go:830] About to try and schedule pod preempt-with-permit-plugin4946b26a-944d-4220-869d-bf3f1e62fd02/preemptor-pod
I0814 11:02:34.164776  110631 scheduler.go:477] Attempting to schedule pod: preempt-with-permit-plugin4946b26a-944d-4220-869d-bf3f1e62fd02/preemptor-pod
I0814 11:02:34.164944  110631 factory.go:550] Unable to schedule preempt-with-permit-plugin4946b26a-944d-4220-869d-bf3f1e62fd02/preemptor-pod: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0814 11:02:34.164993  110631 factory.go:624] Updating pod condition for preempt-with-permit-plugin4946b26a-944d-4220-869d-bf3f1e62fd02/preemptor-pod to (PodScheduled==False, Reason=Unschedulable)
I0814 11:02:34.165125  110631 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0814 11:02:34.165232  110631 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0814 11:02:34.167097  110631 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugin4946b26a-944d-4220-869d-bf3f1e62fd02/pods/preemptor-pod: (1.343577ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:50866]
I0814 11:02:34.167172  110631 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugin4946b26a-944d-4220-869d-bf3f1e62fd02/pods/preemptor-pod: (1.939255ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:50820]
I0814 11:02:34.167388  110631 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugin4946b26a-944d-4220-869d-bf3f1e62fd02/pods/preemptor-pod: (2.121029ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:50576]
I0814 11:02:34.267164  110631 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugin4946b26a-944d-4220-869d-bf3f1e62fd02/pods/preemptor-pod: (1.797355ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:50820]
I0814 11:02:34.366607  110631 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugin4946b26a-944d-4220-869d-bf3f1e62fd02/pods/preemptor-pod: (1.467862ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:50820]
I0814 11:02:34.467060  110631 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugin4946b26a-944d-4220-869d-bf3f1e62fd02/pods/preemptor-pod: (1.8618ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:50820]
I0814 11:02:34.566913  110631 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugin4946b26a-944d-4220-869d-bf3f1e62fd02/pods/preemptor-pod: (1.691535ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:50820]
I0814 11:02:34.666919  110631 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugin4946b26a-944d-4220-869d-bf3f1e62fd02/pods/preemptor-pod: (1.864526ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:50820]
I0814 11:02:34.767238  110631 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugin4946b26a-944d-4220-869d-bf3f1e62fd02/pods/preemptor-pod: (2.105333ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:50820]
I0814 11:02:34.866710  110631 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugin4946b26a-944d-4220-869d-bf3f1e62fd02/pods/preemptor-pod: (1.455866ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:50820]
I0814 11:02:34.967104  110631 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugin4946b26a-944d-4220-869d-bf3f1e62fd02/pods/preemptor-pod: (1.977702ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:50820]
I0814 11:02:35.067408  110631 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugin4946b26a-944d-4220-869d-bf3f1e62fd02/pods/preemptor-pod: (2.199259ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:50820]
I0814 11:02:35.163066  110631 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0814 11:02:35.163092  110631 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0814 11:02:35.163639  110631 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0814 11:02:35.164403  110631 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0814 11:02:35.164769  110631 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0814 11:02:35.164891  110631 scheduling_queue.go:830] About to try and schedule pod preempt-with-permit-plugin4946b26a-944d-4220-869d-bf3f1e62fd02/preemptor-pod
I0814 11:02:35.164903  110631 scheduler.go:477] Attempting to schedule pod: preempt-with-permit-plugin4946b26a-944d-4220-869d-bf3f1e62fd02/preemptor-pod
I0814 11:02:35.165013  110631 factory.go:550] Unable to schedule preempt-with-permit-plugin4946b26a-944d-4220-869d-bf3f1e62fd02/preemptor-pod: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0814 11:02:35.165070  110631 factory.go:624] Updating pod condition for preempt-with-permit-plugin4946b26a-944d-4220-869d-bf3f1e62fd02/preemptor-pod to (PodScheduled==False, Reason=Unschedulable)
I0814 11:02:35.165267  110631 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0814 11:02:35.165389  110631 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0814 11:02:35.167418  110631 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugin4946b26a-944d-4220-869d-bf3f1e62fd02/pods/preemptor-pod: (1.68718ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:50820]
I0814 11:02:35.167818  110631 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugin4946b26a-944d-4220-869d-bf3f1e62fd02/pods/preemptor-pod: (2.014836ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:51108]
I0814 11:02:35.168723  110631 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugin4946b26a-944d-4220-869d-bf3f1e62fd02/pods/preemptor-pod: (3.008002ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:50866]
I0814 11:02:35.266610  110631 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugin4946b26a-944d-4220-869d-bf3f1e62fd02/pods/preemptor-pod: (1.537864ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:50820]
I0814 11:02:35.366601  110631 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugin4946b26a-944d-4220-869d-bf3f1e62fd02/pods/preemptor-pod: (1.490576ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:50820]
I0814 11:02:35.466701  110631 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugin4946b26a-944d-4220-869d-bf3f1e62fd02/pods/preemptor-pod: (1.559591ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:50820]
I0814 11:02:35.566925  110631 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugin4946b26a-944d-4220-869d-bf3f1e62fd02/pods/preemptor-pod: (1.678604ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:50820]
I0814 11:02:35.667226  110631 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugin4946b26a-944d-4220-869d-bf3f1e62fd02/pods/preemptor-pod: (2.068683ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:50820]
I0814 11:02:35.767069  110631 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugin4946b26a-944d-4220-869d-bf3f1e62fd02/pods/preemptor-pod: (1.84723ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:50820]
I0814 11:02:35.867280  110631 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugin4946b26a-944d-4220-869d-bf3f1e62fd02/pods/preemptor-pod: (2.163794ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:50820]
I0814 11:02:35.967182  110631 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugin4946b26a-944d-4220-869d-bf3f1e62fd02/pods/preemptor-pod: (1.971074ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:50820]
I0814 11:02:36.067207  110631 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugin4946b26a-944d-4220-869d-bf3f1e62fd02/pods/preemptor-pod: (1.998522ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:50820]
I0814 11:02:36.163290  110631 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0814 11:02:36.163337  110631 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0814 11:02:36.163837  110631 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0814 11:02:36.164538  110631 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0814 11:02:36.164922  110631 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0814 11:02:36.165044  110631 scheduling_queue.go:830] About to try and schedule pod preempt-with-permit-plugin4946b26a-944d-4220-869d-bf3f1e62fd02/preemptor-pod
I0814 11:02:36.165056  110631 scheduler.go:477] Attempting to schedule pod: preempt-with-permit-plugin4946b26a-944d-4220-869d-bf3f1e62fd02/preemptor-pod
I0814 11:02:36.165216  110631 factory.go:550] Unable to schedule preempt-with-permit-plugin4946b26a-944d-4220-869d-bf3f1e62fd02/preemptor-pod: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0814 11:02:36.165275  110631 factory.go:624] Updating pod condition for preempt-with-permit-plugin4946b26a-944d-4220-869d-bf3f1e62fd02/preemptor-pod to (PodScheduled==False, Reason=Unschedulable)
I0814 11:02:36.166674  110631 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0814 11:02:36.166677  110631 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0814 11:02:36.167203  110631 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugin4946b26a-944d-4220-869d-bf3f1e62fd02/pods/preemptor-pod: (1.618432ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:51108]
I0814 11:02:36.167217  110631 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugin4946b26a-944d-4220-869d-bf3f1e62fd02/pods/preemptor-pod: (2.179415ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:50820]
I0814 11:02:36.167641  110631 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugin4946b26a-944d-4220-869d-bf3f1e62fd02/pods/preemptor-pod: (1.740254ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:51126]
I0814 11:02:36.267629  110631 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugin4946b26a-944d-4220-869d-bf3f1e62fd02/pods/preemptor-pod: (2.358228ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:50820]
I0814 11:02:36.366909  110631 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugin4946b26a-944d-4220-869d-bf3f1e62fd02/pods/preemptor-pod: (1.802579ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:50820]
I0814 11:02:36.472628  110631 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugin4946b26a-944d-4220-869d-bf3f1e62fd02/pods/preemptor-pod: (7.513142ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:50820]
I0814 11:02:36.567246  110631 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugin4946b26a-944d-4220-869d-bf3f1e62fd02/pods/preemptor-pod: (2.086746ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:50820]
I0814 11:02:36.666588  110631 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugin4946b26a-944d-4220-869d-bf3f1e62fd02/pods/preemptor-pod: (1.470281ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:50820]
I0814 11:02:36.767030  110631 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugin4946b26a-944d-4220-869d-bf3f1e62fd02/pods/preemptor-pod: (1.891313ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:50820]
I0814 11:02:36.866955  110631 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugin4946b26a-944d-4220-869d-bf3f1e62fd02/pods/preemptor-pod: (1.85869ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:50820]
I0814 11:02:36.966946  110631 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugin4946b26a-944d-4220-869d-bf3f1e62fd02/pods/preemptor-pod: (1.815924ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:50820]
I0814 11:02:37.067284  110631 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugin4946b26a-944d-4220-869d-bf3f1e62fd02/pods/preemptor-pod: (2.068285ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:50820]
I0814 11:02:37.163541  110631 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0814 11:02:37.163672  110631 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0814 11:02:37.164009  110631 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0814 11:02:37.164704  110631 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0814 11:02:37.165088  110631 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0814 11:02:37.165252  110631 scheduling_queue.go:830] About to try and schedule pod preempt-with-permit-plugin4946b26a-944d-4220-869d-bf3f1e62fd02/preemptor-pod
I0814 11:02:37.165264  110631 scheduler.go:477] Attempting to schedule pod: preempt-with-permit-plugin4946b26a-944d-4220-869d-bf3f1e62fd02/preemptor-pod
I0814 11:02:37.165491  110631 factory.go:550] Unable to schedule preempt-with-permit-plugin4946b26a-944d-4220-869d-bf3f1e62fd02/preemptor-pod: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0814 11:02:37.165536  110631 factory.go:624] Updating pod condition for preempt-with-permit-plugin4946b26a-944d-4220-869d-bf3f1e62fd02/preemptor-pod to (PodScheduled==False, Reason=Unschedulable)
I0814 11:02:37.167034  110631 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugin4946b26a-944d-4220-869d-bf3f1e62fd02/pods/preemptor-pod: (1.274834ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:51108]
I0814 11:02:37.167044  110631 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugin4946b26a-944d-4220-869d-bf3f1e62fd02/pods/preemptor-pod: (1.822161ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:50820]
I0814 11:02:37.167300  110631 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0814 11:02:37.167386  110631 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0814 11:02:37.168213  110631 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugin4946b26a-944d-4220-869d-bf3f1e62fd02/pods/preemptor-pod: (1.771891ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:51252]
I0814 11:02:37.266934  110631 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugin4946b26a-944d-4220-869d-bf3f1e62fd02/pods/preemptor-pod: (1.854425ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:51108]
I0814 11:02:37.367346  110631 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugin4946b26a-944d-4220-869d-bf3f1e62fd02/pods/preemptor-pod: (1.708433ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:51108]
I0814 11:02:37.466985  110631 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugin4946b26a-944d-4220-869d-bf3f1e62fd02/pods/preemptor-pod: (1.8284ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:51108]
I0814 11:02:37.567038  110631 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugin4946b26a-944d-4220-869d-bf3f1e62fd02/pods/preemptor-pod: (1.841344ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:51108]
I0814 11:02:37.666849  110631 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugin4946b26a-944d-4220-869d-bf3f1e62fd02/pods/preemptor-pod: (1.746867ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:51108]
I0814 11:02:37.766788  110631 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugin4946b26a-944d-4220-869d-bf3f1e62fd02/pods/preemptor-pod: (1.618215ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:51108]
I0814 11:02:37.867396  110631 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugin4946b26a-944d-4220-869d-bf3f1e62fd02/pods/preemptor-pod: (2.160924ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:51108]
I0814 11:02:37.967103  110631 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugin4946b26a-944d-4220-869d-bf3f1e62fd02/pods/preemptor-pod: (1.435766ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:51108]
I0814 11:02:38.061432  110631 httplog.go:90] GET /api/v1/namespaces/default: (1.395775ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:51108]
I0814 11:02:38.063056  110631 httplog.go:90] GET /api/v1/namespaces/default/services/kubernetes: (1.198606ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:51108]
I0814 11:02:38.064736  110631 httplog.go:90] GET /api/v1/namespaces/default/endpoints/kubernetes: (1.172784ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:51108]
I0814 11:02:38.066151  110631 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugin4946b26a-944d-4220-869d-bf3f1e62fd02/pods/preemptor-pod: (1.086882ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:50820]
I0814 11:02:38.163746  110631 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0814 11:02:38.163816  110631 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0814 11:02:38.164165  110631 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0814 11:02:38.164853  110631 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0814 11:02:38.165623  110631 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0814 11:02:38.165781  110631 scheduling_queue.go:830] About to try and schedule pod preempt-with-permit-plugin4946b26a-944d-4220-869d-bf3f1e62fd02/preemptor-pod
I0814 11:02:38.165805  110631 scheduler.go:477] Attempting to schedule pod: preempt-with-permit-plugin4946b26a-944d-4220-869d-bf3f1e62fd02/preemptor-pod
I0814 11:02:38.165980  110631 factory.go:550] Unable to schedule preempt-with-permit-plugin4946b26a-944d-4220-869d-bf3f1e62fd02/preemptor-pod: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0814 11:02:38.166031  110631 factory.go:624] Updating pod condition for preempt-with-permit-plugin4946b26a-944d-4220-869d-bf3f1e62fd02/preemptor-pod to (PodScheduled==False, Reason=Unschedulable)
I0814 11:02:38.166917  110631 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugin4946b26a-944d-4220-869d-bf3f1e62fd02/pods/preemptor-pod: (1.772346ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:50820]
I0814 11:02:38.167493  110631 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0814 11:02:38.167557  110631 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0814 11:02:38.167836  110631 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugin4946b26a-944d-4220-869d-bf3f1e62fd02/pods/preemptor-pod: (1.605179ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:51108]
I0814 11:02:38.167925  110631 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugin4946b26a-944d-4220-869d-bf3f1e62fd02/pods/preemptor-pod: (1.199517ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:51296]
I0814 11:02:38.267684  110631 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugin4946b26a-944d-4220-869d-bf3f1e62fd02/pods/preemptor-pod: (1.975139ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:51108]
I0814 11:02:38.367309  110631 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugin4946b26a-944d-4220-869d-bf3f1e62fd02/pods/preemptor-pod: (2.127143ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:51108]
I0814 11:02:38.467731  110631 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugin4946b26a-944d-4220-869d-bf3f1e62fd02/pods/preemptor-pod: (2.384836ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:51108]
I0814 11:02:38.567157  110631 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugin4946b26a-944d-4220-869d-bf3f1e62fd02/pods/preemptor-pod: (1.996445ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:51108]
I0814 11:02:38.666948  110631 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugin4946b26a-944d-4220-869d-bf3f1e62fd02/pods/preemptor-pod: (1.758715ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:51108]
I0814 11:02:38.766825  110631 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugin4946b26a-944d-4220-869d-bf3f1e62fd02/pods/preemptor-pod: (1.683508ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:51108]
I0814 11:02:38.866793  110631 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugin4946b26a-944d-4220-869d-bf3f1e62fd02/pods/preemptor-pod: (1.641196ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:51108]
I0814 11:02:38.967522  110631 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugin4946b26a-944d-4220-869d-bf3f1e62fd02/pods/preemptor-pod: (2.349132ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:51108]
I0814 11:02:39.067105  110631 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugin4946b26a-944d-4220-869d-bf3f1e62fd02/pods/preemptor-pod: (1.931333ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:51108]
I0814 11:02:39.163965  110631 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0814 11:02:39.163965  110631 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0814 11:02:39.164307  110631 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0814 11:02:39.164994  110631 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0814 11:02:39.165806  110631 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0814 11:02:39.166005  110631 scheduling_queue.go:830] About to try and schedule pod preempt-with-permit-plugin4946b26a-944d-4220-869d-bf3f1e62fd02/preemptor-pod
I0814 11:02:39.166027  110631 scheduler.go:477] Attempting to schedule pod: preempt-with-permit-plugin4946b26a-944d-4220-869d-bf3f1e62fd02/preemptor-pod
I0814 11:02:39.166198  110631 factory.go:550] Unable to schedule preempt-with-permit-plugin4946b26a-944d-4220-869d-bf3f1e62fd02/preemptor-pod: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0814 11:02:39.166264  110631 factory.go:624] Updating pod condition for preempt-with-permit-plugin4946b26a-944d-4220-869d-bf3f1e62fd02/preemptor-pod to (PodScheduled==False, Reason=Unschedulable)
I0814 11:02:39.167858  110631 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0814 11:02:39.167913  110631 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0814 11:02:39.168189  110631 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugin4946b26a-944d-4220-869d-bf3f1e62fd02/pods/preemptor-pod: (3.101773ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:51108]
I0814 11:02:39.169236  110631 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugin4946b26a-944d-4220-869d-bf3f1e62fd02/pods/preemptor-pod: (2.699774ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:50820]
I0814 11:02:39.169713  110631 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugin4946b26a-944d-4220-869d-bf3f1e62fd02/pods/preemptor-pod: (1.685792ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:51300]
I0814 11:02:39.267073  110631 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugin4946b26a-944d-4220-869d-bf3f1e62fd02/pods/preemptor-pod: (1.918344ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:50820]
I0814 11:02:39.366706  110631 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugin4946b26a-944d-4220-869d-bf3f1e62fd02/pods/preemptor-pod: (1.630666ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:50820]
I0814 11:02:39.466743  110631 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugin4946b26a-944d-4220-869d-bf3f1e62fd02/pods/preemptor-pod: (1.696878ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:50820]
I0814 11:02:39.566936  110631 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugin4946b26a-944d-4220-869d-bf3f1e62fd02/pods/preemptor-pod: (1.761898ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:50820]
I0814 11:02:39.667054  110631 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugin4946b26a-944d-4220-869d-bf3f1e62fd02/pods/preemptor-pod: (1.894856ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:50820]
I0814 11:02:39.767390  110631 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugin4946b26a-944d-4220-869d-bf3f1e62fd02/pods/preemptor-pod: (2.127858ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:50820]
I0814 11:02:39.866781  110631 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugin4946b26a-944d-4220-869d-bf3f1e62fd02/pods/preemptor-pod: (1.64497ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:50820]
I0814 11:02:39.967009  110631 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugin4946b26a-944d-4220-869d-bf3f1e62fd02/pods/preemptor-pod: (1.880674ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:50820]
I0814 11:02:40.066769  110631 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugin4946b26a-944d-4220-869d-bf3f1e62fd02/pods/preemptor-pod: (1.679259ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:50820]
I0814 11:02:40.164185  110631 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0814 11:02:40.164182  110631 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0814 11:02:40.164404  110631 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0814 11:02:40.165135  110631 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0814 11:02:40.165964  110631 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0814 11:02:40.166117  110631 scheduling_queue.go:830] About to try and schedule pod preempt-with-permit-plugin4946b26a-944d-4220-869d-bf3f1e62fd02/preemptor-pod
I0814 11:02:40.166145  110631 scheduler.go:477] Attempting to schedule pod: preempt-with-permit-plugin4946b26a-944d-4220-869d-bf3f1e62fd02/preemptor-pod
I0814 11:02:40.166269  110631 factory.go:550] Unable to schedule preempt-with-permit-plugin4946b26a-944d-4220-869d-bf3f1e62fd02/preemptor-pod: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0814 11:02:40.166334  110631 factory.go:624] Updating pod condition for preempt-with-permit-plugin4946b26a-944d-4220-869d-bf3f1e62fd02/preemptor-pod to (PodScheduled==False, Reason=Unschedulable)
I0814 11:02:40.167083  110631 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugin4946b26a-944d-4220-869d-bf3f1e62fd02/pods/preemptor-pod: (1.948585ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:50820]
I0814 11:02:40.168024  110631 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0814 11:02:40.168029  110631 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0814 11:02:40.168478  110631 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugin4946b26a-944d-4220-869d-bf3f1e62fd02/pods/preemptor-pod: (1.537591ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:51326]
I0814 11:02:40.169037  110631 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugin4946b26a-944d-4220-869d-bf3f1e62fd02/pods/preemptor-pod: (2.404225ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:51108]
I0814 11:02:40.266941  110631 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugin4946b26a-944d-4220-869d-bf3f1e62fd02/pods/preemptor-pod: (1.641883ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:51326]
I0814 11:02:40.367601  110631 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugin4946b26a-944d-4220-869d-bf3f1e62fd02/pods/preemptor-pod: (2.414982ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:51326]
I0814 11:02:40.467601  110631 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugin4946b26a-944d-4220-869d-bf3f1e62fd02/pods/preemptor-pod: (2.425627ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:51326]
I0814 11:02:40.566999  110631 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugin4946b26a-944d-4220-869d-bf3f1e62fd02/pods/preemptor-pod: (1.800573ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:51326]
I0814 11:02:40.667186  110631 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugin4946b26a-944d-4220-869d-bf3f1e62fd02/pods/preemptor-pod: (2.012312ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:51326]
I0814 11:02:40.767293  110631 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugin4946b26a-944d-4220-869d-bf3f1e62fd02/pods/preemptor-pod: (2.124003ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:51326]
I0814 11:02:40.867231  110631 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugin4946b26a-944d-4220-869d-bf3f1e62fd02/pods/preemptor-pod: (2.026189ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:51326]
I0814 11:02:40.966823  110631 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugin4946b26a-944d-4220-869d-bf3f1e62fd02/pods/preemptor-pod: (1.617659ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:51326]
I0814 11:02:41.067412  110631 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugin4946b26a-944d-4220-869d-bf3f1e62fd02/pods/preemptor-pod: (1.669019ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:51326]
I0814 11:02:41.164409  110631 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0814 11:02:41.164449  110631 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0814 11:02:41.164521  110631 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0814 11:02:41.165787  110631 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0814 11:02:41.166143  110631 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0814 11:02:41.166308  110631 scheduling_queue.go:830] About to try and schedule pod preempt-with-permit-plugin4946b26a-944d-4220-869d-bf3f1e62fd02/preemptor-pod
I0814 11:02:41.166323  110631 scheduler.go:477] Attempting to schedule pod: preempt-with-permit-plugin4946b26a-944d-4220-869d-bf3f1e62fd02/preemptor-pod
I0814 11:02:41.166496  110631 factory.go:550] Unable to schedule preempt-with-permit-plugin4946b26a-944d-4220-869d-bf3f1e62fd02/preemptor-pod: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0814 11:02:41.166539  110631 factory.go:624] Updating pod condition for preempt-with-permit-plugin4946b26a-944d-4220-869d-bf3f1e62fd02/preemptor-pod to (PodScheduled==False, Reason=Unschedulable)
I0814 11:02:41.166654  110631 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugin4946b26a-944d-4220-869d-bf3f1e62fd02/pods/preemptor-pod: (1.55545ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:51326]
I0814 11:02:41.168181  110631 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0814 11:02:41.168215  110631 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugin4946b26a-944d-4220-869d-bf3f1e62fd02/pods/preemptor-pod: (1.074932ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:51326]
I0814 11:02:41.168245  110631 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugin4946b26a-944d-4220-869d-bf3f1e62fd02/pods/preemptor-pod: (1.416664ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:50820]
I0814 11:02:41.168315  110631 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0814 11:02:41.267273  110631 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugin4946b26a-944d-4220-869d-bf3f1e62fd02/pods/preemptor-pod: (2.087159ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:51326]
I0814 11:02:41.369007  110631 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugin4946b26a-944d-4220-869d-bf3f1e62fd02/pods/preemptor-pod: (3.754217ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:51326]
I0814 11:02:41.468949  110631 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugin4946b26a-944d-4220-869d-bf3f1e62fd02/pods/preemptor-pod: (3.694358ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:51326]
I0814 11:02:41.567112  110631 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugin4946b26a-944d-4220-869d-bf3f1e62fd02/pods/preemptor-pod: (1.883906ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:51326]
I0814 11:02:41.668879  110631 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugin4946b26a-944d-4220-869d-bf3f1e62fd02/pods/preemptor-pod: (3.509476ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:51326]
I0814 11:02:41.769977  110631 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugin4946b26a-944d-4220-869d-bf3f1e62fd02/pods/preemptor-pod: (4.087583ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:51326]
I0814 11:02:41.867161  110631 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugin4946b26a-944d-4220-869d-bf3f1e62fd02/pods/preemptor-pod: (1.578721ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:51326]
I0814 11:02:41.966959  110631 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugin4946b26a-944d-4220-869d-bf3f1e62fd02/pods/preemptor-pod: (1.870254ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:51326]
I0814 11:02:42.067359  110631 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugin4946b26a-944d-4220-869d-bf3f1e62fd02/pods/preemptor-pod: (2.157283ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:51326]
I0814 11:02:42.164581  110631 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0814 11:02:42.164687  110631 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0814 11:02:42.164703  110631 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0814 11:02:42.166235  110631 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0814 11:02:42.167614  110631 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0814 11:02:42.167815  110631 scheduling_queue.go:830] About to try and schedule pod preempt-with-permit-plugin4946b26a-944d-4220-869d-bf3f1e62fd02/preemptor-pod
I0814 11:02:42.167833  110631 scheduler.go:477] Attempting to schedule pod: preempt-with-permit-plugin4946b26a-944d-4220-869d-bf3f1e62fd02/preemptor-pod
I0814 11:02:42.167988  110631 factory.go:550] Unable to schedule preempt-with-permit-plugin4946b26a-944d-4220-869d-bf3f1e62fd02/preemptor-pod: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0814 11:02:42.168042  110631 factory.go:624] Updating pod condition for preempt-with-permit-plugin4946b26a-944d-4220-869d-bf3f1e62fd02/preemptor-pod to (PodScheduled==False, Reason=Unschedulable)
I0814 11:02:42.168351  110631 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0814 11:02:42.168445  110631 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0814 11:02:42.171476  110631 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugin4946b26a-944d-4220-869d-bf3f1e62fd02/pods/preemptor-pod: (6.181742ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:51326]
I0814 11:02:42.173584  110631 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugin4946b26a-944d-4220-869d-bf3f1e62fd02/pods/preemptor-pod: (3.112479ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:51424]
I0814 11:02:42.173883  110631 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugin4946b26a-944d-4220-869d-bf3f1e62fd02/pods/preemptor-pod: (3.424996ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:51630]
I0814 11:02:42.267384  110631 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugin4946b26a-944d-4220-869d-bf3f1e62fd02/pods/preemptor-pod: (2.2188ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:51630]
I0814 11:02:42.368697  110631 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugin4946b26a-944d-4220-869d-bf3f1e62fd02/pods/preemptor-pod: (3.564922ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:51630]
I0814 11:02:42.467501  110631 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugin4946b26a-944d-4220-869d-bf3f1e62fd02/pods/preemptor-pod: (1.975857ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:51630]
I0814 11:02:42.566809  110631 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugin4946b26a-944d-4220-869d-bf3f1e62fd02/pods/preemptor-pod: (1.605921ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:51630]
I0814 11:02:42.667791  110631 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugin4946b26a-944d-4220-869d-bf3f1e62fd02/pods/preemptor-pod: (2.504009ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:51630]
I0814 11:02:42.767284  110631 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugin4946b26a-944d-4220-869d-bf3f1e62fd02/pods/preemptor-pod: (2.106494ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:51630]
I0814 11:02:42.867286  110631 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugin4946b26a-944d-4220-869d-bf3f1e62fd02/pods/preemptor-pod: (1.940389ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:51630]
I0814 11:02:42.967259  110631 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugin4946b26a-944d-4220-869d-bf3f1e62fd02/pods/preemptor-pod: (2.060345ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:51630]
I0814 11:02:43.067173  110631 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugin4946b26a-944d-4220-869d-bf3f1e62fd02/pods/preemptor-pod: (1.867602ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:51630]
I0814 11:02:43.164822  110631 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0814 11:02:43.164880  110631 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0814 11:02:43.164998  110631 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0814 11:02:43.167743  110631 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0814 11:02:43.167791  110631 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0814 11:02:43.167906  110631 scheduling_queue.go:830] About to try and schedule pod preempt-with-permit-plugin4946b26a-944d-4220-869d-bf3f1e62fd02/preemptor-pod
I0814 11:02:43.167916  110631 scheduler.go:477] Attempting to schedule pod: preempt-with-permit-plugin4946b26a-944d-4220-869d-bf3f1e62fd02/preemptor-pod
I0814 11:02:43.168097  110631 factory.go:550] Unable to schedule preempt-with-permit-plugin4946b26a-944d-4220-869d-bf3f1e62fd02/preemptor-pod: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0814 11:02:43.168138  110631 factory.go:624] Updating pod condition for preempt-with-permit-plugin4946b26a-944d-4220-869d-bf3f1e62fd02/preemptor-pod to (PodScheduled==False, Reason=Unschedulable)
I0814 11:02:43.168596  110631 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0814 11:02:43.168658  110631 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0814 11:02:43.170841  110631 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugin4946b26a-944d-4220-869d-bf3f1e62fd02/pods/preemptor-pod: (2.029184ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:51630]
I0814 11:02:43.171172  110631 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugin4946b26a-944d-4220-869d-bf3f1e62fd02/pods/preemptor-pod: (2.385583ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:51326]
I0814 11:02:43.172925  110631 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugin4946b26a-944d-4220-869d-bf3f1e62fd02/pods/preemptor-pod: (1.379902ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:51648]
I0814 11:02:43.267553  110631 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugin4946b26a-944d-4220-869d-bf3f1e62fd02/pods/preemptor-pod: (2.243572ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:51326]
I0814 11:02:43.367495  110631 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugin4946b26a-944d-4220-869d-bf3f1e62fd02/pods/preemptor-pod: (2.110371ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:51326]
I0814 11:02:43.467600  110631 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugin4946b26a-944d-4220-869d-bf3f1e62fd02/pods/preemptor-pod: (2.329547ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:51326]
I0814 11:02:43.567604  110631 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugin4946b26a-944d-4220-869d-bf3f1e62fd02/pods/preemptor-pod: (2.174262ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:51326]
I0814 11:02:43.667779  110631 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugin4946b26a-944d-4220-869d-bf3f1e62fd02/pods/preemptor-pod: (2.225522ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:51326]
I0814 11:02:43.767547  110631 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugin4946b26a-944d-4220-869d-bf3f1e62fd02/pods/preemptor-pod: (1.673776ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:51326]
I0814 11:02:43.867984  110631 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugin4946b26a-944d-4220-869d-bf3f1e62fd02/pods/preemptor-pod: (2.606999ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:51326]
I0814 11:02:43.967441  110631 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugin4946b26a-944d-4220-869d-bf3f1e62fd02/pods/preemptor-pod: (2.066742ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:51326]
I0814 11:02:44.067381  110631 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugin4946b26a-944d-4220-869d-bf3f1e62fd02/pods/preemptor-pod: (2.09446ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:51326]
I0814 11:02:44.165071  110631 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0814 11:02:44.165087  110631 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0814 11:02:44.165309  110631 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0814 11:02:44.167489  110631 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugin4946b26a-944d-4220-869d-bf3f1e62fd02/pods/preemptor-pod: (2.208533ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:51326]
I0814 11:02:44.167943  110631 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0814 11:02:44.167943  110631 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0814 11:02:44.168080  110631 scheduling_queue.go:830] About to try and schedule pod preempt-with-permit-plugin4946b26a-944d-4220-869d-bf3f1e62fd02/preemptor-pod
I0814 11:02:44.168104  110631 scheduler.go:477] Attempting to schedule pod: preempt-with-permit-plugin4946b26a-944d-4220-869d-bf3f1e62fd02/preemptor-pod
I0814 11:02:44.168260  110631 factory.go:550] Unable to schedule preempt-with-permit-plugin4946b26a-944d-4220-869d-bf3f1e62fd02/preemptor-pod: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0814 11:02:44.168321  110631 factory.go:624] Updating pod condition for preempt-with-permit-plugin4946b26a-944d-4220-869d-bf3f1e62fd02/preemptor-pod to (PodScheduled==False, Reason=Unschedulable)
I0814 11:02:44.168842  110631 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0814 11:02:44.168881  110631 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0814 11:02:44.171339  110631 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugin4946b26a-944d-4220-869d-bf3f1e62fd02/pods/preemptor-pod: (2.615238ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:51630]
I0814 11:02:44.171341  110631 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugin4946b26a-944d-4220-869d-bf3f1e62fd02/pods/preemptor-pod: (2.335702ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:51326]
E0814 11:02:44.178564  110631 event_broadcaster.go:242] Unable to write event: 'Post http://127.0.0.1:33467/apis/events.k8s.io/v1beta1/namespaces/permit-plugin4a2b06ee-ca1a-4c9e-9d0b-fa335a6948fa/events: dial tcp 127.0.0.1:33467: connect: connection refused' (may retry after sleeping)
I0814 11:02:44.270405  110631 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugin4946b26a-944d-4220-869d-bf3f1e62fd02/pods/preemptor-pod: (5.048209ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:51326]
I0814 11:02:44.367213  110631 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugin4946b26a-944d-4220-869d-bf3f1e62fd02/pods/preemptor-pod: (2.000388ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:51326]
I0814 11:02:44.467265  110631 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugin4946b26a-944d-4220-869d-bf3f1e62fd02/pods/preemptor-pod: (2.103781ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:51326]
I0814 11:02:44.567845  110631 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugin4946b26a-944d-4220-869d-bf3f1e62fd02/pods/preemptor-pod: (2.566443ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:51326]
I0814 11:02:44.667261  110631 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugin4946b26a-944d-4220-869d-bf3f1e62fd02/pods/preemptor-pod: (2.100301ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:51326]
I0814 11:02:44.767703  110631 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugin4946b26a-944d-4220-869d-bf3f1e62fd02/pods/preemptor-pod: (2.183318ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:51326]
I0814 11:02:44.872967  110631 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugin4946b26a-944d-4220-869d-bf3f1e62fd02/pods/preemptor-pod: (2.15313ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:51326]
I0814 11:02:44.967912  110631 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugin4946b26a-944d-4220-869d-bf3f1e62fd02/pods/preemptor-pod: (2.635152ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:51326]
I0814 11:02:45.067392  110631 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugin4946b26a-944d-4220-869d-bf3f1e62fd02/pods/preemptor-pod: (2.271773ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:51326]
I0814 11:02:45.165385  110631 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0814 11:02:45.165485  110631 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0814 11:02:45.165525  110631 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0814 11:02:45.168111  110631 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0814 11:02:45.168296  110631 scheduling_queue.go:830] About to try and schedule pod preempt-with-permit-plugin4946b26a-944d-4220-869d-bf3f1e62fd02/preemptor-pod
I0814 11:02:45.168319  110631 scheduler.go:477] Attempting to schedule pod: preempt-with-permit-plugin4946b26a-944d-4220-869d-bf3f1e62fd02/preemptor-pod
I0814 11:02:45.168315  110631 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugin4946b26a-944d-4220-869d-bf3f1e62fd02/pods/preemptor-pod: (3.251923ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:51326]
I0814 11:02:45.168496  110631 factory.go:550] Unable to schedule preempt-with-permit-plugin4946b26a-944d-4220-869d-bf3f1e62fd02/preemptor-pod: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0814 11:02:45.168547  110631 factory.go:624] Updating pod condition for preempt-with-permit-plugin4946b26a-944d-4220-869d-bf3f1e62fd02/preemptor-pod to (PodScheduled==False, Reason=Unschedulable)
I0814 11:02:45.169079  110631 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0814 11:02:45.169111  110631 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0814 11:02:45.169268  110631 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0814 11:02:45.170383  110631 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugin4946b26a-944d-4220-869d-bf3f1e62fd02/pods/preemptor-pod: (1.534561ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:51326]
I0814 11:02:45.170795  110631 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugin4946b26a-944d-4220-869d-bf3f1e62fd02/pods/preemptor-pod: (1.942565ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:51630]
I0814 11:02:45.267025  110631 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugin4946b26a-944d-4220-869d-bf3f1e62fd02/pods/preemptor-pod: (1.902906ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:51630]
I0814 11:02:45.368120  110631 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugin4946b26a-944d-4220-869d-bf3f1e62fd02/pods/preemptor-pod: (2.760368ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:51630]
I0814 11:02:45.466905  110631 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugin4946b26a-944d-4220-869d-bf3f1e62fd02/pods/preemptor-pod: (1.714611ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:51630]
I0814 11:02:45.570551  110631 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugin4946b26a-944d-4220-869d-bf3f1e62fd02/pods/preemptor-pod: (4.697962ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:51630]
I0814 11:02:45.667367  110631 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugin4946b26a-944d-4220-869d-bf3f1e62fd02/pods/preemptor-pod: (2.229981ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:51630]
I0814 11:02:45.767182  110631 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugin4946b26a-944d-4220-869d-bf3f1e62fd02/pods/preemptor-pod: (2.011058ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:51630]
I0814 11:02:45.868390  110631 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugin4946b26a-944d-4220-869d-bf3f1e62fd02/pods/preemptor-pod: (3.227789ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:51630]
I0814 11:02:45.966999  110631 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugin4946b26a-944d-4220-869d-bf3f1e62fd02/pods/preemptor-pod: (1.88409ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:51630]
I0814 11:02:46.067074  110631 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugin4946b26a-944d-4220-869d-bf3f1e62fd02/pods/preemptor-pod: (1.919973ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:51630]
I0814 11:02:46.165543  110631 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0814 11:02:46.165575  110631 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0814 11:02:46.165684  110631 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0814 11:02:46.167750  110631 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugin4946b26a-944d-4220-869d-bf3f1e62fd02/pods/preemptor-pod: (2.474558ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:51630]
I0814 11:02:46.168263  110631 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0814 11:02:46.168374  110631 scheduling_queue.go:830] About to try and schedule pod preempt-with-permit-plugin4946b26a-944d-4220-869d-bf3f1e62fd02/preemptor-pod
I0814 11:02:46.168386  110631 scheduler.go:477] Attempting to schedule pod: preempt-with-permit-plugin4946b26a-944d-4220-869d-bf3f1e62fd02/preemptor-pod
I0814 11:02:46.168557  110631 factory.go:550] Unable to schedule preempt-with-permit-plugin4946b26a-944d-4220-869d-bf3f1e62fd02/preemptor-pod: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0814 11:02:46.168610  110631 factory.go:624] Updating pod condition for preempt-with-permit-plugin4946b26a-944d-4220-869d-bf3f1e62fd02/preemptor-pod to (PodScheduled==False, Reason=Unschedulable)
I0814 11:02:46.169185  110631 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0814 11:02:46.169442  110631 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0814 11:02:46.169483  110631 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0814 11:02:46.170351  110631 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugin4946b26a-944d-4220-869d-bf3f1e62fd02/pods/preemptor-pod: (1.402817ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:51326]
I0814 11:02:46.170389  110631 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugin4946b26a-944d-4220-869d-bf3f1e62fd02/pods/preemptor-pod: (1.456313ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:51630]
I0814 11:02:46.266856  110631 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugin4946b26a-944d-4220-869d-bf3f1e62fd02/pods/preemptor-pod: (1.720328ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:51326]
I0814 11:02:46.367186  110631 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugin4946b26a-944d-4220-869d-bf3f1e62fd02/pods/preemptor-pod: (1.710434ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:51326]
I0814 11:02:46.467354  110631 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugin4946b26a-944d-4220-869d-bf3f1e62fd02/pods/preemptor-pod: (2.018519ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:51326]
I0814 11:02:46.566961  110631 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugin4946b26a-944d-4220-869d-bf3f1e62fd02/pods/preemptor-pod: (1.837074ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:51326]
I0814 11:02:46.667043  110631 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugin4946b26a-944d-4220-869d-bf3f1e62fd02/pods/preemptor-pod: (1.49168ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:51326]
I0814 11:02:46.767160  110631 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugin4946b26a-944d-4220-869d-bf3f1e62fd02/pods/preemptor-pod: (2.071664ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:51326]
I0814 11:02:46.866962  110631 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugin4946b26a-944d-4220-869d-bf3f1e62fd02/pods/preemptor-pod: (1.815056ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:51326]
I0814 11:02:46.967034  110631 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugin4946b26a-944d-4220-869d-bf3f1e62fd02/pods/preemptor-pod: (1.877838ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:51326]
I0814 11:02:47.067293  110631 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugin4946b26a-944d-4220-869d-bf3f1e62fd02/pods/preemptor-pod: (2.149918ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:51326]
I0814 11:02:47.165814  110631 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0814 11:02:47.165836  110631 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0814 11:02:47.165856  110631 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0814 11:02:47.167192  110631 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugin4946b26a-944d-4220-869d-bf3f1e62fd02/pods/preemptor-pod: (2.004415ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:51326]
I0814 11:02:47.168419  110631 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0814 11:02:47.168558  110631 scheduling_queue.go:830] About to try and schedule pod preempt-with-permit-plugin4946b26a-944d-4220-869d-bf3f1e62fd02/preemptor-pod
I0814 11:02:47.168569  110631 scheduler.go:477] Attempting to schedule pod: preempt-with-permit-plugin4946b26a-944d-4220-869d-bf3f1e62fd02/preemptor-pod
I0814 11:02:47.168709  110631 factory.go:550] Unable to schedule preempt-with-permit-plugin4946b26a-944d-4220-869d-bf3f1e62fd02/preemptor-pod: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0814 11:02:47.168753  110631 factory.go:624] Updating pod condition for preempt-with-permit-plugin4946b26a-944d-4220-869d-bf3f1e62fd02/preemptor-pod to (PodScheduled==False, Reason=Unschedulable)
I0814 11:02:47.169482  110631 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0814 11:02:47.169590  110631 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0814 11:02:47.169615  110631 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0814 11:02:47.171284  110631 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugin4946b26a-944d-4220-869d-bf3f1e62fd02/pods/preemptor-pod: (2.321128ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:51326]
I0814 11:02:47.171671  110631 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugin4946b26a-944d-4220-869d-bf3f1e62fd02/pods/preemptor-pod: (2.564502ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:51630]
I0814 11:02:47.267804  110631 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugin4946b26a-944d-4220-869d-bf3f1e62fd02/pods/preemptor-pod: (2.361577ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:51630]
I0814 11:02:47.366988  110631 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugin4946b26a-944d-4220-869d-bf3f1e62fd02/pods/preemptor-pod: (1.821949ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:51630]
I0814 11:02:47.466535  110631 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugin4946b26a-944d-4220-869d-bf3f1e62fd02/pods/preemptor-pod: (1.436441ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:51630]
I0814 11:02:47.567308  110631 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugin4946b26a-944d-4220-869d-bf3f1e62fd02/pods/preemptor-pod: (2.04975ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:51630]
I0814 11:02:47.667006  110631 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugin4946b26a-944d-4220-869d-bf3f1e62fd02/pods/preemptor-pod: (1.856009ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:51630]
I0814 11:02:47.766889  110631 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugin4946b26a-944d-4220-869d-bf3f1e62fd02/pods/preemptor-pod: (1.692502ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:51630]
I0814 11:02:47.867211  110631 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugin4946b26a-944d-4220-869d-bf3f1e62fd02/pods/preemptor-pod: (2.086479ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:51630]
I0814 11:02:47.966730  110631 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugin4946b26a-944d-4220-869d-bf3f1e62fd02/pods/preemptor-pod: (1.620412ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:51630]
I0814 11:02:48.062667  110631 httplog.go:90] GET /api/v1/namespaces/default: (1.791104ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:51630]
I0814 11:02:48.064489  110631 httplog.go:90] GET /api/v1/namespaces/default/services/kubernetes: (1.265015ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:51630]
I0814 11:02:48.065982  110631 httplog.go:90] GET /api/v1/namespaces/default/endpoints/kubernetes: (1.079004ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:51630]
I0814 11:02:48.066718  110631 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugin4946b26a-944d-4220-869d-bf3f1e62fd02/pods/preemptor-pod: (1.008408ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:51326]
I0814 11:02:48.165972  110631 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0814 11:02:48.165972  110631 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0814 11:02:48.165972  110631 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0814 11:02:48.166963  110631 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugin4946b26a-944d-4220-869d-bf3f1e62fd02/pods/preemptor-pod: (1.837118ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:51326]
I0814 11:02:48.168574  110631 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0814 11:02:48.168693  110631 scheduling_queue.go:830] About to try and schedule pod preempt-with-permit-plugin4946b26a-944d-4220-869d-bf3f1e62fd02/preemptor-pod
I0814 11:02:48.168713  110631 scheduler.go:477] Attempting to schedule pod: preempt-with-permit-plugin4946b26a-944d-4220-869d-bf3f1e62fd02/preemptor-pod
I0814 11:02:48.168873  110631 factory.go:550] Unable to schedule preempt-with-permit-plugin4946b26a-944d-4220-869d-bf3f1e62fd02/preemptor-pod: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0814 11:02:48.168927  110631 factory.go:624] Updating pod condition for preempt-with-permit-plugin4946b26a-944d-4220-869d-bf3f1e62fd02/preemptor-pod to (PodScheduled==False, Reason=Unschedulable)
I0814 11:02:48.169881  110631 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0814 11:02:48.169930  110631 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0814 11:02:48.169950  110631 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0814 11:02:48.171762  110631 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugin4946b26a-944d-4220-869d-bf3f1e62fd02/pods/preemptor-pod: (2.570979ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:51326]
I0814 11:02:48.171769  110631 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugin4946b26a-944d-4220-869d-bf3f1e62fd02/pods/preemptor-pod: (2.427683ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:51630]
I0814 11:02:48.267958  110631 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugin4946b26a-944d-4220-869d-bf3f1e62fd02/pods/preemptor-pod: (1.740353ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:51326]
I0814 11:02:48.367209  110631 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugin4946b26a-944d-4220-869d-bf3f1e62fd02/pods/preemptor-pod: (1.910753ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:51326]
I0814 11:02:48.467430  110631 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugin4946b26a-944d-4220-869d-bf3f1e62fd02/pods/preemptor-pod: (2.317118ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:51326]
I0814 11:02:48.566729  110631 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugin4946b26a-944d-4220-869d-bf3f1e62fd02/pods/preemptor-pod: (1.584109ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:51326]
I0814 11:02:48.667756  110631 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugin4946b26a-944d-4220-869d-bf3f1e62fd02/pods/preemptor-pod: (1.586844ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:51326]
I0814 11:02:48.767044  110631 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugin4946b26a-944d-4220-869d-bf3f1e62fd02/pods/preemptor-pod: (1.920528ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:51326]
I0814 11:02:48.870342  110631 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugin4946b26a-944d-4220-869d-bf3f1e62fd02/pods/preemptor-pod: (2.900273ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:51326]
I0814 11:02:48.966979  110631 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugin4946b26a-944d-4220-869d-bf3f1e62fd02/pods/preemptor-pod: (1.781694ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:51326]
I0814 11:02:49.067790  110631 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugin4946b26a-944d-4220-869d-bf3f1e62fd02/pods/preemptor-pod: (2.682511ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:51326]
I0814 11:02:49.166143  110631 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0814 11:02:49.166173  110631 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0814 11:02:49.166145  110631 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0814 11:02:49.167297  110631 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugin4946b26a-944d-4220-869d-bf3f1e62fd02/pods/preemptor-pod: (2.170383ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:51326]
I0814 11:02:49.168738  110631 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0814 11:02:49.168887  110631 scheduling_queue.go:830] About to try and schedule pod preempt-with-permit-plugin4946b26a-944d-4220-869d-bf3f1e62fd02/preemptor-pod
I0814 11:02:49.168909  110631 scheduler.go:477] Attempting to schedule pod: preempt-with-permit-plugin4946b26a-944d-4220-869d-bf3f1e62fd02/preemptor-pod
I0814 11:02:49.169054  110631 factory.go:550] Unable to schedule preempt-with-permit-plugin4946b26a-944d-4220-869d-bf3f1e62fd02/preemptor-pod: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0814 11:02:49.169103  110631 factory.go:624] Updating pod condition for preempt-with-permit-plugin4946b26a-944d-4220-869d-bf3f1e62fd02/preemptor-pod to (PodScheduled==False, Reason=Unschedulable)
I0814 11:02:49.170026  110631 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0814 11:02:49.170054  110631 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0814 11:02:49.170067  110631 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0814 11:02:49.170874  110631 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugin4946b26a-944d-4220-869d-bf3f1e62fd02/pods/preemptor-pod: (1.356555ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:51326]
I0814 11:02:49.171078  110631 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugin4946b26a-944d-4220-869d-bf3f1e62fd02/pods/preemptor-pod: (1.612018ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:51630]
I0814 11:02:49.267228  110631 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugin4946b26a-944d-4220-869d-bf3f1e62fd02/pods/preemptor-pod: (2.074819ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:51630]
I0814 11:02:49.367050  110631 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugin4946b26a-944d-4220-869d-bf3f1e62fd02/pods/preemptor-pod: (1.912998ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:51630]
I0814 11:02:49.466989  110631 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugin4946b26a-944d-4220-869d-bf3f1e62fd02/pods/preemptor-pod: (1.857375ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:51630]
I0814 11:02:49.568116  110631 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugin4946b26a-944d-4220-869d-bf3f1e62fd02/pods/preemptor-pod: (2.988043ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:51630]
I0814 11:02:49.666854  110631 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugin4946b26a-944d-4220-869d-bf3f1e62fd02/pods/preemptor-pod: (1.753638ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:51630]
I0814 11:02:49.767146  110631 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugin4946b26a-944d-4220-869d-bf3f1e62fd02/pods/preemptor-pod: (2.07879ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:51630]
I0814 11:02:49.866851  110631 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugin4946b26a-944d-4220-869d-bf3f1e62fd02/pods/preemptor-pod: (1.739401ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:51630]
I0814 11:02:49.966867  110631 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugin4946b26a-944d-4220-869d-bf3f1e62fd02/pods/preemptor-pod: (1.758538ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:51630]
I0814 11:02:50.067292  110631 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugin4946b26a-944d-4220-869d-bf3f1e62fd02/pods/preemptor-pod: (2.099583ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:51630]
I0814 11:02:50.166311  110631 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0814 11:02:50.167242  110631 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugin4946b26a-944d-4220-869d-bf3f1e62fd02/pods/preemptor-pod: (1.995443ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:51630]
I0814 11:02:50.167408  110631 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0814 11:02:50.167431  110631 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0814 11:02:50.168923  110631 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0814 11:02:50.169101  110631 scheduling_queue.go:830] About to try and schedule pod preempt-with-permit-plugin4946b26a-944d-4220-869d-bf3f1e62fd02/preemptor-pod
I0814 11:02:50.169123  110631 scheduler.go:477] Attempting to schedule pod: preempt-with-permit-plugin4946b26a-944d-4220-869d-bf3f1e62fd02/preemptor-pod
I0814 11:02:50.169297  110631 factory.go:550] Unable to schedule preempt-with-permit-plugin4946b26a-944d-4220-869d-bf3f1e62fd02/preemptor-pod: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0814 11:02:50.169354  110631 factory.go:624] Updating pod condition for preempt-with-permit-plugin4946b26a-944d-4220-869d-bf3f1e62fd02/preemptor-pod to (PodScheduled==False, Reason=Unschedulable)
I0814 11:02:50.170432  110631 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0814 11:02:50.170432  110631 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0814 11:02:50.170445  110631 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0814 11:02:50.171246  110631 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugin4946b26a-944d-4220-869d-bf3f1e62fd02/pods/preemptor-pod: (1.491689ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:51630]
I0814 11:02:50.171373  110631 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugin4946b26a-944d-4220-869d-bf3f1e62fd02/pods/preemptor-pod: (1.682809ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:51326]
I0814 11:02:50.266644  110631 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugin4946b26a-944d-4220-869d-bf3f1e62fd02/pods/preemptor-pod: (1.518996ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:51326]
I0814 11:02:50.367697  110631 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugin4946b26a-944d-4220-869d-bf3f1e62fd02/pods/preemptor-pod: (2.481162ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:51326]
E0814 11:02:50.370227  110631 factory.go:599] Error getting pod permit-plugin4a2b06ee-ca1a-4c9e-9d0b-fa335a6948fa/signalling-pod for retry: Get http://127.0.0.1:33467/api/v1/namespaces/permit-plugin4a2b06ee-ca1a-4c9e-9d0b-fa335a6948fa/pods/signalling-pod: dial tcp 127.0.0.1:33467: connect: connection refused; retrying...
I0814 11:02:50.467578  110631 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugin4946b26a-944d-4220-869d-bf3f1e62fd02/pods/preemptor-pod: (2.385873ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:51326]
I0814 11:02:50.567211  110631 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugin4946b26a-944d-4220-869d-bf3f1e62fd02/pods/preemptor-pod: (2.034076ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:51326]
I0814 11:02:50.667112  110631 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugin4946b26a-944d-4220-869d-bf3f1e62fd02/pods/preemptor-pod: (1.890077ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:51326]
I0814 11:02:50.767160  110631 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugin4946b26a-944d-4220-869d-bf3f1e62fd02/pods/preemptor-pod: (1.944396ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:51326]
I0814 11:02:50.878682  110631 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugin4946b26a-944d-4220-869d-bf3f1e62fd02/pods/preemptor-pod: (13.45156ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:51326]
I0814 11:02:50.967250  110631 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugin4946b26a-944d-4220-869d-bf3f1e62fd02/pods/preemptor-pod: (1.912224ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:51326]
I0814 11:02:51.066901  110631 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugin4946b26a-944d-4220-869d-bf3f1e62fd02/pods/preemptor-pod: (1.674324ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:51326]
I0814 11:02:51.166441  110631 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0814 11:02:51.166929  110631 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugin4946b26a-944d-4220-869d-bf3f1e62fd02/pods/preemptor-pod: (1.518675ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:51326]
I0814 11:02:51.167530  110631 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0814 11:02:51.167536  110631 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0814 11:02:51.169075  110631 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0814 11:02:51.169174  110631 scheduling_queue.go:830] About to try and schedule pod preempt-with-permit-plugin4946b26a-944d-4220-869d-bf3f1e62fd02/preemptor-pod
I0814 11:02:51.169197  110631 scheduler.go:477] Attempting to schedule pod: preempt-with-permit-plugin4946b26a-944d-4220-869d-bf3f1e62fd02/preemptor-pod
I0814 11:02:51.169303  110631 factory.go:550] Unable to schedule preempt-with-permit-plugin4946b26a-944d-4220-869d-bf3f1e62fd02/preemptor-pod: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0814 11:02:51.169519  110631 factory.go:624] Updating pod condition for preempt-with-permit-plugin4946b26a-944d-4220-869d-bf3f1e62fd02/preemptor-pod to (PodScheduled==False, Reason=Unschedulable)
I0814 11:02:51.170582  110631 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0814 11:02:51.170590  110631 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0814 11:02:51.170616  110631 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0814 11:02:51.171110  110631 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugin4946b26a-944d-4220-869d-bf3f1e62fd02/pods/preemptor-pod: (1.112347ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:51326]
I0814 11:02:51.171420  110631 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugin4946b26a-944d-4220-869d-bf3f1e62fd02/pods/preemptor-pod: (1.016825ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:51630]
I0814 11:02:51.266880  110631 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugin4946b26a-944d-4220-869d-bf3f1e62fd02/pods/preemptor-pod: (1.703949ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:51630]
I0814 11:02:51.366695  110631 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugin4946b26a-944d-4220-869d-bf3f1e62fd02/pods/preemptor-pod: (1.60981ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:51630]
I0814 11:02:51.466523  110631 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugin4946b26a-944d-4220-869d-bf3f1e62fd02/pods/preemptor-pod: (1.417153ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:51630]
I0814 11:02:51.567116  110631 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugin4946b26a-944d-4220-869d-bf3f1e62fd02/pods/preemptor-pod: (2.020935ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:51630]
I0814 11:02:51.666786  110631 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugin4946b26a-944d-4220-869d-bf3f1e62fd02/pods/preemptor-pod: (1.638065ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:51630]
I0814 11:02:51.767928  110631 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugin4946b26a-944d-4220-869d-bf3f1e62fd02/pods/preemptor-pod: (2.76611ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:51630]
I0814 11:02:51.867493  110631 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugin4946b26a-944d-4220-869d-bf3f1e62fd02/pods/preemptor-pod: (2.275281ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:51630]
I0814 11:02:51.967177  110631 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugin4946b26a-944d-4220-869d-bf3f1e62fd02/pods/preemptor-pod: (2.06345ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:51630]
I0814 11:02:52.066739  110631 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugin4946b26a-944d-4220-869d-bf3f1e62fd02/pods/preemptor-pod: (1.715121ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:51630]
I0814 11:02:52.166565  110631 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0814 11:02:52.167309  110631 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugin4946b26a-944d-4220-869d-bf3f1e62fd02/pods/preemptor-pod: (2.087144ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:51630]
I0814 11:02:52.167695  110631 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0814 11:02:52.168502  110631 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0814 11:02:52.169209  110631 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0814 11:02:52.169349  110631 scheduling_queue.go:830] About to try and schedule pod preempt-with-permit-plugin4946b26a-944d-4220-869d-bf3f1e62fd02/preemptor-pod
I0814 11:02:52.169366  110631 scheduler.go:477] Attempting to schedule pod: preempt-with-permit-plugin4946b26a-944d-4220-869d-bf3f1e62fd02/preemptor-pod
I0814 11:02:52.169513  110631 factory.go:550] Unable to schedule preempt-with-permit-plugin4946b26a-944d-4220-869d-bf3f1e62fd02/preemptor-pod: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0814 11:02:52.169552  110631 factory.go:624] Updating pod condition for preempt-with-permit-plugin4946b26a-944d-4220-869d-bf3f1e62fd02/preemptor-pod to (PodScheduled==False, Reason=Unschedulable)
I0814 11:02:52.170698  110631 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0814 11:02:52.170711  110631 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0814 11:02:52.170721  110631 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0814 11:02:52.171455  110631 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugin4946b26a-944d-4220-869d-bf3f1e62fd02/pods/preemptor-pod: (1.495017ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:51326]
I0814 11:02:52.171498  110631 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugin4946b26a-944d-4220-869d-bf3f1e62fd02/pods/preemptor-pod: (1.520483ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:51630]
I0814 11:02:52.267528  110631 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugin4946b26a-944d-4220-869d-bf3f1e62fd02/pods/preemptor-pod: (2.366707ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:51630]
I0814 11:02:52.367818  110631 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugin4946b26a-944d-4220-869d-bf3f1e62fd02/pods/preemptor-pod: (2.656515ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:51630]
I0814 11:02:52.468360  110631 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugin4946b26a-944d-4220-869d-bf3f1e62fd02/pods/preemptor-pod: (3.051405ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:51630]
I0814 11:02:52.568275  110631 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugin4946b26a-944d-4220-869d-bf3f1e62fd02/pods/preemptor-pod: (2.905391ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:51630]
I0814 11:02:52.667373  110631 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugin4946b26a-944d-4220-869d-bf3f1e62fd02/pods/preemptor-pod: (2.2698ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:51630]
I0814 11:02:52.767098  110631 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugin4946b26a-944d-4220-869d-bf3f1e62fd02/pods/preemptor-pod: (1.764855ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:51630]
I0814 11:02:52.867196  110631 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugin4946b26a-944d-4220-869d-bf3f1e62fd02/pods/preemptor-pod: (2.054587ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:51630]
I0814 11:02:52.967209  110631 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugin4946b26a-944d-4220-869d-bf3f1e62fd02/pods/preemptor-pod: (2.119865ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:51630]
I0814 11:02:53.066559  110631 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugin4946b26a-944d-4220-869d-bf3f1e62fd02/pods/preemptor-pod: (1.473639ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:51630]
I0814 11:02:53.166726  110631 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0814 11:02:53.166989  110631 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugin4946b26a-944d-4220-869d-bf3f1e62fd02/pods/preemptor-pod: (1.842099ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:51630]
I0814 11:02:53.168035  110631 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0814 11:02:53.168750  110631 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0814 11:02:53.169388  110631 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0814 11:02:53.169555  110631 scheduling_queue.go:830] About to try and schedule pod preempt-with-permit-plugin4946b26a-944d-4220-869d-bf3f1e62fd02/preemptor-pod
I0814 11:02:53.169579  110631 scheduler.go:477] Attempting to schedule pod: preempt-with-permit-plugin4946b26a-944d-4220-869d-bf3f1e62fd02/preemptor-pod
I0814 11:02:53.169772  110631 factory.go:550] Unable to schedule preempt-with-permit-plugin4946b26a-944d-4220-869d-bf3f1e62fd02/preemptor-pod: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0814 11:02:53.169836  110631 factory.go:624] Updating pod condition for preempt-with-permit-plugin4946b26a-944d-4220-869d-bf3f1e62fd02/preemptor-pod to (PodScheduled==False, Reason=Unschedulable)
I0814 11:02:53.170860  110631 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0814 11:02:53.170952  110631 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0814 11:02:53.170957  110631 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0814 11:02:53.171609  110631 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugin4946b26a-944d-4220-869d-bf3f1e62fd02/pods/preemptor-pod: (1.454596ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:51630]
I0814 11:02:53.172233  110631 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugin4946b26a-944d-4220-869d-bf3f1e62fd02/pods/preemptor-pod: (2.077201ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:51326]
I0814 11:02:53.268520  110631 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugin4946b26a-944d-4220-869d-bf3f1e62fd02/pods/preemptor-pod: (3.412607ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:51326]
I0814 11:02:53.367231  110631 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugin4946b26a-944d-4220-869d-bf3f1e62fd02/pods/preemptor-pod: (1.949577ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:51326]
I0814 11:02:53.467501  110631 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugin4946b26a-944d-4220-869d-bf3f1e62fd02/pods/preemptor-pod: (2.357713ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:51326]
I0814 11:02:53.567138  110631 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugin4946b26a-944d-4220-869d-bf3f1e62fd02/pods/preemptor-pod: (2.016926ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:51326]
I0814 11:02:53.666839  110631 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugin4946b26a-944d-4220-869d-bf3f1e62fd02/pods/preemptor-pod: (1.783641ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:51326]
I0814 11:02:53.767574  110631 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugin4946b26a-944d-4220-869d-bf3f1e62fd02/pods/preemptor-pod: (2.293305ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:51326]
I0814 11:02:53.866578  110631 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugin4946b26a-944d-4220-869d-bf3f1e62fd02/pods/preemptor-pod: (1.520103ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:51326]
I0814 11:02:53.966732  110631 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugin4946b26a-944d-4220-869d-bf3f1e62fd02/pods/preemptor-pod: (1.573286ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:51326]
I0814 11:02:54.066847  110631 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugin4946b26a-944d-4220-869d-bf3f1e62fd02/pods/preemptor-pod: (1.712723ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:51326]
I0814 11:02:54.166839  110631 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0814 11:02:54.167028  110631 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugin4946b26a-944d-4220-869d-bf3f1e62fd02/pods/preemptor-pod: (1.829387ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:51326]
I0814 11:02:54.168221  110631 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0814 11:02:54.168880  110631 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0814 11:02:54.169539  110631 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0814 11:02:54.169695  110631 scheduling_queue.go:830] About to try and schedule pod preempt-with-permit-plugin4946b26a-944d-4220-869d-bf3f1e62fd02/preemptor-pod
I0814 11:02:54.169710  110631 scheduler.go:477] Attempting to schedule pod: preempt-with-permit-plugin4946b26a-944d-4220-869d-bf3f1e62fd02/preemptor-pod
I0814 11:02:54.169848  110631 factory.go:550] Unable to schedule preempt-with-permit-plugin4946b26a-944d-4220-869d-bf3f1e62fd02/preemptor-pod: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0814 11:02:54.169885  110631 factory.go:624] Updating pod condition for preempt-with-permit-plugin4946b26a-944d-4220-869d-bf3f1e62fd02/preemptor-pod to (PodScheduled==False, Reason=Unschedulable)
I0814 11:02:54.171046  110631 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0814 11:02:54.171069  110631 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0814 11:02:54.171252  110631 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0814 11:02:54.172194  110631 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugin4946b26a-944d-4220-869d-bf3f1e62fd02/pods/preemptor-pod: (1.43179ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:51630]
I0814 11:02:54.172832  110631 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugin4946b26a-944d-4220-869d-bf3f1e62fd02/pods/preemptor-pod: (2.06519ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:51326]
I0814 11:02:54.267099  110631 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugin4946b26a-944d-4220-869d-bf3f1e62fd02/pods/preemptor-pod: (1.929833ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:51326]
I0814 11:02:54.368019  110631 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugin4946b26a-944d-4220-869d-bf3f1e62fd02/pods/preemptor-pod: (2.918367ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:51326]
I0814 11:02:54.467235  110631 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugin4946b26a-944d-4220-869d-bf3f1e62fd02/pods/preemptor-pod: (2.043986ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:51326]
I0814 11:02:54.566772  110631 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugin4946b26a-944d-4220-869d-bf3f1e62fd02/pods/preemptor-pod: (1.583631ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:51326]
I0814 11:02:54.667275  110631 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugin4946b26a-944d-4220-869d-bf3f1e62fd02/pods/preemptor-pod: (2.147417ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:51326]
I0814 11:02:54.767387  110631 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugin4946b26a-944d-4220-869d-bf3f1e62fd02/pods/preemptor-pod: (2.313291ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:51326]
I0814 11:02:54.771937  110631 httplog.go:90] GET /api/v1/namespaces/kube-system: (1.594753ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:51326]
I0814 11:02:54.773648  110631 httplog.go:90] GET /api/v1/namespaces/kube-public: (1.327021ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:51326]
I0814 11:02:54.775112  110631 httplog.go:90] GET /api/v1/namespaces/kube-node-lease: (1.077484ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:51326]
I0814 11:02:54.867396  110631 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugin4946b26a-944d-4220-869d-bf3f1e62fd02/pods/preemptor-pod: (2.233893ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:51326]
E0814 11:02:54.881549  110631 event_broadcaster.go:242] Unable to write event: 'Post http://127.0.0.1:33467/apis/events.k8s.io/v1beta1/namespaces/permit-plugin4a2b06ee-ca1a-4c9e-9d0b-fa335a6948fa/events: dial tcp 127.0.0.1:33467: connect: connection refused' (may retry after sleeping)
I0814 11:02:54.966570  110631 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugin4946b26a-944d-4220-869d-bf3f1e62fd02/pods/preemptor-pod: (1.472484ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:51326]
I0814 11:02:55.067145  110631 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugin4946b26a-944d-4220-869d-bf3f1e62fd02/pods/preemptor-pod: (1.944625ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:51326]
I0814 11:02:55.166795  110631 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugin4946b26a-944d-4220-869d-bf3f1e62fd02/pods/preemptor-pod: (1.653564ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:51326]
I0814 11:02:55.166988  110631 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0814 11:02:55.168393  110631 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0814 11:02:55.169011  110631 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0814 11:02:55.169686  110631 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0814 11:02:55.169910  110631 scheduling_queue.go:830] About to try and schedule pod preempt-with-permit-plugin4946b26a-944d-4220-869d-bf3f1e62fd02/preemptor-pod
I0814 11:02:55.169931  110631 scheduler.go:477] Attempting to schedule pod: preempt-with-permit-plugin4946b26a-944d-4220-869d-bf3f1e62fd02/preemptor-pod
I0814 11:02:55.170080  110631 factory.go:550] Unable to schedule preempt-with-permit-plugin4946b26a-944d-4220-869d-bf3f1e62fd02/preemptor-pod: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0814 11:02:55.170145  110631 factory.go:624] Updating pod condition for preempt-with-permit-plugin4946b26a-944d-4220-869d-bf3f1e62fd02/preemptor-pod to (PodScheduled==False, Reason=Unschedulable)
I0814 11:02:55.171146  110631 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0814 11:02:55.171184  110631 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0814 11:02:55.171387  110631 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0814 11:02:55.172204  110631 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugin4946b26a-944d-4220-869d-bf3f1e62fd02/pods/preemptor-pod: (1.82039ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:51326]
I0814 11:02:55.172450  110631 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugin4946b26a-944d-4220-869d-bf3f1e62fd02/pods/preemptor-pod: (1.824877ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:51630]
I0814 11:02:55.266975  110631 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugin4946b26a-944d-4220-869d-bf3f1e62fd02/pods/preemptor-pod: (1.888701ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:51326]
I0814 11:02:55.367177  110631 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugin4946b26a-944d-4220-869d-bf3f1e62fd02/pods/preemptor-pod: (2.074602ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:51326]
I0814 11:02:55.468720  110631 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugin4946b26a-944d-4220-869d-bf3f1e62fd02/pods/preemptor-pod: (3.677755ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:51326]
I0814 11:02:55.567262  110631 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugin4946b26a-944d-4220-869d-bf3f1e62fd02/pods/preemptor-pod: (1.96689ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:51326]
I0814 11:02:55.666765  110631 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugin4946b26a-944d-4220-869d-bf3f1e62fd02/pods/preemptor-pod: (1.670139ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:51326]
I0814 11:02:55.767184  110631 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugin4946b26a-944d-4220-869d-bf3f1e62fd02/pods/preemptor-pod: (2.036945ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:51326]
I0814 11:02:55.866983  110631 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugin4946b26a-944d-4220-869d-bf3f1e62fd02/pods/preemptor-pod: (1.83147ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:51326]
I0814 11:02:55.967706  110631 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugin4946b26a-944d-4220-869d-bf3f1e62fd02/pods/preemptor-pod: (1.59048ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:51326]
I0814 11:02:56.067935  110631 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugin4946b26a-944d-4220-869d-bf3f1e62fd02/pods/preemptor-pod: (2.829918ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:51326]
I0814 11:02:56.166552  110631 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugin4946b26a-944d-4220-869d-bf3f1e62fd02/pods/preemptor-pod: (1.490509ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:51326]
I0814 11:02:56.167104  110631 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0814 11:02:56.168560  110631 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0814 11:02:56.169162  110631 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0814 11:02:56.169858  110631 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0814 11:02:56.169992  110631 scheduling_queue.go:830] About to try and schedule pod preempt-with-permit-plugin4946b26a-944d-4220-869d-bf3f1e62fd02/preemptor-pod
I0814 11:02:56.170013  110631 scheduler.go:477] Attempting to schedule pod: preempt-with-permit-plugin4946b26a-944d-4220-869d-bf3f1e62fd02/preemptor-pod
I0814 11:02:56.170140  110631 factory.go:550] Unable to schedule preempt-with-permit-plugin4946b26a-944d-4220-869d-bf3f1e62fd02/preemptor-pod: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0814 11:02:56.170188  110631 factory.go:624] Updating pod condition for preempt-with-permit-plugin4946b26a-944d-4220-869d-bf3f1e62fd02/preemptor-pod to (PodScheduled==False, Reason=Unschedulable)
I0814 11:02:56.171294  110631 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0814 11:02:56.171537  110631 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0814 11:02:56.171592  110631 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0814 11:02:56.172412  110631 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugin4946b26a-944d-4220-869d-bf3f1e62fd02/pods/preemptor-pod: (1.785579ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:51630]
I0814 11:02:56.172721  110631 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugin4946b26a-944d-4220-869d-bf3f1e62fd02/pods/preemptor-pod: (2.231843ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:51326]
I0814 11:02:56.266703  110631 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugin4946b26a-944d-4220-869d-bf3f1e62fd02/pods/preemptor-pod: (1.490902ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:51326]
I0814 11:02:56.366732  110631 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugin4946b26a-944d-4220-869d-bf3f1e62fd02/pods/preemptor-pod: (1.590366ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:51326]
I0814 11:02:56.466990  110631 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugin4946b26a-944d-4220-869d-bf3f1e62fd02/pods/preemptor-pod: (1.891477ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:51326]
I0814 11:02:56.566610  110631 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugin4946b26a-944d-4220-869d-bf3f1e62fd02/pods/preemptor-pod: (1.521649ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:51326]
I0814 11:02:56.666727  110631 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugin4946b26a-944d-4220-869d-bf3f1e62fd02/pods/preemptor-pod: (1.639649ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:51326]
I0814 11:02:56.766712  110631 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugin4946b26a-944d-4220-869d-bf3f1e62fd02/pods/preemptor-pod: (1.60508ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:51326]
I0814 11:02:56.867042  110631 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugin4946b26a-944d-4220-869d-bf3f1e62fd02/pods/preemptor-pod: (1.925238ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:51326]
I0814 11:02:56.966726  110631 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugin4946b26a-944d-4220-869d-bf3f1e62fd02/pods/preemptor-pod: (1.624803ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:51326]
I0814 11:02:57.066759  110631 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugin4946b26a-944d-4220-869d-bf3f1e62fd02/pods/preemptor-pod: (1.55274ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:51326]
I0814 11:02:57.158511  110631 scheduling_queue.go:830] About to try and schedule pod preempt-with-permit-plugin4946b26a-944d-4220-869d-bf3f1e62fd02/preemptor-pod
I0814 11:02:57.158546  110631 scheduler.go:477] Attempting to schedule pod: preempt-with-permit-plugin4946b26a-944d-4220-869d-bf3f1e62fd02/preemptor-pod
I0814 11:02:57.158707  110631 factory.go:550] Unable to schedule preempt-with-permit-plugin4946b26a-944d-4220-869d-bf3f1e62fd02/preemptor-pod: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0814 11:02:57.158748  110631 factory.go:624] Updating pod condition for preempt-with-permit-plugin4946b26a-944d-4220-869d-bf3f1e62fd02/preemptor-pod to (PodScheduled==False, Reason=Unschedulable)
I0814 11:02:57.160901  110631 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugin4946b26a-944d-4220-869d-bf3f1e62fd02/pods/preemptor-pod: (1.84702ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:51326]
I0814 11:02:57.160905  110631 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugin4946b26a-944d-4220-869d-bf3f1e62fd02/pods/preemptor-pod: (1.798641ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:51630]
I0814 11:02:57.166364  110631 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugin4946b26a-944d-4220-869d-bf3f1e62fd02/pods/preemptor-pod: (1.260854ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:51326]
I0814 11:02:57.167270  110631 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0814 11:02:57.168757  110631 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0814 11:02:57.169305  110631 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0814 11:02:57.170024  110631 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0814 11:02:57.170208  110631 scheduling_queue.go:830] About to try and schedule pod preempt-with-permit-plugin4946b26a-944d-4220-869d-bf3f1e62fd02/preemptor-pod
I0814 11:02:57.170239  110631 scheduler.go:477] Attempting to schedule pod: preempt-with-permit-plugin4946b26a-944d-4220-869d-bf3f1e62fd02/preemptor-pod
I0814 11:02:57.170404  110631 factory.go:550] Unable to schedule preempt-with-permit-plugin4946b26a-944d-4220-869d-bf3f1e62fd02/preemptor-pod: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0814 11:02:57.170452  110631 factory.go:624] Updating pod condition for preempt-with-permit-plugin4946b26a-944d-4220-869d-bf3f1e62fd02/preemptor-pod to (PodScheduled==False, Reason=Unschedulable)
I0814 11:02:57.171435  110631 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0814 11:02:57.171721  110631 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0814 11:02:57.171721  110631 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0814 11:02:57.172443  110631 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugin4946b26a-944d-4220-869d-bf3f1e62fd02/pods/preemptor-pod: (1.749709ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:51326]
I0814 11:02:57.172571  110631 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugin4946b26a-944d-4220-869d-bf3f1e62fd02/pods/preemptor-pod: (1.78757ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:51630]
I0814 11:02:57.267298  110631 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugin4946b26a-944d-4220-869d-bf3f1e62fd02/pods/preemptor-pod: (2.050764ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:51630]
I0814 11:02:57.366847  110631 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugin4946b26a-944d-4220-869d-bf3f1e62fd02/pods/preemptor-pod: (1.68503ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:51630]
I0814 11:02:57.466755  110631 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugin4946b26a-944d-4220-869d-bf3f1e62fd02/pods/preemptor-pod: (1.691338ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:51630]
I0814 11:02:57.566760  110631 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugin4946b26a-944d-4220-869d-bf3f1e62fd02/pods/preemptor-pod: (1.596323ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:51630]
I0814 11:02:57.666806  110631 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugin4946b26a-944d-4220-869d-bf3f1e62fd02/pods/preemptor-pod: (1.55805ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:51630]
I0814 11:02:57.766863  110631 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugin4946b26a-944d-4220-869d-bf3f1e62fd02/pods/preemptor-pod: (1.724731ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:51630]
I0814 11:02:57.866894  110631 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugin4946b26a-944d-4220-869d-bf3f1e62fd02/pods/preemptor-pod: (1.761819ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:51630]
I0814 11:02:57.967939  110631 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugin4946b26a-944d-4220-869d-bf3f1e62fd02/pods/preemptor-pod: (2.855293ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:51630]
I0814 11:02:58.062731  110631 httplog.go:90] GET /api/v1/namespaces/default: (1.763603ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:51630]
I0814 11:02:58.064115  110631 httplog.go:90] GET /api/v1/namespaces/default/services/kubernetes: (966.419µs) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:51630]
I0814 11:02:58.065397  110631 httplog.go:90] GET /api/v1/namespaces/default/endpoints/kubernetes: (954.709µs) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:51630]
I0814 11:02:58.073434  110631 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugin4946b26a-944d-4220-869d-bf3f1e62fd02/pods/preemptor-pod: (8.420162ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:51326]
I0814 11:02:58.167142  110631 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugin4946b26a-944d-4220-869d-bf3f1e62fd02/pods/preemptor-pod: (2.022142ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:51326]
I0814 11:02:58.167500  110631 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0814 11:02:58.168931  110631 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0814 11:02:58.169472  110631 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0814 11:02:58.170263  110631 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0814 11:02:58.170531  110631 scheduling_queue.go:830] About to try and schedule pod preempt-with-permit-plugin4946b26a-944d-4220-869d-bf3f1e62fd02/preemptor-pod
I0814 11:02:58.170723  110631 scheduler.go:477] Attempting to schedule pod: preempt-with-permit-plugin4946b26a-944d-4220-869d-bf3f1e62fd02/preemptor-pod
I0814 11:02:58.171035  110631 factory.go:550] Unable to schedule preempt-with-permit-plugin4946b26a-944d-4220-869d-bf3f1e62fd02/preemptor-pod: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0814 11:02:58.171155  110631 factory.go:624] Updating pod condition for preempt-with-permit-plugin4946b26a-944d-4220-869d-bf3f1e62fd02/preemptor-pod to (PodScheduled==False, Reason=Unschedulable)
I0814 11:02:58.171582  110631 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0814 11:02:58.171885  110631 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0814 11:02:58.171895  110631 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0814 11:02:58.172987  110631 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugin4946b26a-944d-4220-869d-bf3f1e62fd02/pods/preemptor-pod: (1.513858ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:51630]
I0814 11:02:58.177501  110631 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugin4946b26a-944d-4220-869d-bf3f1e62fd02/pods/preemptor-pod: (3.167278ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:51326]
I0814 11:02:58.267036  110631 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugin4946b26a-944d-4220-869d-bf3f1e62fd02/pods/preemptor-pod: (1.93264ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:51326]
I0814 11:02:58.366714  110631 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugin4946b26a-944d-4220-869d-bf3f1e62fd02/pods/preemptor-pod: (1.53424ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:51326]
I0814 11:02:58.466756  110631 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugin4946b26a-944d-4220-869d-bf3f1e62fd02/pods/preemptor-pod: (1.67556ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:51326]
I0814 11:02:58.566677  110631 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugin4946b26a-944d-4220-869d-bf3f1e62fd02/pods/preemptor-pod: (1.544894ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:51326]
I0814 11:02:58.666701  110631 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugin4946b26a-944d-4220-869d-bf3f1e62fd02/pods/preemptor-pod: (1.574757ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:51326]
I0814 11:02:58.768747  110631 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugin4946b26a-944d-4220-869d-bf3f1e62fd02/pods/preemptor-pod: (3.624594ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:51326]
I0814 11:02:58.866709  110631 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugin4946b26a-944d-4220-869d-bf3f1e62fd02/pods/preemptor-pod: (1.560223ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:51326]
I0814 11:02:58.967223  110631 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugin4946b26a-944d-4220-869d-bf3f1e62fd02/pods/preemptor-pod: (2.017805ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:51326]
I0814 11:02:59.066556  110631 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugin4946b26a-944d-4220-869d-bf3f1e62fd02/pods/preemptor-pod: (1.45839ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:51326]
I0814 11:02:59.166488  110631 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugin4946b26a-944d-4220-869d-bf3f1e62fd02/pods/preemptor-pod: (1.400836ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:51326]
I0814 11:02:59.167646  110631 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0814 11:02:59.169211  110631 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0814 11:02:59.169693  110631 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0814 11:02:59.170519  110631 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0814 11:02:59.170617  110631 scheduling_queue.go:830] About to try and schedule pod preempt-with-permit-plugin4946b26a-944d-4220-869d-bf3f1e62fd02/preemptor-pod
I0814 11:02:59.170627  110631 scheduler.go:477] Attempting to schedule pod: preempt-with-permit-plugin4946b26a-944d-4220-869d-bf3f1e62fd02/preemptor-pod
I0814 11:02:59.170752  110631 factory.go:550] Unable to schedule preempt-with-permit-plugin4946b26a-944d-4220-869d-bf3f1e62fd02/preemptor-pod: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0814 11:02:59.170795  110631 factory.go:624] Updating pod condition for preempt-with-permit-plugin4946b26a-944d-4220-869d-bf3f1e62fd02/preemptor-pod to (PodScheduled==False, Reason=Unschedulable)
I0814 11:02:59.171726  110631 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0814 11:02:59.171997  110631 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0814 11:02:59.172072  110631 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0814 11:02:59.173138  110631 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugin4946b26a-944d-4220-869d-bf3f1e62fd02/pods/preemptor-pod: (2.046328ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:51630]
I0814 11:02:59.173441  110631 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugin4946b26a-944d-4220-869d-bf3f1e62fd02/pods/preemptor-pod: (2.196257ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:51326]
I0814 11:02:59.266802  110631 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugin4946b26a-944d-4220-869d-bf3f1e62fd02/pods/preemptor-pod: (1.716829ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:51630]
I0814 11:02:59.366651  110631 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugin4946b26a-944d-4220-869d-bf3f1e62fd02/pods/preemptor-pod: (1.559301ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:51630]
I0814 11:02:59.368319  110631 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugin4946b26a-944d-4220-869d-bf3f1e62fd02/pods/preemptor-pod: (1.19841ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:51630]
I0814 11:02:59.370171  110631 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugin4946b26a-944d-4220-869d-bf3f1e62fd02/pods/waiting-pod: (1.29108ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:51630]
I0814 11:02:59.375270  110631 httplog.go:90] DELETE /api/v1/namespaces/preempt-with-permit-plugin4946b26a-944d-4220-869d-bf3f1e62fd02/pods/waiting-pod: (4.617328ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:51630]
I0814 11:02:59.378594  110631 scheduling_queue.go:830] About to try and schedule pod preempt-with-permit-plugin4946b26a-944d-4220-869d-bf3f1e62fd02/preemptor-pod
I0814 11:02:59.378634  110631 scheduler.go:473] Skip schedule deleting pod: preempt-with-permit-plugin4946b26a-944d-4220-869d-bf3f1e62fd02/preemptor-pod
I0814 11:02:59.381103  110631 httplog.go:90] POST /apis/events.k8s.io/v1beta1/namespaces/preempt-with-permit-plugin4946b26a-944d-4220-869d-bf3f1e62fd02/events: (2.151011ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:51326]
I0814 11:02:59.382247  110631 httplog.go:90] DELETE /api/v1/namespaces/preempt-with-permit-plugin4946b26a-944d-4220-869d-bf3f1e62fd02/pods/preemptor-pod: (6.554509ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:51630]
I0814 11:02:59.384740  110631 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugin4946b26a-944d-4220-869d-bf3f1e62fd02/pods/waiting-pod: (1.004067ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:51630]
I0814 11:02:59.387265  110631 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugin4946b26a-944d-4220-869d-bf3f1e62fd02/pods/preemptor-pod: (957.973µs) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:51630]
E0814 11:02:59.387749  110631 scheduling_queue.go:833] Error while retrieving next pod from scheduling queue: scheduling queue is closed
I0814 11:02:59.388047  110631 httplog.go:90] GET /api/v1/nodes?allowWatchBookmarks=true&resourceVersion=29418&timeout=7m32s&timeoutSeconds=452&watch=true: (1m1.232943593s) 0 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:46002]
I0814 11:02:59.388052  110631 httplog.go:90] GET /apis/policy/v1beta1/poddisruptionbudgets?allowWatchBookmarks=true&resourceVersion=29418&timeout=6m48s&timeoutSeconds=408&watch=true: (1m1.231302364s) 0 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:46022]
I0814 11:02:59.388068  110631 httplog.go:90] GET /api/v1/services?allowWatchBookmarks=true&resourceVersion=29645&timeout=8m15s&timeoutSeconds=495&watch=true: (1m1.23379003s) 0 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:46012]
I0814 11:02:59.388084  110631 httplog.go:90] GET /apis/storage.k8s.io/v1beta1/csinodes?allowWatchBookmarks=true&resourceVersion=29419&timeout=6m39s&timeoutSeconds=399&watch=true: (1m1.233342798s) 0 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:46016]
I0814 11:02:59.388149  110631 httplog.go:90] GET /apis/storage.k8s.io/v1/storageclasses?allowWatchBookmarks=true&resourceVersion=29420&timeout=8m9s&timeoutSeconds=489&watch=true: (1m1.233909425s) 0 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:46018]
I0814 11:02:59.388167  110631 httplog.go:90] GET /api/v1/replicationcontrollers?allowWatchBookmarks=true&resourceVersion=29418&timeout=9m19s&timeoutSeconds=559&watch=true: (1m1.233404244s) 0 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:46014]
I0814 11:02:59.388172  110631 httplog.go:90] GET /api/v1/persistentvolumeclaims?allowWatchBookmarks=true&resourceVersion=29418&timeout=7m52s&timeoutSeconds=472&watch=true: (1m1.233045613s) 0 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:46020]
I0814 11:02:59.388239  110631 httplog.go:90] GET /api/v1/persistentvolumes?allowWatchBookmarks=true&resourceVersion=29418&timeout=9m2s&timeoutSeconds=542&watch=true: (1m1.232692853s) 0 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:45666]
I0814 11:02:59.388258  110631 httplog.go:90] GET /apis/apps/v1/replicasets?allowWatchBookmarks=true&resourceVersion=29420&timeout=8m54s&timeoutSeconds=534&watch=true: (1m1.232535313s) 0 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:46008]
I0814 11:02:59.388245  110631 httplog.go:90] GET /api/v1/pods?allowWatchBookmarks=true&resourceVersion=29418&timeout=5m48s&timeoutSeconds=348&watch=true: (1m1.235066194s) 0 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:46004]
I0814 11:02:59.388919  110631 httplog.go:90] GET /apis/apps/v1/statefulsets?allowWatchBookmarks=true&resourceVersion=29420&timeout=5m44s&timeoutSeconds=344&watch=true: (1m1.232753051s) 0 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:45668]
I0814 11:02:59.394300  110631 httplog.go:90] DELETE /api/v1/nodes: (6.04974ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:51630]
I0814 11:02:59.394497  110631 controller.go:176] Shutting down kubernetes service endpoint reconciler
I0814 11:02:59.395833  110631 httplog.go:90] GET /api/v1/namespaces/default/endpoints/kubernetes: (1.12285ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:51630]
I0814 11:02:59.411436  110631 httplog.go:90] PUT /api/v1/namespaces/default/endpoints/kubernetes: (2.730834ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:51630]
--- FAIL: TestPreemptWithPermitPlugin (64.81s)
    framework_test.go:1618: Expected the preemptor pod to be scheduled. error: timed out waiting for the condition
    framework_test.go:1622: Expected the waiting pod to get preempted and deleted

				from junit_eb089aee80105aff5db0557ae4449d31f19359f2_20190814-105453.xml
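The 100 ms cadence of GET requests for preemptor-pod above is the test harness polling the API server for the pod to become scheduled; the pod never fits (the scheduler keeps logging "Insufficient cpu, Insufficient memory"), so the wait at framework_test.go:1618 times out and, per framework_test.go:1622, the waiting pod is consequently never preempted or deleted. Below is a minimal sketch of that kind of polling loop, assuming a client-go clientset of the vintage vendored here; the package and function names, interval, and timeout are illustrative and are not the test's actual helper.

```go
package schedulerwait

import (
	"time"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/wait"
	"k8s.io/client-go/kubernetes"
)

// waitForPodScheduled polls the API server until the pod has been bound to a
// node or the timeout expires. The 100ms interval mirrors the GET cadence in
// the log above; the helper name, interval, and timeout are illustrative only.
func waitForPodScheduled(cs kubernetes.Interface, namespace, name string, timeout time.Duration) error {
	return wait.Poll(100*time.Millisecond, timeout, func() (bool, error) {
		pod, err := cs.CoreV1().Pods(namespace).Get(name, metav1.GetOptions{})
		if err != nil {
			return false, err
		}
		// The scheduler sets spec.nodeName when it binds the pod; until then the
		// pod keeps the PodScheduled=False/Unschedulable condition seen above.
		return pod.Spec.NodeName != "", nil
	})
}
```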


Error lines from build-log.txt

... skipping 763 lines ...
W0814 10:49:46.777] I0814 10:49:46.777382   53119 clusterroleaggregation_controller.go:148] Starting ClusterRoleAggregator
W0814 10:49:46.778] I0814 10:49:46.776040   53119 serviceaccounts_controller.go:117] Starting service account controller
W0814 10:49:46.778] I0814 10:49:46.777488   53119 certificate_controller.go:113] Starting certificate controller
W0814 10:49:46.778] I0814 10:49:46.777568   53119 controller_utils.go:1029] Waiting for caches to sync for certificate controller
W0814 10:49:46.778] I0814 10:49:46.777525   53119 controller_utils.go:1029] Waiting for caches to sync for ClusterRoleAggregator controller
W0814 10:49:46.778] I0814 10:49:46.777551   53119 controller_utils.go:1029] Waiting for caches to sync for service account controller
W0814 10:49:46.778] E0814 10:49:46.778107   53119 core.go:78] Failed to start service controller: WARNING: no cloud provider provided, services of type LoadBalancer will fail
W0814 10:49:46.779] W0814 10:49:46.778136   53119 controllermanager.go:527] Skipping "service"
W0814 10:49:46.779] I0814 10:49:46.778764   53119 controllermanager.go:535] Started "statefulset"
W0814 10:49:46.779] I0814 10:49:46.779094   53119 stateful_set.go:145] Starting stateful set controller
W0814 10:49:46.779] I0814 10:49:46.779449   53119 controller_utils.go:1029] Waiting for caches to sync for stateful set controller
W0814 10:49:46.779] I0814 10:49:46.779177   53119 controllermanager.go:535] Started "cronjob"
W0814 10:49:46.780] I0814 10:49:46.779184   53119 cronjob_controller.go:96] Starting CronJob Manager
... skipping 26 lines ...
W0814 10:49:46.941] I0814 10:49:46.936307   53119 controller_utils.go:1029] Waiting for caches to sync for resource quota controller
W0814 10:49:46.942] I0814 10:49:46.936381   53119 resource_quota_monitor.go:303] QuotaMonitor running
W0814 10:49:46.942] I0814 10:49:46.937862   53119 controllermanager.go:535] Started "ttl"
W0814 10:49:46.942] I0814 10:49:46.938155   53119 ttl_controller.go:116] Starting TTL controller
W0814 10:49:46.942] I0814 10:49:46.938169   53119 node_lifecycle_controller.go:77] Sending events to api server
W0814 10:49:46.943] I0814 10:49:46.938187   53119 controller_utils.go:1029] Waiting for caches to sync for TTL controller
W0814 10:49:46.943] E0814 10:49:46.938202   53119 core.go:175] failed to start cloud node lifecycle controller: no cloud provider provided
W0814 10:49:46.943] W0814 10:49:46.938267   53119 controllermanager.go:527] Skipping "cloud-node-lifecycle"
W0814 10:49:46.943] I0814 10:49:46.938656   53119 controllermanager.go:535] Started "endpoint"
W0814 10:49:46.944] W0814 10:49:46.938719   53119 controllermanager.go:527] Skipping "csrsigning"
W0814 10:49:46.944] I0814 10:49:46.938990   53119 endpoints_controller.go:170] Starting endpoint controller
W0814 10:49:46.944] I0814 10:49:46.939016   53119 controller_utils.go:1029] Waiting for caches to sync for endpoint controller
W0814 10:49:46.959] W0814 10:49:46.959165   53119 actual_state_of_world.go:506] Failed to update statusUpdateNeeded field in actual state of world: Failed to set statusUpdateNeeded to needed true, because nodeName="127.0.0.1" does not exist
W0814 10:49:46.978] I0814 10:49:46.977843   53119 controller_utils.go:1036] Caches are synced for certificate controller
W0814 10:49:47.039] I0814 10:49:47.038552   53119 controller_utils.go:1036] Caches are synced for TTL controller
W0814 10:49:47.039] I0814 10:49:47.039270   53119 controller_utils.go:1036] Caches are synced for endpoint controller
W0814 10:49:47.047] I0814 10:49:47.046742   53119 controller_utils.go:1036] Caches are synced for ReplicaSet controller
W0814 10:49:47.052] I0814 10:49:47.051399   53119 controller_utils.go:1036] Caches are synced for PVC protection controller
W0814 10:49:47.053] I0814 10:49:47.052729   53119 controller_utils.go:1036] Caches are synced for PV protection controller
... skipping 26 lines ...
W0814 10:49:47.429] I0814 10:49:47.266873   53119 controller_utils.go:1036] Caches are synced for persistent volume controller
W0814 10:49:47.429] I0814 10:49:47.352683   53119 controller_utils.go:1036] Caches are synced for HPA controller
W0814 10:49:47.477] I0814 10:49:47.476431   53119 controller_utils.go:1036] Caches are synced for namespace controller
W0814 10:49:47.478] I0814 10:49:47.477882   53119 controller_utils.go:1036] Caches are synced for service account controller
W0814 10:49:47.478] I0814 10:49:47.477894   53119 controller_utils.go:1036] Caches are synced for ClusterRoleAggregator controller
W0814 10:49:47.482] I0814 10:49:47.481756   49657 controller.go:606] quota admission added evaluator for: serviceaccounts
W0814 10:49:47.493] E0814 10:49:47.492249   53119 clusterroleaggregation_controller.go:180] view failed with : Operation cannot be fulfilled on clusterroles.rbac.authorization.k8s.io "view": the object has been modified; please apply your changes to the latest version and try again
W0814 10:49:47.537] I0814 10:49:47.536401   53119 controller_utils.go:1036] Caches are synced for daemon sets controller
W0814 10:49:47.553] I0814 10:49:47.552767   53119 controller_utils.go:1036] Caches are synced for taint controller
W0814 10:49:47.554] I0814 10:49:47.553360   53119 node_lifecycle_controller.go:1189] Initializing eviction metric for zone: 
W0814 10:49:47.555] I0814 10:49:47.553674   53119 node_lifecycle_controller.go:1039] Controller detected that all Nodes are not-Ready. Entering master disruption mode.
W0814 10:49:47.555] I0814 10:49:47.553869   53119 taint_manager.go:186] Starting NoExecuteTaintManager
W0814 10:49:47.555] I0814 10:49:47.554045   53119 event.go:255] Event(v1.ObjectReference{Kind:"Node", Namespace:"", Name:"127.0.0.1", UID:"37d010bb-bf09-4666-8cf2-78ff7b79a62e", APIVersion:"", ResourceVersion:"", FieldPath:""}): type: 'Normal' reason: 'RegisteredNode' Node 127.0.0.1 event: Registered Node 127.0.0.1 in Controller
... skipping 67 lines ...
I0814 10:49:50.803] +++ working dir: /go/src/k8s.io/kubernetes
I0814 10:49:50.805] +++ command: run_RESTMapper_evaluation_tests
I0814 10:49:50.818] +++ [0814 10:49:50] Creating namespace namespace-1565779790-12887
I0814 10:49:50.896] namespace/namespace-1565779790-12887 created
I0814 10:49:50.970] Context "test" modified.
I0814 10:49:50.977] +++ [0814 10:49:50] Testing RESTMapper
I0814 10:49:51.083] +++ [0814 10:49:51] "kubectl get unknownresourcetype" returns error as expected: error: the server doesn't have a resource type "unknownresourcetype"
I0814 10:49:51.097] +++ exit code: 0
I0814 10:49:51.215] NAME                              SHORTNAMES   APIGROUP                       NAMESPACED   KIND
I0814 10:49:51.216] bindings                                                                      true         Binding
I0814 10:49:51.217] componentstatuses                 cs                                          false        ComponentStatus
I0814 10:49:51.217] configmaps                        cm                                          true         ConfigMap
I0814 10:49:51.218] endpoints                         ep                                          true         Endpoints
... skipping 643 lines ...
I0814 10:50:09.499] core.sh:186: Successful get pods {{range.items}}{{.metadata.name}}:{{end}}: valid-pod:
I0814 10:50:09.667] core.sh:190: Successful get pods {{range.items}}{{.metadata.name}}:{{end}}: valid-pod:
I0814 10:50:09.768] core.sh:194: Successful get pods {{range.items}}{{.metadata.name}}:{{end}}: valid-pod:
I0814 10:50:09.935] core.sh:198: Successful get pods {{range.items}}{{.metadata.name}}:{{end}}: valid-pod:
I0814 10:50:10.032] core.sh:202: Successful get pods {{range.items}}{{.metadata.name}}:{{end}}: valid-pod:
I0814 10:50:10.164] pod "valid-pod" force deleted
W0814 10:50:10.265] error: resource(s) were provided, but no name, label selector, or --all flag specified
W0814 10:50:10.265] error: setting 'all' parameter but found a non empty selector. 
W0814 10:50:10.266] warning: Immediate deletion does not wait for confirmation that the running resource has been terminated. The resource may continue to run on the cluster indefinitely.
I0814 10:50:10.367] core.sh:206: Successful get pods -l'name in (valid-pod)' {{range.items}}{{.metadata.name}}:{{end}}: 
I0814 10:50:10.456] core.sh:211: Successful get namespaces {{range.items}}{{ if eq .metadata.name \"test-kubectl-describe-pod\" }}found{{end}}{{end}}:: :
I0814 10:50:10.569] (Bnamespace/test-kubectl-describe-pod created
I0814 10:50:10.706] core.sh:215: Successful get namespaces/test-kubectl-describe-pod {{.metadata.name}}: test-kubectl-describe-pod
I0814 10:50:10.837] (Bcore.sh:219: Successful get secrets --namespace=test-kubectl-describe-pod {{range.items}}{{.metadata.name}}:{{end}}: 
... skipping 11 lines ...
I0814 10:50:12.189] poddisruptionbudget.policy/test-pdb-3 created
I0814 10:50:12.334] core.sh:251: Successful get pdb/test-pdb-3 --namespace=test-kubectl-describe-pod {{.spec.maxUnavailable}}: 2
I0814 10:50:12.444] (Bpoddisruptionbudget.policy/test-pdb-4 created
I0814 10:50:12.587] core.sh:255: Successful get pdb/test-pdb-4 --namespace=test-kubectl-describe-pod {{.spec.maxUnavailable}}: 50%
I0814 10:50:12.819] (Bcore.sh:261: Successful get pods --namespace=test-kubectl-describe-pod {{range.items}}{{.metadata.name}}:{{end}}: 
I0814 10:50:13.086] (Bpod/env-test-pod created
W0814 10:50:13.187] error: min-available and max-unavailable cannot be both specified
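core.sh:251 and core.sh:255 read back maxUnavailable values of 2 and 50%, and the stderr line above shows that a budget cannot carry both bounds at once. A minimal sketch using kubectl's pdb helper; the app=rails selector is an assumption, since the selector used by the real script is not visible in this excerpt:

kubectl create poddisruptionbudget test-pdb-3 --selector=app=rails --max-unavailable=2
kubectl create poddisruptionbudget test-pdb-4 --selector=app=rails --max-unavailable=50%
# rejected: a PodDisruptionBudget may set minAvailable or maxUnavailable, not both
kubectl create poddisruptionbudget test-pdb-err --selector=app=rails --min-available=1 --max-unavailable=1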
I0814 10:50:13.382] core.sh:264: Successful describe pods --namespace=test-kubectl-describe-pod env-test-pod:
I0814 10:50:13.383] Name:         env-test-pod
I0814 10:50:13.383] Namespace:    test-kubectl-describe-pod
I0814 10:50:13.383] Priority:     0
I0814 10:50:13.383] Node:         <none>
I0814 10:50:13.383] Labels:       <none>
... skipping 173 lines ...
I0814 10:50:27.378] (Bpod/valid-pod patched
I0814 10:50:27.474] core.sh:470: Successful get pods {{range.items}}{{(index .spec.containers 0).image}}:{{end}}: changed-with-yaml:
I0814 10:50:27.553] (Bpod/valid-pod patched
I0814 10:50:27.646] core.sh:475: Successful get pods {{range.items}}{{(index .spec.containers 0).image}}:{{end}}: k8s.gcr.io/pause:3.1:
I0814 10:50:27.822] (Bpod/valid-pod patched
I0814 10:50:27.922] core.sh:491: Successful get pods {{range.items}}{{(index .spec.containers 0).image}}:{{end}}: nginx:
I0814 10:50:28.105] (B+++ [0814 10:50:28] "kubectl patch with resourceVersion 497" returns error as expected: Error from server (Conflict): Operation cannot be fulfilled on pods "valid-pod": the object has been modified; please apply your changes to the latest version and try again
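The Conflict above is optimistic concurrency: the patch pins resourceVersion 497, which is no longer the latest version of the pod. A minimal sketch of forcing the same failure, assuming the valid-pod object and its kubernetes-serve-hostname container from this run; the bump label exists only to move the resourceVersion forward:

OLD_RV=$(kubectl get pod valid-pod -o jsonpath='{.metadata.resourceVersion}')
kubectl label pod valid-pod bump=1 --overwrite     # any write advances the resourceVersion
kubectl patch pod valid-pod \
  -p '{"metadata":{"resourceVersion":"'"$OLD_RV"'"},"spec":{"containers":[{"name":"kubernetes-serve-hostname","image":"nginx"}]}}'
# refused with the same Conflict, because OLD_RV is stale by the time the patch arrives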
I0814 10:50:28.360] pod "valid-pod" deleted
I0814 10:50:28.372] pod/valid-pod replaced
I0814 10:50:28.472] core.sh:515: Successful get pod valid-pod {{(index .spec.containers 0).name}}: replaced-k8s-serve-hostname
I0814 10:50:28.633] (BSuccessful
I0814 10:50:28.633] message:error: --grace-period must have --force specified
I0814 10:50:28.633] has:\-\-grace-period must have \-\-force specified
I0814 10:50:28.786] Successful
I0814 10:50:28.786] message:error: --timeout must have --force specified
I0814 10:50:28.786] has:\-\-timeout must have \-\-force specified
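Both messages appear to come from kubectl replace: --grace-period and --timeout only apply when replace is allowed to delete and recreate the object, so each requires --force. A minimal sketch, assuming a local pod.yaml stands in for whatever manifest the test actually uses:

kubectl replace --grace-period=1 -f pod.yaml           # rejected: --grace-period must have --force specified
kubectl replace --timeout=1m -f pod.yaml               # rejected: --timeout must have --force specified
kubectl replace --force --grace-period=1 -f pod.yaml   # delete-and-recreate, allowing one second of graceful shutdown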
I0814 10:50:28.941] node/node-v1-test created
W0814 10:50:29.042] W0814 10:50:28.941294   53119 actual_state_of_world.go:506] Failed to update statusUpdateNeeded field in actual state of world: Failed to set statusUpdateNeeded to needed true, because nodeName="node-v1-test" does not exist
I0814 10:50:29.142] node/node-v1-test replaced
I0814 10:50:29.208] core.sh:552: Successful get node node-v1-test {{.metadata.annotations.a}}: b
I0814 10:50:29.295] (Bnode "node-v1-test" deleted
I0814 10:50:29.394] core.sh:559: Successful get pods {{range.items}}{{(index .spec.containers 0).image}}:{{end}}: nginx:
I0814 10:50:29.681] (Bcore.sh:562: Successful get pods {{range.items}}{{(index .spec.containers 0).image}}:{{end}}: k8s.gcr.io/serve_hostname:
I0814 10:50:30.708] (Bcore.sh:575: Successful get pod valid-pod {{.metadata.labels.name}}: valid-pod
... skipping 66 lines ...
I0814 10:50:34.968] save-config.sh:31: Successful get pods {{range.items}}{{.metadata.name}}:{{end}}: 
I0814 10:50:35.143] (Bpod/test-pod created
W0814 10:50:35.244] Edit cancelled, no changes made.
W0814 10:50:35.245] Edit cancelled, no changes made.
W0814 10:50:35.245] Edit cancelled, no changes made.
W0814 10:50:35.245] Edit cancelled, no changes made.
W0814 10:50:35.245] error: 'name' already has a value (valid-pod), and --overwrite is false
W0814 10:50:35.245] warning: Immediate deletion does not wait for confirmation that the running resource has been terminated. The resource may continue to run on the cluster indefinitely.
W0814 10:50:35.246] Warning: kubectl apply should be used on resource created by either kubectl create --save-config or kubectl apply
I0814 10:50:35.346] pod "test-pod" deleted
I0814 10:50:35.346] +++ [0814 10:50:35] Creating namespace namespace-1565779835-22048
I0814 10:50:35.423] namespace/namespace-1565779835-22048 created
I0814 10:50:35.499] Context "test" modified.
... skipping 41 lines ...
I0814 10:50:38.794] +++ Running case: test-cmd.run_kubectl_create_error_tests 
I0814 10:50:38.796] +++ working dir: /go/src/k8s.io/kubernetes
I0814 10:50:38.799] +++ command: run_kubectl_create_error_tests
I0814 10:50:38.811] +++ [0814 10:50:38] Creating namespace namespace-1565779838-23775
I0814 10:50:38.891] namespace/namespace-1565779838-23775 created
I0814 10:50:38.970] Context "test" modified.
I0814 10:50:38.977] +++ [0814 10:50:38] Testing kubectl create with error
W0814 10:50:39.078] Error: must specify one of -f and -k
W0814 10:50:39.079] 
W0814 10:50:39.079] Create a resource from a file or from stdin.
W0814 10:50:39.079] 
W0814 10:50:39.080]  JSON and YAML formats are accepted.
W0814 10:50:39.080] 
W0814 10:50:39.080] Examples:
... skipping 41 lines ...
W0814 10:50:39.085] 
W0814 10:50:39.085] Usage:
W0814 10:50:39.085]   kubectl create -f FILENAME [options]
W0814 10:50:39.085] 
W0814 10:50:39.085] Use "kubectl <command> --help" for more information about a given command.
W0814 10:50:39.085] Use "kubectl options" for a list of global command-line options (applies to all commands).
I0814 10:50:39.220] +++ [0814 10:50:39] "kubectl create with empty string list returns error as expected: error: error validating "hack/testdata/invalid-rc-with-empty-args.yaml": error validating data: ValidationError(ReplicationController.spec.template.spec.containers[0].args): unknown object type "nil" in ReplicationController.spec.template.spec.containers[0].args[0]; if you choose to ignore these errors, turn validation off with --validate=false
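The usage text above is kubectl create refusing to run without an input source, and the status line just logged shows client-side validation catching a nil entry in a container args list, together with the documented escape hatch. A minimal sketch of the three paths; the kustomization directory is illustrative, the invalid manifest path is the one named in the log:

kubectl create                                                                      # rejected: must specify one of -f and -k
kubectl create -f hack/testdata/invalid-rc-with-empty-args.yaml                     # rejected by client-side schema validation
kubectl create -f hack/testdata/invalid-rc-with-empty-args.yaml --validate=false    # skip client validation; the server still has the final say
kubectl create -k path/to/kustomization/                                            # -k consumes a kustomization directory instead of -f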
W0814 10:50:39.321] kubectl convert is DEPRECATED and will be removed in a future version.
W0814 10:50:39.321] In order to convert, kubectl apply the object to the cluster, then kubectl get at the desired version.
I0814 10:50:39.422] +++ exit code: 0
I0814 10:50:39.453] Recording: run_kubectl_apply_tests
I0814 10:50:39.453] Running command: run_kubectl_apply_tests
I0814 10:50:39.478] 
... skipping 19 lines ...
W0814 10:50:41.624] I0814 10:50:41.623810   49657 client.go:354] parsed scheme: ""
W0814 10:50:41.625] I0814 10:50:41.623919   49657 client.go:354] scheme "" not registered, fallback to default scheme
W0814 10:50:41.625] I0814 10:50:41.623993   49657 asm_amd64.s:1337] ccResolverWrapper: sending new addresses to cc: [{127.0.0.1:2379 0  <nil>}]
W0814 10:50:41.625] I0814 10:50:41.624158   49657 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
W0814 10:50:41.626] I0814 10:50:41.625140   49657 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
W0814 10:50:41.628] I0814 10:50:41.627712   49657 controller.go:606] quota admission added evaluator for: resources.mygroup.example.com
W0814 10:50:41.716] Error from server (NotFound): resources.mygroup.example.com "myobj" not found
I0814 10:50:41.817] kind.mygroup.example.com/myobj serverside-applied (server dry run)
I0814 10:50:41.818] customresourcedefinition.apiextensions.k8s.io "resources.mygroup.example.com" deleted
I0814 10:50:41.842] +++ exit code: 0
I0814 10:50:41.878] Recording: run_kubectl_run_tests
I0814 10:50:41.879] Running command: run_kubectl_run_tests
I0814 10:50:41.900] 
... skipping 92 lines ...
I0814 10:50:44.392] Context "test" modified.
I0814 10:50:44.398] +++ [0814 10:50:44] Testing kubectl create filter
I0814 10:50:44.487] create.sh:30: Successful get pods {{range.items}}{{.metadata.name}}:{{end}}: 
I0814 10:50:44.690] (Bpod/selector-test-pod created
I0814 10:50:44.787] create.sh:34: Successful get pods selector-test-pod {{.metadata.labels.name}}: selector-test-pod
I0814 10:50:44.875] (BSuccessful
I0814 10:50:44.875] message:Error from server (NotFound): pods "selector-test-pod-dont-apply" not found
I0814 10:50:44.876] has:pods "selector-test-pod-dont-apply" not found
I0814 10:50:44.952] pod "selector-test-pod" deleted
I0814 10:50:44.972] +++ exit code: 0
I0814 10:50:45.004] Recording: run_kubectl_apply_deployments_tests
I0814 10:50:45.005] Running command: run_kubectl_apply_deployments_tests
I0814 10:50:45.026] 
... skipping 34 lines ...
W0814 10:50:47.399] I0814 10:50:47.302249   53119 event.go:255] Event(v1.ObjectReference{Kind:"Deployment", Namespace:"namespace-1565779845-31503", Name:"nginx", UID:"346dfbeb-39ef-4b07-ae21-da7b40ac1fbd", APIVersion:"apps/v1", ResourceVersion:"578", FieldPath:""}): type: 'Normal' reason: 'ScalingReplicaSet' Scaled up replica set nginx-7dbc4d9f to 3
W0814 10:50:47.399] I0814 10:50:47.307758   53119 event.go:255] Event(v1.ObjectReference{Kind:"ReplicaSet", Namespace:"namespace-1565779845-31503", Name:"nginx-7dbc4d9f", UID:"1819778d-4715-4b81-9085-601901b49747", APIVersion:"apps/v1", ResourceVersion:"579", FieldPath:""}): type: 'Normal' reason: 'SuccessfulCreate' Created pod: nginx-7dbc4d9f-dhpkp
W0814 10:50:47.400] I0814 10:50:47.315863   53119 event.go:255] Event(v1.ObjectReference{Kind:"ReplicaSet", Namespace:"namespace-1565779845-31503", Name:"nginx-7dbc4d9f", UID:"1819778d-4715-4b81-9085-601901b49747", APIVersion:"apps/v1", ResourceVersion:"579", FieldPath:""}): type: 'Normal' reason: 'SuccessfulCreate' Created pod: nginx-7dbc4d9f-n9tm7
W0814 10:50:47.400] I0814 10:50:47.316050   53119 event.go:255] Event(v1.ObjectReference{Kind:"ReplicaSet", Namespace:"namespace-1565779845-31503", Name:"nginx-7dbc4d9f", UID:"1819778d-4715-4b81-9085-601901b49747", APIVersion:"apps/v1", ResourceVersion:"579", FieldPath:""}): type: 'Normal' reason: 'SuccessfulCreate' Created pod: nginx-7dbc4d9f-6ccqr
I0814 10:50:47.501] apps.sh:148: Successful get deployment nginx {{.metadata.name}}: nginx
I0814 10:50:51.628] (BSuccessful
I0814 10:50:51.628] message:Error from server (Conflict): error when applying patch:
I0814 10:50:51.629] {"metadata":{"annotations":{"kubectl.kubernetes.io/last-applied-configuration":"{\"apiVersion\":\"apps/v1\",\"kind\":\"Deployment\",\"metadata\":{\"annotations\":{},\"labels\":{\"name\":\"nginx\"},\"name\":\"nginx\",\"namespace\":\"namespace-1565779845-31503\",\"resourceVersion\":\"99\"},\"spec\":{\"replicas\":3,\"selector\":{\"matchLabels\":{\"name\":\"nginx2\"}},\"template\":{\"metadata\":{\"labels\":{\"name\":\"nginx2\"}},\"spec\":{\"containers\":[{\"image\":\"k8s.gcr.io/nginx:test-cmd\",\"name\":\"nginx\",\"ports\":[{\"containerPort\":80}]}]}}}}\n"},"resourceVersion":"99"},"spec":{"selector":{"matchLabels":{"name":"nginx2"}},"template":{"metadata":{"labels":{"name":"nginx2"}}}}}
I0814 10:50:51.629] to:
I0814 10:50:51.629] Resource: "apps/v1, Resource=deployments", GroupVersionKind: "apps/v1, Kind=Deployment"
I0814 10:50:51.630] Name: "nginx", Namespace: "namespace-1565779845-31503"
I0814 10:50:51.633] Object: &{map["apiVersion":"apps/v1" "kind":"Deployment" "metadata":map["annotations":map["deployment.kubernetes.io/revision":"1" "kubectl.kubernetes.io/last-applied-configuration":"{\"apiVersion\":\"apps/v1\",\"kind\":\"Deployment\",\"metadata\":{\"annotations\":{},\"labels\":{\"name\":\"nginx\"},\"name\":\"nginx\",\"namespace\":\"namespace-1565779845-31503\"},\"spec\":{\"replicas\":3,\"selector\":{\"matchLabels\":{\"name\":\"nginx1\"}},\"template\":{\"metadata\":{\"labels\":{\"name\":\"nginx1\"}},\"spec\":{\"containers\":[{\"image\":\"k8s.gcr.io/nginx:test-cmd\",\"name\":\"nginx\",\"ports\":[{\"containerPort\":80}]}]}}}}\n"] "creationTimestamp":"2019-08-14T10:50:47Z" "generation":'\x01' "labels":map["name":"nginx"] "managedFields":[map["apiVersion":"apps/v1" "fields":map["f:metadata":map["f:annotations":map["f:deployment.kubernetes.io/revision":map[]]] "f:status":map["f:conditions":map[".":map[] "k:{\"type\":\"Available\"}":map[".":map[] "f:lastTransitionTime":map[] "f:lastUpdateTime":map[] "f:message":map[] "f:reason":map[] "f:status":map[] "f:type":map[]] "k:{\"type\":\"Progressing\"}":map[".":map[] "f:lastTransitionTime":map[] "f:lastUpdateTime":map[] "f:message":map[] "f:reason":map[] "f:status":map[] "f:type":map[]]] "f:observedGeneration":map[] "f:replicas":map[] "f:unavailableReplicas":map[] "f:updatedReplicas":map[]]] "manager":"kube-controller-manager" "operation":"Update" "time":"2019-08-14T10:50:47Z"] map["apiVersion":"apps/v1" "fields":map["f:metadata":map["f:annotations":map[".":map[] "f:kubectl.kubernetes.io/last-applied-configuration":map[]] "f:labels":map[".":map[] "f:name":map[]]] "f:spec":map["f:progressDeadlineSeconds":map[] "f:replicas":map[] "f:revisionHistoryLimit":map[] "f:selector":map["f:matchLabels":map[".":map[] "f:name":map[]]] "f:strategy":map["f:rollingUpdate":map[".":map[] "f:maxSurge":map[] "f:maxUnavailable":map[]] "f:type":map[]] "f:template":map["f:metadata":map["f:labels":map[".":map[] "f:name":map[]]] "f:spec":map["f:containers":map["k:{\"name\":\"nginx\"}":map[".":map[] "f:image":map[] "f:imagePullPolicy":map[] "f:name":map[] "f:ports":map[".":map[] "k:{\"containerPort\":80,\"protocol\":\"TCP\"}":map[".":map[] "f:containerPort":map[] "f:protocol":map[]]] "f:resources":map[] "f:terminationMessagePath":map[] "f:terminationMessagePolicy":map[]]] "f:dnsPolicy":map[] "f:restartPolicy":map[] "f:schedulerName":map[] "f:securityContext":map[] "f:terminationGracePeriodSeconds":map[]]]]] "manager":"kubectl" "operation":"Update" "time":"2019-08-14T10:50:47Z"]] "name":"nginx" "namespace":"namespace-1565779845-31503" "resourceVersion":"591" "selfLink":"/apis/apps/v1/namespaces/namespace-1565779845-31503/deployments/nginx" "uid":"346dfbeb-39ef-4b07-ae21-da7b40ac1fbd"] "spec":map["progressDeadlineSeconds":'\u0258' "replicas":'\x03' "revisionHistoryLimit":'\n' "selector":map["matchLabels":map["name":"nginx1"]] "strategy":map["rollingUpdate":map["maxSurge":"25%" "maxUnavailable":"25%"] "type":"RollingUpdate"] "template":map["metadata":map["creationTimestamp":<nil> "labels":map["name":"nginx1"]] "spec":map["containers":[map["image":"k8s.gcr.io/nginx:test-cmd" "imagePullPolicy":"IfNotPresent" "name":"nginx" "ports":[map["containerPort":'P' "protocol":"TCP"]] "resources":map[] "terminationMessagePath":"/dev/termination-log" "terminationMessagePolicy":"File"]] "dnsPolicy":"ClusterFirst" "restartPolicy":"Always" "schedulerName":"default-scheduler" "securityContext":map[] "terminationGracePeriodSeconds":'\x1e']]] 
"status":map["conditions":[map["lastTransitionTime":"2019-08-14T10:50:47Z" "lastUpdateTime":"2019-08-14T10:50:47Z" "message":"Deployment does not have minimum availability." "reason":"MinimumReplicasUnavailable" "status":"False" "type":"Available"] map["lastTransitionTime":"2019-08-14T10:50:47Z" "lastUpdateTime":"2019-08-14T10:50:47Z" "message":"ReplicaSet \"nginx-7dbc4d9f\" is progressing." "reason":"ReplicaSetUpdated" "status":"True" "type":"Progressing"]] "observedGeneration":'\x01' "replicas":'\x03' "unavailableReplicas":'\x03' "updatedReplicas":'\x03']]}
I0814 10:50:51.633] for: "hack/testdata/deployment-label-change2.yaml": Operation cannot be fulfilled on deployments.apps "nginx": the object has been modified; please apply your changes to the latest version and try again
I0814 10:50:51.634] has:Error from server (Conflict)
W0814 10:50:53.203] I0814 10:50:53.202144   53119 horizontal.go:341] Horizontal Pod Autoscaler frontend has been deleted in namespace-1565779836-891
W0814 10:50:55.956] E0814 10:50:55.955584   53119 replica_set.go:450] Sync "namespace-1565779845-31503/nginx-7dbc4d9f" failed with Operation cannot be fulfilled on replicasets.apps "nginx-7dbc4d9f": StorageError: invalid object, Code: 4, Key: /registry/replicasets/namespace-1565779845-31503/nginx-7dbc4d9f, ResourceVersion: 0, AdditionalErrorMsg: Precondition failed: UID in precondition: 1819778d-4715-4b81-9085-601901b49747, UID in object meta: 
I0814 10:50:56.927] deployment.apps/nginx configured
W0814 10:50:57.028] I0814 10:50:56.932337   53119 event.go:255] Event(v1.ObjectReference{Kind:"Deployment", Namespace:"namespace-1565779845-31503", Name:"nginx", UID:"508b8bf9-dc6f-4bf7-9e6d-a8440d1d36a8", APIVersion:"apps/v1", ResourceVersion:"615", FieldPath:""}): type: 'Normal' reason: 'ScalingReplicaSet' Scaled up replica set nginx-594f77b9f6 to 3
W0814 10:50:57.029] I0814 10:50:56.936760   53119 event.go:255] Event(v1.ObjectReference{Kind:"ReplicaSet", Namespace:"namespace-1565779845-31503", Name:"nginx-594f77b9f6", UID:"5f897123-85e8-4054-96a4-b6f477a973f7", APIVersion:"apps/v1", ResourceVersion:"616", FieldPath:""}): type: 'Normal' reason: 'SuccessfulCreate' Created pod: nginx-594f77b9f6-29l69
W0814 10:50:57.029] I0814 10:50:56.943914   53119 event.go:255] Event(v1.ObjectReference{Kind:"ReplicaSet", Namespace:"namespace-1565779845-31503", Name:"nginx-594f77b9f6", UID:"5f897123-85e8-4054-96a4-b6f477a973f7", APIVersion:"apps/v1", ResourceVersion:"616", FieldPath:""}): type: 'Normal' reason: 'SuccessfulCreate' Created pod: nginx-594f77b9f6-tdncj
W0814 10:50:57.030] I0814 10:50:56.947179   53119 event.go:255] Event(v1.ObjectReference{Kind:"ReplicaSet", Namespace:"namespace-1565779845-31503", Name:"nginx-594f77b9f6", UID:"5f897123-85e8-4054-96a4-b6f477a973f7", APIVersion:"apps/v1", ResourceVersion:"616", FieldPath:""}): type: 'Normal' reason: 'SuccessfulCreate' Created pod: nginx-594f77b9f6-rzkrq
I0814 10:50:57.130] Successful
I0814 10:50:57.131] message:        "name": "nginx2"
I0814 10:50:57.131]           "name": "nginx2"
I0814 10:50:57.131] has:"name": "nginx2"
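The long Conflict message above is apply hitting optimistic concurrency: the manifest being applied carries resourceVersion "99" (visible in the patch), so the server keeps refusing it. The new ReplicaSet events and replica_set.go sync errors just above look like the follow-up of an apply --force, which deletes the Deployment and recreates it from the manifest, after which the selector reads nginx2. A minimal sketch, assuming the file name printed in the log:

kubectl apply -f hack/testdata/deployment-label-change2.yaml                     # Conflict: the manifest pins a stale resourceVersion
kubectl apply -f hack/testdata/deployment-label-change2.yaml --force             # delete and recreate instead of patching
kubectl get deployment nginx -o jsonpath='{.spec.selector.matchLabels.name}'     # now reports nginx2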
W0814 10:51:01.305] E0814 10:51:01.304353   53119 replica_set.go:450] Sync "namespace-1565779845-31503/nginx-594f77b9f6" failed with Operation cannot be fulfilled on replicasets.apps "nginx-594f77b9f6": StorageError: invalid object, Code: 4, Key: /registry/replicasets/namespace-1565779845-31503/nginx-594f77b9f6, ResourceVersion: 0, AdditionalErrorMsg: Precondition failed: UID in precondition: 5f897123-85e8-4054-96a4-b6f477a973f7, UID in object meta: 
W0814 10:51:02.294] I0814 10:51:02.293058   53119 event.go:255] Event(v1.ObjectReference{Kind:"Deployment", Namespace:"namespace-1565779845-31503", Name:"nginx", UID:"2eb2e060-eb96-40e6-83dd-c4238f2624e8", APIVersion:"apps/v1", ResourceVersion:"647", FieldPath:""}): type: 'Normal' reason: 'ScalingReplicaSet' Scaled up replica set nginx-594f77b9f6 to 3
W0814 10:51:02.298] I0814 10:51:02.297797   53119 event.go:255] Event(v1.ObjectReference{Kind:"ReplicaSet", Namespace:"namespace-1565779845-31503", Name:"nginx-594f77b9f6", UID:"6ebc898a-f3e9-437e-afbf-0087e51db6e2", APIVersion:"apps/v1", ResourceVersion:"648", FieldPath:""}): type: 'Normal' reason: 'SuccessfulCreate' Created pod: nginx-594f77b9f6-thncx
W0814 10:51:02.302] I0814 10:51:02.301787   53119 event.go:255] Event(v1.ObjectReference{Kind:"ReplicaSet", Namespace:"namespace-1565779845-31503", Name:"nginx-594f77b9f6", UID:"6ebc898a-f3e9-437e-afbf-0087e51db6e2", APIVersion:"apps/v1", ResourceVersion:"648", FieldPath:""}): type: 'Normal' reason: 'SuccessfulCreate' Created pod: nginx-594f77b9f6-gflb6
W0814 10:51:02.303] I0814 10:51:02.302442   53119 event.go:255] Event(v1.ObjectReference{Kind:"ReplicaSet", Namespace:"namespace-1565779845-31503", Name:"nginx-594f77b9f6", UID:"6ebc898a-f3e9-437e-afbf-0087e51db6e2", APIVersion:"apps/v1", ResourceVersion:"648", FieldPath:""}): type: 'Normal' reason: 'SuccessfulCreate' Created pod: nginx-594f77b9f6-qx66v
I0814 10:51:02.403] Successful
I0814 10:51:02.404] message:The Deployment "nginx" is invalid: spec.template.metadata.labels: Invalid value: map[string]string{"name":"nginx3"}: `selector` does not match template `labels`
... skipping 159 lines ...
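The Invalid error above is API-server validation, separate from the apply machinery: a Deployment's .spec.selector must select its own pod template labels. A minimal sketch of a manifest that satisfies the rule; the nginx-demo name is hypothetical and only the image matches one used in this run:

cat <<'EOF' | kubectl apply -f -
apiVersion: apps/v1
kind: Deployment
metadata:
  name: nginx-demo
spec:
  replicas: 3
  selector:
    matchLabels:
      name: nginx-demo
  template:
    metadata:
      labels:
        name: nginx-demo    # must match .spec.selector.matchLabels, or the server rejects the object
    spec:
      containers:
      - name: nginx
        image: k8s.gcr.io/nginx:test-cmd
EOF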
I0814 10:51:04.309] +++ [0814 10:51:04] Creating namespace namespace-1565779864-28631
I0814 10:51:04.390] namespace/namespace-1565779864-28631 created
I0814 10:51:04.461] Context "test" modified.
I0814 10:51:04.467] +++ [0814 10:51:04] Testing kubectl get
I0814 10:51:04.563] get.sh:29: Successful get pods {{range.items}}{{.metadata.name}}:{{end}}: 
I0814 10:51:04.650] (BSuccessful
I0814 10:51:04.651] message:Error from server (NotFound): pods "abc" not found
I0814 10:51:04.651] has:pods "abc" not found
I0814 10:51:04.742] get.sh:37: Successful get pods {{range.items}}{{.metadata.name}}:{{end}}: 
I0814 10:51:04.828] (BSuccessful
I0814 10:51:04.829] message:Error from server (NotFound): pods "abc" not found
I0814 10:51:04.829] has:pods "abc" not found
I0814 10:51:04.919] get.sh:45: Successful get pods {{range.items}}{{.metadata.name}}:{{end}}: 
I0814 10:51:05.004] (BSuccessful
I0814 10:51:05.004] message:{
I0814 10:51:05.004]     "apiVersion": "v1",
I0814 10:51:05.004]     "items": [],
... skipping 23 lines ...
I0814 10:51:05.343] has not:No resources found
I0814 10:51:05.432] Successful
I0814 10:51:05.432] message:NAME
I0814 10:51:05.432] has not:No resources found
I0814 10:51:05.522] get.sh:73: Successful get pods {{range.items}}{{.metadata.name}}:{{end}}: 
I0814 10:51:05.622] (BSuccessful
I0814 10:51:05.623] message:error: the server doesn't have a resource type "foobar"
I0814 10:51:05.623] has not:No resources found
I0814 10:51:05.707] Successful
I0814 10:51:05.708] message:No resources found in namespace-1565779864-28631 namespace.
I0814 10:51:05.708] has:No resources found
I0814 10:51:05.795] Successful
I0814 10:51:05.796] message:
I0814 10:51:05.796] has not:No resources found
I0814 10:51:05.881] Successful
I0814 10:51:05.881] message:No resources found in namespace-1565779864-28631 namespace.
I0814 10:51:05.881] has:No resources found
I0814 10:51:05.973] get.sh:93: Successful get pods {{range.items}}{{.metadata.name}}:{{end}}: 
I0814 10:51:06.064] (BSuccessful
I0814 10:51:06.065] message:Error from server (NotFound): pods "abc" not found
I0814 10:51:06.065] has:pods "abc" not found
I0814 10:51:06.066] FAIL!
I0814 10:51:06.066] message:Error from server (NotFound): pods "abc" not found
I0814 10:51:06.067] has not:List
I0814 10:51:06.067] 99 /go/src/k8s.io/kubernetes/test/cmd/../../test/cmd/get.sh
I0814 10:51:06.184] Successful
I0814 10:51:06.185] message:I0814 10:51:06.130793   63693 loader.go:375] Config loaded from file:  /tmp/tmp.LD8cK6Yz7f/.kube/config
I0814 10:51:06.185] I0814 10:51:06.137960   63693 round_trippers.go:471] GET http://127.0.0.1:8080/version?timeout=32s 200 OK in 6 milliseconds
I0814 10:51:06.185] I0814 10:51:06.160044   63693 round_trippers.go:471] GET http://127.0.0.1:8080/api/v1/namespaces/default/pods 200 OK in 2 milliseconds
... skipping 660 lines ...
I0814 10:51:11.762] Successful
I0814 10:51:11.762] message:NAME    DATA   AGE
I0814 10:51:11.762] one     0      0s
I0814 10:51:11.762] three   0      0s
I0814 10:51:11.762] two     0      0s
I0814 10:51:11.762] STATUS    REASON          MESSAGE
I0814 10:51:11.762] Failure   InternalError   an error on the server ("unable to decode an event from the watch stream: net/http: request canceled (Client.Timeout exceeded while reading body)") has prevented the request from succeeding
I0814 10:51:11.763] has not:watch is only supported on individual resources
I0814 10:51:12.856] Successful
I0814 10:51:12.856] message:STATUS    REASON          MESSAGE
I0814 10:51:12.857] Failure   InternalError   an error on the server ("unable to decode an event from the watch stream: net/http: request canceled (Client.Timeout exceeded while reading body)") has prevented the request from succeeding
I0814 10:51:12.857] has not:watch is only supported on individual resources
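The InternalError lines are a short client-side request timeout cutting the watch stream (the exact --request-timeout value is not shown in this excerpt); what the assertions check is that the run never hits the "watch is only supported on individual resources" refusal. A minimal sketch of both sides, assuming stock flags and pod names that appear later in this run:

kubectl get configmaps --watch --request-timeout=1s    # one collection: allowed; the stream simply ends when the timeout fires
kubectl get pods valid-pod redis-master --watch         # more than one individual object: kubectl refuses to watch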
I0814 10:51:12.863] +++ [0814 10:51:12] Creating namespace namespace-1565779872-8684
I0814 10:51:12.934] namespace/namespace-1565779872-8684 created
I0814 10:51:13.008] Context "test" modified.
I0814 10:51:13.102] get.sh:157: Successful get pods {{range.items}}{{.metadata.name}}:{{end}}: 
I0814 10:51:13.262] (Bpod/valid-pod created
... skipping 104 lines ...
I0814 10:51:13.365] }
I0814 10:51:13.445] get.sh:162: Successful get pods {{range.items}}{{.metadata.name}}:{{end}}: valid-pod:
I0814 10:51:13.689] (B<no value>Successful
I0814 10:51:13.690] message:valid-pod:
I0814 10:51:13.690] has:valid-pod:
I0814 10:51:13.772] Successful
I0814 10:51:13.772] message:error: error executing jsonpath "{.missing}": Error executing template: missing is not found. Printing more information for debugging the template:
I0814 10:51:13.772] 	template was:
I0814 10:51:13.772] 		{.missing}
I0814 10:51:13.773] 	object given to jsonpath engine was:
I0814 10:51:13.774] 		map[string]interface {}{"apiVersion":"v1", "kind":"Pod", "metadata":map[string]interface {}{"creationTimestamp":"2019-08-14T10:51:13Z", "labels":map[string]interface {}{"name":"valid-pod"}, "managedFields":[]interface {}{map[string]interface {}{"apiVersion":"v1", "fields":map[string]interface {}{"f:metadata":map[string]interface {}{"f:labels":map[string]interface {}{".":map[string]interface {}{}, "f:name":map[string]interface {}{}}}, "f:spec":map[string]interface {}{"f:containers":map[string]interface {}{"k:{\"name\":\"kubernetes-serve-hostname\"}":map[string]interface {}{".":map[string]interface {}{}, "f:image":map[string]interface {}{}, "f:imagePullPolicy":map[string]interface {}{}, "f:name":map[string]interface {}{}, "f:resources":map[string]interface {}{".":map[string]interface {}{}, "f:limits":map[string]interface {}{".":map[string]interface {}{}, "f:cpu":map[string]interface {}{}, "f:memory":map[string]interface {}{}}, "f:requests":map[string]interface {}{".":map[string]interface {}{}, "f:cpu":map[string]interface {}{}, "f:memory":map[string]interface {}{}}}, "f:terminationMessagePath":map[string]interface {}{}, "f:terminationMessagePolicy":map[string]interface {}{}}}, "f:dnsPolicy":map[string]interface {}{}, "f:enableServiceLinks":map[string]interface {}{}, "f:priority":map[string]interface {}{}, "f:restartPolicy":map[string]interface {}{}, "f:schedulerName":map[string]interface {}{}, "f:securityContext":map[string]interface {}{}, "f:terminationGracePeriodSeconds":map[string]interface {}{}}}, "manager":"kubectl", "operation":"Update", "time":"2019-08-14T10:51:13Z"}}, "name":"valid-pod", "namespace":"namespace-1565779872-8684", "resourceVersion":"690", "selfLink":"/api/v1/namespaces/namespace-1565779872-8684/pods/valid-pod", "uid":"eb50c157-727e-4221-800f-22912f5ef625"}, "spec":map[string]interface {}{"containers":[]interface {}{map[string]interface {}{"image":"k8s.gcr.io/serve_hostname", "imagePullPolicy":"Always", "name":"kubernetes-serve-hostname", "resources":map[string]interface {}{"limits":map[string]interface {}{"cpu":"1", "memory":"512Mi"}, "requests":map[string]interface {}{"cpu":"1", "memory":"512Mi"}}, "terminationMessagePath":"/dev/termination-log", "terminationMessagePolicy":"File"}}, "dnsPolicy":"ClusterFirst", "enableServiceLinks":true, "priority":0, "restartPolicy":"Always", "schedulerName":"default-scheduler", "securityContext":map[string]interface {}{}, "terminationGracePeriodSeconds":30}, "status":map[string]interface {}{"phase":"Pending", "qosClass":"Guaranteed"}}
I0814 10:51:13.774] has:missing is not found
I0814 10:51:13.860] Successful
I0814 10:51:13.860] message:Error executing template: template: output:1:2: executing "output" at <.missing>: map has no entry for key "missing". Printing more information for debugging the template:
I0814 10:51:13.860] 	template was:
I0814 10:51:13.860] 		{{.missing}}
I0814 10:51:13.860] 	raw data was:
I0814 10:51:13.862] 		{"apiVersion":"v1","kind":"Pod","metadata":{"creationTimestamp":"2019-08-14T10:51:13Z","labels":{"name":"valid-pod"},"managedFields":[{"apiVersion":"v1","fields":{"f:metadata":{"f:labels":{".":{},"f:name":{}}},"f:spec":{"f:containers":{"k:{\"name\":\"kubernetes-serve-hostname\"}":{".":{},"f:image":{},"f:imagePullPolicy":{},"f:name":{},"f:resources":{".":{},"f:limits":{".":{},"f:cpu":{},"f:memory":{}},"f:requests":{".":{},"f:cpu":{},"f:memory":{}}},"f:terminationMessagePath":{},"f:terminationMessagePolicy":{}}},"f:dnsPolicy":{},"f:enableServiceLinks":{},"f:priority":{},"f:restartPolicy":{},"f:schedulerName":{},"f:securityContext":{},"f:terminationGracePeriodSeconds":{}}},"manager":"kubectl","operation":"Update","time":"2019-08-14T10:51:13Z"}],"name":"valid-pod","namespace":"namespace-1565779872-8684","resourceVersion":"690","selfLink":"/api/v1/namespaces/namespace-1565779872-8684/pods/valid-pod","uid":"eb50c157-727e-4221-800f-22912f5ef625"},"spec":{"containers":[{"image":"k8s.gcr.io/serve_hostname","imagePullPolicy":"Always","name":"kubernetes-serve-hostname","resources":{"limits":{"cpu":"1","memory":"512Mi"},"requests":{"cpu":"1","memory":"512Mi"}},"terminationMessagePath":"/dev/termination-log","terminationMessagePolicy":"File"}],"dnsPolicy":"ClusterFirst","enableServiceLinks":true,"priority":0,"restartPolicy":"Always","schedulerName":"default-scheduler","securityContext":{},"terminationGracePeriodSeconds":30},"status":{"phase":"Pending","qosClass":"Guaranteed"}}
I0814 10:51:13.862] 	object given to template engine was:
I0814 10:51:13.863] 		map[apiVersion:v1 kind:Pod metadata:map[creationTimestamp:2019-08-14T10:51:13Z labels:map[name:valid-pod] managedFields:[map[apiVersion:v1 fields:map[f:metadata:map[f:labels:map[.:map[] f:name:map[]]] f:spec:map[f:containers:map[k:{"name":"kubernetes-serve-hostname"}:map[.:map[] f:image:map[] f:imagePullPolicy:map[] f:name:map[] f:resources:map[.:map[] f:limits:map[.:map[] f:cpu:map[] f:memory:map[]] f:requests:map[.:map[] f:cpu:map[] f:memory:map[]]] f:terminationMessagePath:map[] f:terminationMessagePolicy:map[]]] f:dnsPolicy:map[] f:enableServiceLinks:map[] f:priority:map[] f:restartPolicy:map[] f:schedulerName:map[] f:securityContext:map[] f:terminationGracePeriodSeconds:map[]]] manager:kubectl operation:Update time:2019-08-14T10:51:13Z]] name:valid-pod namespace:namespace-1565779872-8684 resourceVersion:690 selfLink:/api/v1/namespaces/namespace-1565779872-8684/pods/valid-pod uid:eb50c157-727e-4221-800f-22912f5ef625] spec:map[containers:[map[image:k8s.gcr.io/serve_hostname imagePullPolicy:Always name:kubernetes-serve-hostname resources:map[limits:map[cpu:1 memory:512Mi] requests:map[cpu:1 memory:512Mi]] terminationMessagePath:/dev/termination-log terminationMessagePolicy:File]] dnsPolicy:ClusterFirst enableServiceLinks:true priority:0 restartPolicy:Always schedulerName:default-scheduler securityContext:map[] terminationGracePeriodSeconds:30] status:map[phase:Pending qosClass:Guaranteed]]
I0814 10:51:13.863] has:map has no entry for key "missing"
W0814 10:51:13.964] error: error executing template "{{.missing}}": template: output:1:2: executing "output" at <.missing>: map has no entry for key "missing"
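The same missing field fails differently per output path: the jsonpath engine reports "missing is not found", while the go-template engine reports a missing map key (captured once in the test's message buffer and once as deferred stderr). A minimal sketch against the same pod, using only stock output flags:

kubectl get pod valid-pod -o jsonpath='{.metadata.name}'        # prints: valid-pod
kubectl get pod valid-pod -o jsonpath='{.missing}'              # error: missing is not found
kubectl get pod valid-pod -o go-template='{{.metadata.name}}'
kubectl get pod valid-pod -o go-template='{{.missing}}'         # error: map has no entry for key "missing"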
I0814 10:51:14.949] Successful
I0814 10:51:14.950] message:NAME        READY   STATUS    RESTARTS   AGE
I0814 10:51:14.950] valid-pod   0/1     Pending   0          0s
I0814 10:51:14.950] STATUS      REASON          MESSAGE
I0814 10:51:14.950] Failure     InternalError   an error on the server ("unable to decode an event from the watch stream: net/http: request canceled (Client.Timeout exceeded while reading body)") has prevented the request from succeeding
I0814 10:51:14.950] has:STATUS
I0814 10:51:14.952] Successful
I0814 10:51:14.952] message:NAME        READY   STATUS    RESTARTS   AGE
I0814 10:51:14.952] valid-pod   0/1     Pending   0          0s
I0814 10:51:14.953] STATUS      REASON          MESSAGE
I0814 10:51:14.953] Failure     InternalError   an error on the server ("unable to decode an event from the watch stream: net/http: request canceled (Client.Timeout exceeded while reading body)") has prevented the request from succeeding
I0814 10:51:14.953] has:valid-pod
I0814 10:51:16.041] Successful
I0814 10:51:16.042] message:pod/valid-pod
I0814 10:51:16.042] has not:STATUS
I0814 10:51:16.044] Successful
I0814 10:51:16.045] message:pod/valid-pod
... skipping 144 lines ...
I0814 10:51:17.166] status:
I0814 10:51:17.166]   phase: Pending
I0814 10:51:17.166]   qosClass: Guaranteed
I0814 10:51:17.166] ---
I0814 10:51:17.166] has:name: valid-pod
I0814 10:51:17.244] Successful
I0814 10:51:17.244] message:Error from server (NotFound): pods "invalid-pod" not found
I0814 10:51:17.244] has:"invalid-pod" not found
I0814 10:51:17.337] pod "valid-pod" deleted
I0814 10:51:17.444] get.sh:200: Successful get pods {{range.items}}{{.metadata.name}}:{{end}}: 
I0814 10:51:17.604] (Bpod/redis-master created
I0814 10:51:17.609] pod/valid-pod created
I0814 10:51:17.716] Successful
... skipping 35 lines ...
I0814 10:51:18.994] +++ command: run_kubectl_exec_pod_tests
I0814 10:51:19.008] +++ [0814 10:51:19] Creating namespace namespace-1565779879-29142
I0814 10:51:19.090] namespace/namespace-1565779879-29142 created
I0814 10:51:19.175] Context "test" modified.
I0814 10:51:19.182] +++ [0814 10:51:19] Testing kubectl exec POD COMMAND
I0814 10:51:19.273] Successful
I0814 10:51:19.273] message:Error from server (NotFound): pods "abc" not found
I0814 10:51:19.273] has:pods "abc" not found
I0814 10:51:19.442] pod/test-pod created
I0814 10:51:19.562] Successful
I0814 10:51:19.562] message:Error from server (BadRequest): pod test-pod does not have a host assigned
I0814 10:51:19.562] has not:pods "test-pod" not found
I0814 10:51:19.564] Successful
I0814 10:51:19.565] message:Error from server (BadRequest): pod test-pod does not have a host assigned
I0814 10:51:19.565] has not:pod or type/name must be specified
I0814 10:51:19.658] pod "test-pod" deleted
I0814 10:51:19.680] +++ exit code: 0
I0814 10:51:19.717] Recording: run_kubectl_exec_resource_name_tests
I0814 10:51:19.717] Running command: run_kubectl_exec_resource_name_tests
I0814 10:51:19.740] 
... skipping 2 lines ...
I0814 10:51:19.748] +++ command: run_kubectl_exec_resource_name_tests
I0814 10:51:19.760] +++ [0814 10:51:19] Creating namespace namespace-1565779879-14198
I0814 10:51:19.846] namespace/namespace-1565779879-14198 created
I0814 10:51:19.923] Context "test" modified.
I0814 10:51:19.931] +++ [0814 10:51:19] Testing kubectl exec TYPE/NAME COMMAND
I0814 10:51:20.048] Successful
I0814 10:51:20.049] message:error: the server doesn't have a resource type "foo"
I0814 10:51:20.049] has:error:
I0814 10:51:20.143] Successful
I0814 10:51:20.143] message:Error from server (NotFound): deployments.apps "bar" not found
I0814 10:51:20.143] has:"bar" not found
I0814 10:51:20.310] pod/test-pod created
I0814 10:51:20.482] replicaset.apps/frontend created
W0814 10:51:20.583] I0814 10:51:20.487029   53119 event.go:255] Event(v1.ObjectReference{Kind:"ReplicaSet", Namespace:"namespace-1565779879-14198", Name:"frontend", UID:"57155df3-d6fb-4c68-8e4d-4f02b385519b", APIVersion:"apps/v1", ResourceVersion:"743", FieldPath:""}): type: 'Normal' reason: 'SuccessfulCreate' Created pod: frontend-w6s2p
W0814 10:51:20.583] I0814 10:51:20.491526   53119 event.go:255] Event(v1.ObjectReference{Kind:"ReplicaSet", Namespace:"namespace-1565779879-14198", Name:"frontend", UID:"57155df3-d6fb-4c68-8e4d-4f02b385519b", APIVersion:"apps/v1", ResourceVersion:"743", FieldPath:""}): type: 'Normal' reason: 'SuccessfulCreate' Created pod: frontend-268st
W0814 10:51:20.584] I0814 10:51:20.491941   53119 event.go:255] Event(v1.ObjectReference{Kind:"ReplicaSet", Namespace:"namespace-1565779879-14198", Name:"frontend", UID:"57155df3-d6fb-4c68-8e4d-4f02b385519b", APIVersion:"apps/v1", ResourceVersion:"743", FieldPath:""}): type: 'Normal' reason: 'SuccessfulCreate' Created pod: frontend-t6k9f
I0814 10:51:20.684] configmap/test-set-env-config created
I0814 10:51:20.767] Successful
I0814 10:51:20.767] message:error: cannot attach to *v1.ConfigMap: selector for *v1.ConfigMap not implemented
I0814 10:51:20.767] has:not implemented
I0814 10:51:20.876] Successful
I0814 10:51:20.876] message:Error from server (BadRequest): pod test-pod does not have a host assigned
I0814 10:51:20.876] has not:not found
I0814 10:51:20.878] Successful
I0814 10:51:20.878] message:Error from server (BadRequest): pod test-pod does not have a host assigned
I0814 10:51:20.879] has not:pod or type/name must be specified
I0814 10:51:20.993] Successful
I0814 10:51:20.993] message:Error from server (BadRequest): pod frontend-268st does not have a host assigned
I0814 10:51:20.993] has not:not found
I0814 10:51:20.995] Successful
I0814 10:51:20.995] message:Error from server (BadRequest): pod frontend-268st does not have a host assigned
I0814 10:51:20.996] has not:pod or type/name must be specified
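kubectl exec here accepts either a pod name or TYPE/NAME and resolves the latter to one of the workload's pods before opening a session, which is why the ConfigMap is rejected (it has no pod selector) and why the ReplicaSet case lands on frontend-268st, one of the pods created just above. Every attempt then stops at "does not have a host assigned" only because nothing in this environment schedules pods onto a real node. A minimal sketch, assuming the objects created in this segment:

kubectl exec test-pod -- date                  # exec by pod name
kubectl exec rs/frontend -- date               # exec by workload; kubectl picks a pod behind the ReplicaSet's selector
kubectl exec cm/test-set-env-config -- date    # rejected: a ConfigMap has no pods to resolve to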
I0814 10:51:21.083] pod "test-pod" deleted
I0814 10:51:21.180] replicaset.apps "frontend" deleted
I0814 10:51:21.275] configmap "test-set-env-config" deleted
I0814 10:51:21.295] +++ exit code: 0
I0814 10:51:21.332] Recording: run_create_secret_tests
I0814 10:51:21.333] Running command: run_create_secret_tests
I0814 10:51:21.356] 
I0814 10:51:21.358] +++ Running case: test-cmd.run_create_secret_tests 
I0814 10:51:21.361] +++ working dir: /go/src/k8s.io/kubernetes
I0814 10:51:21.364] +++ command: run_create_secret_tests
I0814 10:51:21.469] Successful
I0814 10:51:21.470] message:Error from server (NotFound): secrets "mysecret" not found
I0814 10:51:21.470] has:secrets "mysecret" not found
I0814 10:51:21.646] Successful
I0814 10:51:21.646] message:Error from server (NotFound): secrets "mysecret" not found
I0814 10:51:21.646] has:secrets "mysecret" not found
I0814 10:51:21.647] Successful
I0814 10:51:21.647] message:user-specified
I0814 10:51:21.647] has:user-specified
I0814 10:51:21.728] Successful
I0814 10:51:21.819] {"kind":"ConfigMap","apiVersion":"v1","metadata":{"name":"tester-update-cm","namespace":"default","selfLink":"/api/v1/namespaces/default/configmaps/tester-update-cm","uid":"34fa453d-1c06-4187-a490-7b195c02edb5","resourceVersion":"764","creationTimestamp":"2019-08-14T10:51:21Z"}}
... skipping 2 lines ...
I0814 10:51:22.001] has:uid
I0814 10:51:22.092] Successful
I0814 10:51:22.093] message:{"kind":"ConfigMap","apiVersion":"v1","metadata":{"name":"tester-update-cm","namespace":"default","selfLink":"/api/v1/namespaces/default/configmaps/tester-update-cm","uid":"34fa453d-1c06-4187-a490-7b195c02edb5","resourceVersion":"765","creationTimestamp":"2019-08-14T10:51:21Z","managedFields":[{"manager":"kubectl","operation":"Update","apiVersion":"v1","time":"2019-08-14T10:51:21Z","fields":{"f:data":{"f:key1":{},".":{}}}}]},"data":{"key1":"config1"}}
I0814 10:51:22.093] has:config1
I0814 10:51:22.173] {"kind":"Status","apiVersion":"v1","metadata":{},"status":"Success","details":{"name":"tester-update-cm","kind":"configmaps","uid":"34fa453d-1c06-4187-a490-7b195c02edb5"}}
I0814 10:51:22.274] Successful
I0814 10:51:22.275] message:Error from server (NotFound): configmaps "tester-update-cm" not found
I0814 10:51:22.275] has:configmaps "tester-update-cm" not found
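The JSON objects above are raw REST round-trips rather than typed kubectl verbs: the ConfigMap is created, read, updated and deleted by URL, and the final NotFound confirms the delete went through. A minimal sketch of the same sequence; the /tmp/*.json payload files are hypothetical stand-ins for whatever bodies the test used:

kubectl create  --raw /api/v1/namespaces/default/configmaps -f /tmp/cm.json                           # POST the ConfigMap body
kubectl get     --raw /api/v1/namespaces/default/configmaps/tester-update-cm                          # GET it back, uid and all
kubectl replace --raw /api/v1/namespaces/default/configmaps/tester-update-cm -f /tmp/cm-updated.json  # PUT an update
kubectl delete  --raw /api/v1/namespaces/default/configmaps/tester-update-cm                          # DELETE; the server answers with a Status object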
I0814 10:51:22.291] +++ exit code: 0
I0814 10:51:22.334] Recording: run_kubectl_create_kustomization_directory_tests
I0814 10:51:22.335] Running command: run_kubectl_create_kustomization_directory_tests
I0814 10:51:22.359] 
I0814 10:51:22.361] +++ Running case: test-cmd.run_kubectl_create_kustomization_directory_tests 
... skipping 158 lines ...
I0814 10:51:25.325] valid-pod   0/1     Pending   0          0s
I0814 10:51:25.325] has:valid-pod
I0814 10:51:26.418] Successful
I0814 10:51:26.418] message:NAME        READY   STATUS    RESTARTS   AGE
I0814 10:51:26.419] valid-pod   0/1     Pending   0          0s
I0814 10:51:26.419] STATUS      REASON          MESSAGE
I0814 10:51:26.419] Failure     InternalError   an error on the server ("unable to decode an event from the watch stream: net/http: request canceled (Client.Timeout exceeded while reading body)") has prevented the request from succeeding
I0814 10:51:26.419] has:Timeout exceeded while reading body
I0814 10:51:26.510] Successful
I0814 10:51:26.510] message:NAME        READY   STATUS    RESTARTS   AGE
I0814 10:51:26.510] valid-pod   0/1     Pending   0          1s
I0814 10:51:26.510] has:valid-pod
I0814 10:51:26.585] Successful
I0814 10:51:26.586] message:error: Invalid timeout value. Timeout must be a single integer in seconds, or an integer followed by a corresponding time unit (e.g. 1s | 2m | 3h)
I0814 10:51:26.586] has:Invalid timeout value
I0814 10:51:26.698] pod "valid-pod" deleted
I0814 10:51:26.720] +++ exit code: 0
I0814 10:51:26.754] Recording: run_crd_tests
I0814 10:51:26.755] Running command: run_crd_tests
I0814 10:51:26.775] 
... skipping 244 lines ...
I0814 10:51:31.499] foo.company.com/test patched
I0814 10:51:31.599] crd.sh:236: Successful get foos/test {{.patched}}: value1
I0814 10:51:31.690] (Bfoo.company.com/test patched
I0814 10:51:31.785] crd.sh:238: Successful get foos/test {{.patched}}: value2
I0814 10:51:31.871] (Bfoo.company.com/test patched
I0814 10:51:31.965] crd.sh:240: Successful get foos/test {{.patched}}: <no value>
I0814 10:51:32.128] (B+++ [0814 10:51:32] "kubectl patch --local" returns error as expected for CustomResource: error: cannot apply strategic merge patch for company.com/v1, Kind=Foo locally, try --type merge
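Custom resources carry no strategic-merge metadata, so kubectl's default patch type cannot be used on them; the --local invocation above is refused with a hint to use --type merge, and the change-cause recorded in the JSON just below shows that form in use. A minimal sketch against the foos/test object from this run:

kubectl patch foos/test --type=merge -p '{"patched":"value2"}'   # set the field
kubectl patch foos/test --type=merge -p '{"patched":null}'       # merge-patch semantics: null removes the field, hence the <no value> read-back above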
I0814 10:51:32.195] {
I0814 10:51:32.195]     "apiVersion": "company.com/v1",
I0814 10:51:32.195]     "kind": "Foo",
I0814 10:51:32.195]     "metadata": {
I0814 10:51:32.195]         "annotations": {
I0814 10:51:32.196]             "kubernetes.io/change-cause": "kubectl patch foos/test --server=http://127.0.0.1:8080 --match-server-version=true --patch={\"patched\":null} --type=merge --record=true"
... skipping 356 lines ...
I0814 10:51:55.720] (Bnamespace/non-native-resources created
I0814 10:51:55.896] bar.company.com/test created
I0814 10:51:56.000] crd.sh:455: Successful get bars {{len .items}}: 1
I0814 10:51:56.089] (Bnamespace "non-native-resources" deleted
I0814 10:52:01.333] crd.sh:458: Successful get bars {{len .items}}: 0
I0814 10:52:01.497] (Bcustomresourcedefinition.apiextensions.k8s.io "foos.company.com" deleted
W0814 10:52:01.598] Error from server (NotFound): namespaces "non-native-resources" not found
I0814 10:52:01.699] customresourcedefinition.apiextensions.k8s.io "bars.company.com" deleted
I0814 10:52:01.705] customresourcedefinition.apiextensions.k8s.io "resources.mygroup.example.com" deleted
I0814 10:52:01.818] customresourcedefinition.apiextensions.k8s.io "validfoos.company.com" deleted
I0814 10:52:01.856] +++ exit code: 0
I0814 10:52:01.900] Recording: run_cmd_with_img_tests
I0814 10:52:01.900] Running command: run_cmd_with_img_tests
... skipping 10 lines ...
I0814 10:52:02.215] has:deployment.apps/test1 created
W0814 10:52:02.315] kubectl run --generator=deployment/apps.v1 is DEPRECATED and will be removed in a future version. Use kubectl run --generator=run-pod/v1 or kubectl create instead.
W0814 10:52:02.316] I0814 10:52:02.205895   53119 event.go:255] Event(v1.ObjectReference{Kind:"Deployment", Namespace:"namespace-1565779921-1726", Name:"test1", UID:"6f7c892a-03ce-451d-9f3c-ecedebec107a", APIVersion:"apps/v1", ResourceVersion:"919", FieldPath:""}): type: 'Normal' reason: 'ScalingReplicaSet' Scaled up replica set test1-9797f89d8 to 1
W0814 10:52:02.317] I0814 10:52:02.212729   53119 event.go:255] Event(v1.ObjectReference{Kind:"ReplicaSet", Namespace:"namespace-1565779921-1726", Name:"test1-9797f89d8", UID:"2ebbc610-ddac-46cd-8106-17e8c42bd547", APIVersion:"apps/v1", ResourceVersion:"920", FieldPath:""}): type: 'Normal' reason: 'SuccessfulCreate' Created pod: test1-9797f89d8-mszxs
I0814 10:52:02.418] deployment.apps "test1" deleted
I0814 10:52:02.418] Successful
I0814 10:52:02.419] message:error: Invalid image name "InvalidImageName": invalid reference format
I0814 10:52:02.419] has:error: Invalid image name "InvalidImageName": invalid reference format
I0814 10:52:02.430] +++ exit code: 0
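kubectl run validates the image reference before creating anything, and the deprecation warning earlier in this segment points at the replacement generators. A minimal sketch; the pause image is one used elsewhere in this run, not by this particular test:

kubectl run test1 --image=InvalidImageName                              # rejected: not a valid image reference
kubectl run test1 --image=k8s.gcr.io/pause:3.1 --generator=run-pod/v1   # non-deprecated path: creates a bare Pod
kubectl create deployment test1 --image=k8s.gcr.io/pause:3.1            # explicit Deployment, as the warning suggests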
I0814 10:52:02.477] +++ [0814 10:52:02] Testing recursive resources
I0814 10:52:02.484] +++ [0814 10:52:02] Creating namespace namespace-1565779922-32587
I0814 10:52:02.566] namespace/namespace-1565779922-32587 created
I0814 10:52:02.645] Context "test" modified.
W0814 10:52:02.745] W0814 10:52:02.512640   49657 cacher.go:154] Terminating all watchers from cacher *unstructured.Unstructured
W0814 10:52:02.746] E0814 10:52:02.514309   53119 reflector.go:282] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to watch *v1.PartialObjectMetadata: the server could not find the requested resource
W0814 10:52:02.746] W0814 10:52:02.617181   49657 cacher.go:154] Terminating all watchers from cacher *unstructured.Unstructured
W0814 10:52:02.746] E0814 10:52:02.619128   53119 reflector.go:282] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to watch *v1.PartialObjectMetadata: the server could not find the requested resource
W0814 10:52:02.746] W0814 10:52:02.716510   49657 cacher.go:154] Terminating all watchers from cacher *unstructured.Unstructured
W0814 10:52:02.746] E0814 10:52:02.718224   53119 reflector.go:282] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to watch *v1.PartialObjectMetadata: the server could not find the requested resource
W0814 10:52:02.833] W0814 10:52:02.832832   49657 cacher.go:154] Terminating all watchers from cacher *unstructured.Unstructured
W0814 10:52:02.835] E0814 10:52:02.834357   53119 reflector.go:282] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to watch *v1.PartialObjectMetadata: the server could not find the requested resource
I0814 10:52:02.935] generic-resources.sh:202: Successful get pods {{range.items}}{{.metadata.name}}:{{end}}: 
I0814 10:52:03.055] (Bgeneric-resources.sh:206: Successful get pods {{range.items}}{{.metadata.name}}:{{end}}: busybox0:busybox1:
I0814 10:52:03.059] (BSuccessful
I0814 10:52:03.060] message:pod/busybox0 created
I0814 10:52:03.060] pod/busybox1 created
I0814 10:52:03.061] error: error validating "hack/testdata/recursive/pod/pod/busybox-broken.yaml": error validating data: kind not set; if you choose to ignore these errors, turn validation off with --validate=false
I0814 10:52:03.061] has:error validating data: kind not set
I0814 10:52:03.167] generic-resources.sh:211: Successful get pods {{range.items}}{{.metadata.name}}:{{end}}: busybox0:busybox1:
I0814 10:52:03.357] (Bgeneric-resources.sh:220: Successful get pods {{range.items}}{{(index .spec.containers 0).image}}:{{end}}: busybox:busybox:
I0814 10:52:03.359] (BSuccessful
I0814 10:52:03.359] message:error: unable to decode "hack/testdata/recursive/pod/pod/busybox-broken.yaml": Object 'Kind' is missing in '{"apiVersion":"v1","ind":"Pod","metadata":{"labels":{"app":"busybox2"},"name":"busybox2"},"spec":{"containers":[{"command":["sleep","3600"],"image":"busybox","imagePullPolicy":"IfNotPresent","name":"busybox"}],"restartPolicy":"Always"}}'
I0814 10:52:03.360] has:Object 'Kind' is missing
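Each recursive verb in this block walks the whole directory, keeps going past the broken manifest (busybox-broken.yaml spells the kind field as "ind"), applies the verb to busybox0 and busybox1, and only then reports the decode failure. A minimal sketch of the pattern, reusing the paths and values printed in the log:

kubectl create   -R -f hack/testdata/recursive/pod                               # busybox0 and busybox1 are created; the broken file is reported, not fatal
kubectl annotate -R -f hack/testdata/recursive/pod annotatekey=annotatevalue
kubectl label    -R -f hack/testdata/recursive/pod mylabel=myvalue
kubectl delete   -R -f hack/testdata/recursive/pod --grace-period=0 --force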
I0814 10:52:03.458] generic-resources.sh:227: Successful get pods {{range.items}}{{.metadata.name}}:{{end}}: busybox0:busybox1:
I0814 10:52:03.767] (Bgeneric-resources.sh:231: Successful get pods {{range.items}}{{.metadata.labels.status}}:{{end}}: replaced:replaced:
I0814 10:52:03.769] (BSuccessful
I0814 10:52:03.770] message:pod/busybox0 replaced
I0814 10:52:03.770] pod/busybox1 replaced
I0814 10:52:03.770] error: error validating "hack/testdata/recursive/pod-modify/pod/busybox-broken.yaml": error validating data: kind not set; if you choose to ignore these errors, turn validation off with --validate=false
I0814 10:52:03.771] has:error validating data: kind not set
I0814 10:52:03.864] generic-resources.sh:236: Successful get pods {{range.items}}{{.metadata.name}}:{{end}}: busybox0:busybox1:
I0814 10:52:03.956] (BSuccessful
I0814 10:52:03.957] message:Name:         busybox0
I0814 10:52:03.957] Namespace:    namespace-1565779922-32587
I0814 10:52:03.958] Priority:     0
I0814 10:52:03.958] Node:         <none>
... skipping 159 lines ...
I0814 10:52:03.986] has:Object 'Kind' is missing
I0814 10:52:04.051] generic-resources.sh:246: Successful get pods {{range.items}}{{.metadata.name}}:{{end}}: busybox0:busybox1:
I0814 10:52:04.235] (Bgeneric-resources.sh:250: Successful get pods {{range.items}}{{.metadata.annotations.annotatekey}}:{{end}}: annotatevalue:annotatevalue:
I0814 10:52:04.238] (BSuccessful
I0814 10:52:04.238] message:pod/busybox0 annotated
I0814 10:52:04.238] pod/busybox1 annotated
I0814 10:52:04.239] error: unable to decode "hack/testdata/recursive/pod/pod/busybox-broken.yaml": Object 'Kind' is missing in '{"apiVersion":"v1","ind":"Pod","metadata":{"labels":{"app":"busybox2"},"name":"busybox2"},"spec":{"containers":[{"command":["sleep","3600"],"image":"busybox","imagePullPolicy":"IfNotPresent","name":"busybox"}],"restartPolicy":"Always"}}'
I0814 10:52:04.239] has:Object 'Kind' is missing
I0814 10:52:04.330] generic-resources.sh:255: Successful get pods {{range.items}}{{.metadata.name}}:{{end}}: busybox0:busybox1:
I0814 10:52:04.599] (Bgeneric-resources.sh:259: Successful get pods {{range.items}}{{.metadata.labels.status}}:{{end}}: replaced:replaced:
I0814 10:52:04.601] (BSuccessful
I0814 10:52:04.602] message:Warning: kubectl apply should be used on resource created by either kubectl create --save-config or kubectl apply
I0814 10:52:04.602] pod/busybox0 configured
I0814 10:52:04.603] Warning: kubectl apply should be used on resource created by either kubectl create --save-config or kubectl apply
I0814 10:52:04.603] pod/busybox1 configured
I0814 10:52:04.603] error: error validating "hack/testdata/recursive/pod-modify/pod/busybox-broken.yaml": error validating data: kind not set; if you choose to ignore these errors, turn validation off with --validate=false
I0814 10:52:04.604] has:error validating data: kind not set
I0814 10:52:04.694] generic-resources.sh:265: Successful get deployment {{range.items}}{{.metadata.name}}:{{end}}: 
I0814 10:52:04.871] (Bdeployment.apps/nginx created
W0814 10:52:04.972] E0814 10:52:03.516344   53119 reflector.go:125] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0814 10:52:04.973] E0814 10:52:03.620903   53119 reflector.go:125] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0814 10:52:04.973] E0814 10:52:03.719887   53119 reflector.go:125] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0814 10:52:04.974] E0814 10:52:03.836016   53119 reflector.go:125] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0814 10:52:04.974] E0814 10:52:04.517914   53119 reflector.go:125] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0814 10:52:04.975] E0814 10:52:04.622641   53119 reflector.go:125] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0814 10:52:04.975] E0814 10:52:04.721527   53119 reflector.go:125] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0814 10:52:04.976] E0814 10:52:04.837383   53119 reflector.go:125] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0814 10:52:04.976] I0814 10:52:04.876430   53119 event.go:255] Event(v1.ObjectReference{Kind:"Deployment", Namespace:"namespace-1565779922-32587", Name:"nginx", UID:"6c2bf922-40cc-4f98-9378-3b89e4899b93", APIVersion:"apps/v1", ResourceVersion:"945", FieldPath:""}): type: 'Normal' reason: 'ScalingReplicaSet' Scaled up replica set nginx-bbbbb95b5 to 3
W0814 10:52:04.977] I0814 10:52:04.881523   53119 event.go:255] Event(v1.ObjectReference{Kind:"ReplicaSet", Namespace:"namespace-1565779922-32587", Name:"nginx-bbbbb95b5", UID:"2ed01eed-4f73-4626-80f0-28c46c1e46b4", APIVersion:"apps/v1", ResourceVersion:"946", FieldPath:""}): type: 'Normal' reason: 'SuccessfulCreate' Created pod: nginx-bbbbb95b5-r5hjl
W0814 10:52:04.978] I0814 10:52:04.885384   53119 event.go:255] Event(v1.ObjectReference{Kind:"ReplicaSet", Namespace:"namespace-1565779922-32587", Name:"nginx-bbbbb95b5", UID:"2ed01eed-4f73-4626-80f0-28c46c1e46b4", APIVersion:"apps/v1", ResourceVersion:"946", FieldPath:""}): type: 'Normal' reason: 'SuccessfulCreate' Created pod: nginx-bbbbb95b5-96xq9
W0814 10:52:04.978] I0814 10:52:04.885822   53119 event.go:255] Event(v1.ObjectReference{Kind:"ReplicaSet", Namespace:"namespace-1565779922-32587", Name:"nginx-bbbbb95b5", UID:"2ed01eed-4f73-4626-80f0-28c46c1e46b4", APIVersion:"apps/v1", ResourceVersion:"946", FieldPath:""}): type: 'Normal' reason: 'SuccessfulCreate' Created pod: nginx-bbbbb95b5-f84vb
I0814 10:52:05.079] generic-resources.sh:269: Successful get deployment {{range.items}}{{.metadata.name}}:{{end}}: nginx:
I0814 10:52:05.080] (Bgeneric-resources.sh:270: Successful get deployment {{range.items}}{{(index .spec.template.spec.containers 0).image}}:{{end}}: k8s.gcr.io/nginx:test-cmd:
... skipping 44 lines ...
I0814 10:52:05.338] deployment.apps "nginx" deleted
I0814 10:52:05.442] generic-resources.sh:281: Successful get pods {{range.items}}{{.metadata.name}}:{{end}}: busybox0:busybox1:
I0814 10:52:05.630] (Bgeneric-resources.sh:285: Successful get pods {{range.items}}{{.metadata.name}}:{{end}}: busybox0:busybox1:
I0814 10:52:05.633] (BSuccessful
I0814 10:52:05.633] message:kubectl convert is DEPRECATED and will be removed in a future version.
I0814 10:52:05.633] In order to convert, kubectl apply the object to the cluster, then kubectl get at the desired version.
I0814 10:52:05.634] error: unable to decode "hack/testdata/recursive/pod/pod/busybox-broken.yaml": Object 'Kind' is missing in '{"apiVersion":"v1","ind":"Pod","metadata":{"labels":{"app":"busybox2"},"name":"busybox2"},"spec":{"containers":[{"command":["sleep","3600"],"image":"busybox","imagePullPolicy":"IfNotPresent","name":"busybox"}],"restartPolicy":"Always"}}'
I0814 10:52:05.634] has:Object 'Kind' is missing
I0814 10:52:05.730] generic-resources.sh:290: Successful get pods {{range.items}}{{.metadata.name}}:{{end}}: busybox0:busybox1:
I0814 10:52:05.824] (BSuccessful
I0814 10:52:05.825] message:busybox0:busybox1:error: unable to decode "hack/testdata/recursive/pod/pod/busybox-broken.yaml": Object 'Kind' is missing in '{"apiVersion":"v1","ind":"Pod","metadata":{"labels":{"app":"busybox2"},"name":"busybox2"},"spec":{"containers":[{"command":["sleep","3600"],"image":"busybox","imagePullPolicy":"IfNotPresent","name":"busybox"}],"restartPolicy":"Always"}}'
I0814 10:52:05.825] has:busybox0:busybox1:
I0814 10:52:05.826] Successful
I0814 10:52:05.827] message:busybox0:busybox1:error: unable to decode "hack/testdata/recursive/pod/pod/busybox-broken.yaml": Object 'Kind' is missing in '{"apiVersion":"v1","ind":"Pod","metadata":{"labels":{"app":"busybox2"},"name":"busybox2"},"spec":{"containers":[{"command":["sleep","3600"],"image":"busybox","imagePullPolicy":"IfNotPresent","name":"busybox"}],"restartPolicy":"Always"}}'
I0814 10:52:05.827] has:Object 'Kind' is missing
W0814 10:52:05.928] kubectl convert is DEPRECATED and will be removed in a future version.
W0814 10:52:05.929] In order to convert, kubectl apply the object to the cluster, then kubectl get at the desired version.
W0814 10:52:05.929] E0814 10:52:05.519663   53119 reflector.go:125] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0814 10:52:05.929] E0814 10:52:05.623909   53119 reflector.go:125] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0814 10:52:05.930] E0814 10:52:05.723354   53119 reflector.go:125] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0814 10:52:05.930] E0814 10:52:05.838981   53119 reflector.go:125] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
I0814 10:52:06.030] generic-resources.sh:299: Successful get pods {{range.items}}{{.metadata.name}}:{{end}}: busybox0:busybox1:
I0814 10:52:06.049] (Bpod/busybox0 labeled
I0814 10:52:06.049] pod/busybox1 labeled
I0814 10:52:06.049] error: unable to decode "hack/testdata/recursive/pod/pod/busybox-broken.yaml": Object 'Kind' is missing in '{"apiVersion":"v1","ind":"Pod","metadata":{"labels":{"app":"busybox2"},"name":"busybox2"},"spec":{"containers":[{"command":["sleep","3600"],"image":"busybox","imagePullPolicy":"IfNotPresent","name":"busybox"}],"restartPolicy":"Always"}}'
I0814 10:52:06.141] generic-resources.sh:304: Successful get pods {{range.items}}{{.metadata.labels.mylabel}}:{{end}}: myvalue:myvalue:
I0814 10:52:06.144] (BSuccessful
I0814 10:52:06.144] message:pod/busybox0 labeled
I0814 10:52:06.144] pod/busybox1 labeled
I0814 10:52:06.145] error: unable to decode "hack/testdata/recursive/pod/pod/busybox-broken.yaml": Object 'Kind' is missing in '{"apiVersion":"v1","ind":"Pod","metadata":{"labels":{"app":"busybox2"},"name":"busybox2"},"spec":{"containers":[{"command":["sleep","3600"],"image":"busybox","imagePullPolicy":"IfNotPresent","name":"busybox"}],"restartPolicy":"Always"}}'
I0814 10:52:06.145] has:Object 'Kind' is missing
I0814 10:52:06.243] generic-resources.sh:309: Successful get pods {{range.items}}{{.metadata.name}}:{{end}}: busybox0:busybox1:
I0814 10:52:06.330] pod/busybox0 patched
I0814 10:52:06.331] pod/busybox1 patched
I0814 10:52:06.331] error: unable to decode "hack/testdata/recursive/pod/pod/busybox-broken.yaml": Object 'Kind' is missing in '{"apiVersion":"v1","ind":"Pod","metadata":{"labels":{"app":"busybox2"},"name":"busybox2"},"spec":{"containers":[{"command":["sleep","3600"],"image":"busybox","imagePullPolicy":"IfNotPresent","name":"busybox"}],"restartPolicy":"Always"}}'
I0814 10:52:06.425] generic-resources.sh:314: Successful get pods {{range.items}}{{(index .spec.containers 0).image}}:{{end}}: prom/busybox:prom/busybox:
I0814 10:52:06.427] Successful
I0814 10:52:06.427] message:pod/busybox0 patched
I0814 10:52:06.427] pod/busybox1 patched
I0814 10:52:06.428] error: unable to decode "hack/testdata/recursive/pod/pod/busybox-broken.yaml": Object 'Kind' is missing in '{"apiVersion":"v1","ind":"Pod","metadata":{"labels":{"app":"busybox2"},"name":"busybox2"},"spec":{"containers":[{"command":["sleep","3600"],"image":"busybox","imagePullPolicy":"IfNotPresent","name":"busybox"}],"restartPolicy":"Always"}}'
I0814 10:52:06.428] has:Object 'Kind' is missing
I0814 10:52:06.518] generic-resources.sh:319: Successful get pods {{range.items}}{{.metadata.name}}:{{end}}: busybox0:busybox1:
I0814 10:52:06.703] generic-resources.sh:323: Successful get pods {{range.items}}{{.metadata.name}}:{{end}}: 
I0814 10:52:06.705] Successful
I0814 10:52:06.706] message:warning: Immediate deletion does not wait for confirmation that the running resource has been terminated. The resource may continue to run on the cluster indefinitely.
I0814 10:52:06.706] pod "busybox0" force deleted
I0814 10:52:06.706] pod "busybox1" force deleted
I0814 10:52:06.706] error: unable to decode "hack/testdata/recursive/pod/pod/busybox-broken.yaml": Object 'Kind' is missing in '{"apiVersion":"v1","ind":"Pod","metadata":{"labels":{"app":"busybox2"},"name":"busybox2"},"spec":{"containers":[{"command":["sleep","3600"],"image":"busybox","imagePullPolicy":"IfNotPresent","name":"busybox"}],"restartPolicy":"Always"}}'
I0814 10:52:06.707] has:Object 'Kind' is missing
I0814 10:52:06.800] generic-resources.sh:328: Successful get rc {{range.items}}{{.metadata.name}}:{{end}}: 
I0814 10:52:06.959] replicationcontroller/busybox0 created
I0814 10:52:06.964] replicationcontroller/busybox1 created
W0814 10:52:07.065] E0814 10:52:06.521410   53119 reflector.go:125] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0814 10:52:07.065] E0814 10:52:06.625314   53119 reflector.go:125] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0814 10:52:07.066] E0814 10:52:06.725305   53119 reflector.go:125] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0814 10:52:07.066] E0814 10:52:06.840534   53119 reflector.go:125] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0814 10:52:07.066] error: error validating "hack/testdata/recursive/rc/rc/busybox-broken.yaml": error validating data: kind not set; if you choose to ignore these errors, turn validation off with --validate=false
W0814 10:52:07.066] I0814 10:52:06.964686   53119 event.go:255] Event(v1.ObjectReference{Kind:"ReplicationController", Namespace:"namespace-1565779922-32587", Name:"busybox0", UID:"5d0ff73e-42bc-453b-a25d-08d42bcb6063", APIVersion:"v1", ResourceVersion:"976", FieldPath:""}): type: 'Normal' reason: 'SuccessfulCreate' Created pod: busybox0-brsqf
W0814 10:52:07.067] I0814 10:52:06.969191   53119 event.go:255] Event(v1.ObjectReference{Kind:"ReplicationController", Namespace:"namespace-1565779922-32587", Name:"busybox1", UID:"f09c00ef-a4ea-4519-88e9-48cf8eef6e5b", APIVersion:"v1", ResourceVersion:"978", FieldPath:""}): type: 'Normal' reason: 'SuccessfulCreate' Created pod: busybox1-fffc8
I0814 10:52:07.167] generic-resources.sh:332: Successful get rc {{range.items}}{{.metadata.name}}:{{end}}: busybox0:busybox1:
I0814 10:52:07.167] generic-resources.sh:337: Successful get rc {{range.items}}{{.metadata.name}}:{{end}}: busybox0:busybox1:
I0814 10:52:07.258] generic-resources.sh:338: Successful get rc busybox0 {{.spec.replicas}}: 1
I0814 10:52:07.359] generic-resources.sh:339: Successful get rc busybox1 {{.spec.replicas}}: 1
I0814 10:52:07.556] generic-resources.sh:344: Successful get hpa busybox0 {{.spec.minReplicas}} {{.spec.maxReplicas}} {{.spec.targetCPUUtilizationPercentage}}: 1 2 80
I0814 10:52:07.650] generic-resources.sh:345: Successful get hpa busybox1 {{.spec.minReplicas}} {{.spec.maxReplicas}} {{.spec.targetCPUUtilizationPercentage}}: 1 2 80
I0814 10:52:07.653] Successful
I0814 10:52:07.653] message:horizontalpodautoscaler.autoscaling/busybox0 autoscaled
I0814 10:52:07.653] horizontalpodautoscaler.autoscaling/busybox1 autoscaled
I0814 10:52:07.654] error: unable to decode "hack/testdata/recursive/rc/rc/busybox-broken.yaml": Object 'Kind' is missing in '{"apiVersion":"v1","ind":"ReplicationController","metadata":{"labels":{"app":"busybox2"},"name":"busybox2"},"spec":{"replicas":1,"selector":{"app":"busybox2"},"template":{"metadata":{"labels":{"app":"busybox2"},"name":"busybox2"},"spec":{"containers":[{"command":["sleep","3600"],"image":"busybox","imagePullPolicy":"IfNotPresent","name":"busybox"}],"restartPolicy":"Always"}}}}'
I0814 10:52:07.654] has:Object 'Kind' is missing
I0814 10:52:07.735] horizontalpodautoscaler.autoscaling "busybox0" deleted
I0814 10:52:07.828] horizontalpodautoscaler.autoscaling "busybox1" deleted
I0814 10:52:07.929] generic-resources.sh:353: Successful get rc {{range.items}}{{.metadata.name}}:{{end}}: busybox0:busybox1:
I0814 10:52:08.026] generic-resources.sh:354: Successful get rc busybox0 {{.spec.replicas}}: 1
I0814 10:52:08.122] generic-resources.sh:355: Successful get rc busybox1 {{.spec.replicas}}: 1
I0814 10:52:08.318] generic-resources.sh:359: Successful get service busybox0 {{(index .spec.ports 0).name}} {{(index .spec.ports 0).port}}: <no value> 80
I0814 10:52:08.414] generic-resources.sh:360: Successful get service busybox1 {{(index .spec.ports 0).name}} {{(index .spec.ports 0).port}}: <no value> 80
I0814 10:52:08.417] Successful
I0814 10:52:08.417] message:service/busybox0 exposed
I0814 10:52:08.418] service/busybox1 exposed
I0814 10:52:08.418] error: unable to decode "hack/testdata/recursive/rc/rc/busybox-broken.yaml": Object 'Kind' is missing in '{"apiVersion":"v1","ind":"ReplicationController","metadata":{"labels":{"app":"busybox2"},"name":"busybox2"},"spec":{"replicas":1,"selector":{"app":"busybox2"},"template":{"metadata":{"labels":{"app":"busybox2"},"name":"busybox2"},"spec":{"containers":[{"command":["sleep","3600"],"image":"busybox","imagePullPolicy":"IfNotPresent","name":"busybox"}],"restartPolicy":"Always"}}}}'
I0814 10:52:08.418] has:Object 'Kind' is missing
I0814 10:52:08.521] generic-resources.sh:366: Successful get rc {{range.items}}{{.metadata.name}}:{{end}}: busybox0:busybox1:
I0814 10:52:08.614] generic-resources.sh:367: Successful get rc busybox0 {{.spec.replicas}}: 1
I0814 10:52:08.709] generic-resources.sh:368: Successful get rc busybox1 {{.spec.replicas}}: 1
I0814 10:52:08.934] generic-resources.sh:372: Successful get rc busybox0 {{.spec.replicas}}: 2
I0814 10:52:09.032] generic-resources.sh:373: Successful get rc busybox1 {{.spec.replicas}}: 2
I0814 10:52:09.036] Successful
I0814 10:52:09.036] message:replicationcontroller/busybox0 scaled
I0814 10:52:09.036] replicationcontroller/busybox1 scaled
I0814 10:52:09.037] error: unable to decode "hack/testdata/recursive/rc/rc/busybox-broken.yaml": Object 'Kind' is missing in '{"apiVersion":"v1","ind":"ReplicationController","metadata":{"labels":{"app":"busybox2"},"name":"busybox2"},"spec":{"replicas":1,"selector":{"app":"busybox2"},"template":{"metadata":{"labels":{"app":"busybox2"},"name":"busybox2"},"spec":{"containers":[{"command":["sleep","3600"],"image":"busybox","imagePullPolicy":"IfNotPresent","name":"busybox"}],"restartPolicy":"Always"}}}}'
I0814 10:52:09.037] has:Object 'Kind' is missing
I0814 10:52:09.143] generic-resources.sh:378: Successful get rc {{range.items}}{{.metadata.name}}:{{end}}: busybox0:busybox1:
I0814 10:52:09.335] generic-resources.sh:382: Successful get rc {{range.items}}{{.metadata.name}}:{{end}}: 
I0814 10:52:09.337] Successful
I0814 10:52:09.338] message:warning: Immediate deletion does not wait for confirmation that the running resource has been terminated. The resource may continue to run on the cluster indefinitely.
I0814 10:52:09.338] replicationcontroller "busybox0" force deleted
I0814 10:52:09.338] replicationcontroller "busybox1" force deleted
I0814 10:52:09.339] error: unable to decode "hack/testdata/recursive/rc/rc/busybox-broken.yaml": Object 'Kind' is missing in '{"apiVersion":"v1","ind":"ReplicationController","metadata":{"labels":{"app":"busybox2"},"name":"busybox2"},"spec":{"replicas":1,"selector":{"app":"busybox2"},"template":{"metadata":{"labels":{"app":"busybox2"},"name":"busybox2"},"spec":{"containers":[{"command":["sleep","3600"],"image":"busybox","imagePullPolicy":"IfNotPresent","name":"busybox"}],"restartPolicy":"Always"}}}}'
I0814 10:52:09.339] has:Object 'Kind' is missing
I0814 10:52:09.434] generic-resources.sh:387: Successful get deployment {{range.items}}{{.metadata.name}}:{{end}}: 
I0814 10:52:09.596] deployment.apps/nginx1-deployment created
I0814 10:52:09.603] deployment.apps/nginx0-deployment created
W0814 10:52:09.704] E0814 10:52:07.522849   53119 reflector.go:125] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0814 10:52:09.705] E0814 10:52:07.627271   53119 reflector.go:125] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0814 10:52:09.705] E0814 10:52:07.726919   53119 reflector.go:125] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0814 10:52:09.705] E0814 10:52:07.841947   53119 reflector.go:125] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0814 10:52:09.706] E0814 10:52:08.524437   53119 reflector.go:125] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0814 10:52:09.706] E0814 10:52:08.628818   53119 reflector.go:125] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0814 10:52:09.706] E0814 10:52:08.728543   53119 reflector.go:125] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0814 10:52:09.706] I0814 10:52:08.812545   53119 event.go:255] Event(v1.ObjectReference{Kind:"ReplicationController", Namespace:"namespace-1565779922-32587", Name:"busybox0", UID:"5d0ff73e-42bc-453b-a25d-08d42bcb6063", APIVersion:"v1", ResourceVersion:"997", FieldPath:""}): type: 'Normal' reason: 'SuccessfulCreate' Created pod: busybox0-kx9c4
W0814 10:52:09.707] I0814 10:52:08.836090   53119 event.go:255] Event(v1.ObjectReference{Kind:"ReplicationController", Namespace:"namespace-1565779922-32587", Name:"busybox1", UID:"f09c00ef-a4ea-4519-88e9-48cf8eef6e5b", APIVersion:"v1", ResourceVersion:"1003", FieldPath:""}): type: 'Normal' reason: 'SuccessfulCreate' Created pod: busybox1-cbwl2
W0814 10:52:09.707] E0814 10:52:08.843055   53119 reflector.go:125] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0814 10:52:09.707] E0814 10:52:09.526159   53119 reflector.go:125] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0814 10:52:09.707] error: error validating "hack/testdata/recursive/deployment/deployment/nginx-broken.yaml": error validating data: kind not set; if you choose to ignore these errors, turn validation off with --validate=false
W0814 10:52:09.708] I0814 10:52:09.613563   53119 event.go:255] Event(v1.ObjectReference{Kind:"Deployment", Namespace:"namespace-1565779922-32587", Name:"nginx0-deployment", UID:"e174437e-271d-4897-b19e-b8003866d3c6", APIVersion:"apps/v1", ResourceVersion:"1019", FieldPath:""}): type: 'Normal' reason: 'ScalingReplicaSet' Scaled up replica set nginx0-deployment-57475bf54d to 2
W0814 10:52:09.708] I0814 10:52:09.613865   53119 event.go:255] Event(v1.ObjectReference{Kind:"Deployment", Namespace:"namespace-1565779922-32587", Name:"nginx1-deployment", UID:"12e66334-1098-4c3f-b1c1-037076ceb9f0", APIVersion:"apps/v1", ResourceVersion:"1018", FieldPath:""}): type: 'Normal' reason: 'ScalingReplicaSet' Scaled up replica set nginx1-deployment-84f7f49fb7 to 2
W0814 10:52:09.708] I0814 10:52:09.616853   53119 event.go:255] Event(v1.ObjectReference{Kind:"ReplicaSet", Namespace:"namespace-1565779922-32587", Name:"nginx1-deployment-84f7f49fb7", UID:"4a9fc2e1-c930-4ded-a7d2-af3eb306454d", APIVersion:"apps/v1", ResourceVersion:"1020", FieldPath:""}): type: 'Normal' reason: 'SuccessfulCreate' Created pod: nginx1-deployment-84f7f49fb7-svcwx
W0814 10:52:09.709] I0814 10:52:09.621354   53119 event.go:255] Event(v1.ObjectReference{Kind:"ReplicaSet", Namespace:"namespace-1565779922-32587", Name:"nginx0-deployment-57475bf54d", UID:"f1fb3d51-71dc-4d4f-833c-4a19705f56cd", APIVersion:"apps/v1", ResourceVersion:"1021", FieldPath:""}): type: 'Normal' reason: 'SuccessfulCreate' Created pod: nginx0-deployment-57475bf54d-4dngj
W0814 10:52:09.709] I0814 10:52:09.621843   53119 event.go:255] Event(v1.ObjectReference{Kind:"ReplicaSet", Namespace:"namespace-1565779922-32587", Name:"nginx1-deployment-84f7f49fb7", UID:"4a9fc2e1-c930-4ded-a7d2-af3eb306454d", APIVersion:"apps/v1", ResourceVersion:"1020", FieldPath:""}): type: 'Normal' reason: 'SuccessfulCreate' Created pod: nginx1-deployment-84f7f49fb7-jfldk
W0814 10:52:09.709] I0814 10:52:09.625564   53119 event.go:255] Event(v1.ObjectReference{Kind:"ReplicaSet", Namespace:"namespace-1565779922-32587", Name:"nginx0-deployment-57475bf54d", UID:"f1fb3d51-71dc-4d4f-833c-4a19705f56cd", APIVersion:"apps/v1", ResourceVersion:"1021", FieldPath:""}): type: 'Normal' reason: 'SuccessfulCreate' Created pod: nginx0-deployment-57475bf54d-jf6jp
W0814 10:52:09.709] E0814 10:52:09.631742   53119 reflector.go:125] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0814 10:52:09.731] E0814 10:52:09.730455   53119 reflector.go:125] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
I0814 10:52:09.831] generic-resources.sh:391: Successful get deployment {{range.items}}{{.metadata.name}}:{{end}}: nginx0-deployment:nginx1-deployment:
I0814 10:52:09.832] generic-resources.sh:392: Successful get deployment {{range.items}}{{(index .spec.template.spec.containers 0).image}}:{{end}}: k8s.gcr.io/nginx:1.7.9:k8s.gcr.io/nginx:1.7.9:
I0814 10:52:10.042] generic-resources.sh:396: Successful get deployment {{range.items}}{{(index .spec.template.spec.containers 0).image}}:{{end}}: k8s.gcr.io/nginx:1.7.9:k8s.gcr.io/nginx:1.7.9:
I0814 10:52:10.050] Successful
I0814 10:52:10.050] message:deployment.apps/nginx1-deployment skipped rollback (current template already matches revision 1)
I0814 10:52:10.050] deployment.apps/nginx0-deployment skipped rollback (current template already matches revision 1)
I0814 10:52:10.051] error: unable to decode "hack/testdata/recursive/deployment/deployment/nginx-broken.yaml": Object 'Kind' is missing in '{"apiVersion":"apps/v1","ind":"Deployment","metadata":{"labels":{"app":"nginx2-deployment"},"name":"nginx2-deployment"},"spec":{"replicas":2,"selector":{"matchLabels":{"app":"nginx2"}},"template":{"metadata":{"labels":{"app":"nginx2"}},"spec":{"containers":[{"image":"k8s.gcr.io/nginx:1.7.9","name":"nginx","ports":[{"containerPort":80}]}]}}}}'
I0814 10:52:10.051] has:Object 'Kind' is missing
W0814 10:52:10.152] E0814 10:52:09.845032   53119 reflector.go:125] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
I0814 10:52:10.252] deployment.apps/nginx1-deployment paused
I0814 10:52:10.253] deployment.apps/nginx0-deployment paused
I0814 10:52:10.277] generic-resources.sh:404: Successful get deployment {{range.items}}{{.spec.paused}}:{{end}}: true:true:
I0814 10:52:10.279] Successful
I0814 10:52:10.279] message:unable to decode "hack/testdata/recursive/deployment/deployment/nginx-broken.yaml": Object 'Kind' is missing in '{"apiVersion":"apps/v1","ind":"Deployment","metadata":{"labels":{"app":"nginx2-deployment"},"name":"nginx2-deployment"},"spec":{"replicas":2,"selector":{"matchLabels":{"app":"nginx2"}},"template":{"metadata":{"labels":{"app":"nginx2"}},"spec":{"containers":[{"image":"k8s.gcr.io/nginx:1.7.9","name":"nginx","ports":[{"containerPort":80}]}]}}}}'
I0814 10:52:10.279] has:Object 'Kind' is missing
I0814 10:52:10.379] deployment.apps/nginx1-deployment resumed
I0814 10:52:10.387] deployment.apps/nginx0-deployment resumed
I0814 10:52:10.494] generic-resources.sh:410: Successful get deployment {{range.items}}{{.spec.paused}}:{{end}}: <no value>:<no value>:
I0814 10:52:10.496] Successful
I0814 10:52:10.497] message:unable to decode "hack/testdata/recursive/deployment/deployment/nginx-broken.yaml": Object 'Kind' is missing in '{"apiVersion":"apps/v1","ind":"Deployment","metadata":{"labels":{"app":"nginx2-deployment"},"name":"nginx2-deployment"},"spec":{"replicas":2,"selector":{"matchLabels":{"app":"nginx2"}},"template":{"metadata":{"labels":{"app":"nginx2"}},"spec":{"containers":[{"image":"k8s.gcr.io/nginx:1.7.9","name":"nginx","ports":[{"containerPort":80}]}]}}}}'
I0814 10:52:10.497] has:Object 'Kind' is missing
W0814 10:52:10.598] E0814 10:52:10.527880   53119 reflector.go:125] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0814 10:52:10.634] E0814 10:52:10.633486   53119 reflector.go:125] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0814 10:52:10.688] warning: Immediate deletion does not wait for confirmation that the running resource has been terminated. The resource may continue to run on the cluster indefinitely.
W0814 10:52:10.705] error: unable to decode "hack/testdata/recursive/deployment/deployment/nginx-broken.yaml": Object 'Kind' is missing in '{"apiVersion":"apps/v1","ind":"Deployment","metadata":{"labels":{"app":"nginx2-deployment"},"name":"nginx2-deployment"},"spec":{"replicas":2,"selector":{"matchLabels":{"app":"nginx2"}},"template":{"metadata":{"labels":{"app":"nginx2"}},"spec":{"containers":[{"image":"k8s.gcr.io/nginx:1.7.9","name":"nginx","ports":[{"containerPort":80}]}]}}}}'
W0814 10:52:10.733] E0814 10:52:10.732348   53119 reflector.go:125] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
I0814 10:52:10.834] Successful
I0814 10:52:10.834] message:deployment.apps/nginx1-deployment 
I0814 10:52:10.834] REVISION  CHANGE-CAUSE
I0814 10:52:10.834] 1         <none>
I0814 10:52:10.834] 
I0814 10:52:10.834] deployment.apps/nginx0-deployment 
I0814 10:52:10.834] REVISION  CHANGE-CAUSE
I0814 10:52:10.835] 1         <none>
I0814 10:52:10.835] 
I0814 10:52:10.835] error: unable to decode "hack/testdata/recursive/deployment/deployment/nginx-broken.yaml": Object 'Kind' is missing in '{"apiVersion":"apps/v1","ind":"Deployment","metadata":{"labels":{"app":"nginx2-deployment"},"name":"nginx2-deployment"},"spec":{"replicas":2,"selector":{"matchLabels":{"app":"nginx2"}},"template":{"metadata":{"labels":{"app":"nginx2"}},"spec":{"containers":[{"image":"k8s.gcr.io/nginx:1.7.9","name":"nginx","ports":[{"containerPort":80}]}]}}}}'
I0814 10:52:10.835] has:nginx0-deployment
I0814 10:52:10.835] Successful
I0814 10:52:10.835] message:deployment.apps/nginx1-deployment 
I0814 10:52:10.835] REVISION  CHANGE-CAUSE
I0814 10:52:10.836] 1         <none>
I0814 10:52:10.836] 
I0814 10:52:10.836] deployment.apps/nginx0-deployment 
I0814 10:52:10.836] REVISION  CHANGE-CAUSE
I0814 10:52:10.836] 1         <none>
I0814 10:52:10.836] 
I0814 10:52:10.836] error: unable to decode "hack/testdata/recursive/deployment/deployment/nginx-broken.yaml": Object 'Kind' is missing in '{"apiVersion":"apps/v1","ind":"Deployment","metadata":{"labels":{"app":"nginx2-deployment"},"name":"nginx2-deployment"},"spec":{"replicas":2,"selector":{"matchLabels":{"app":"nginx2"}},"template":{"metadata":{"labels":{"app":"nginx2"}},"spec":{"containers":[{"image":"k8s.gcr.io/nginx:1.7.9","name":"nginx","ports":[{"containerPort":80}]}]}}}}'
I0814 10:52:10.836] has:nginx1-deployment
I0814 10:52:10.837] Successful
I0814 10:52:10.837] message:deployment.apps/nginx1-deployment 
I0814 10:52:10.837] REVISION  CHANGE-CAUSE
I0814 10:52:10.837] 1         <none>
I0814 10:52:10.837] 
I0814 10:52:10.837] deployment.apps/nginx0-deployment 
I0814 10:52:10.837] REVISION  CHANGE-CAUSE
I0814 10:52:10.838] 1         <none>
I0814 10:52:10.838] 
I0814 10:52:10.838] error: unable to decode "hack/testdata/recursive/deployment/deployment/nginx-broken.yaml": Object 'Kind' is missing in '{"apiVersion":"apps/v1","ind":"Deployment","metadata":{"labels":{"app":"nginx2-deployment"},"name":"nginx2-deployment"},"spec":{"replicas":2,"selector":{"matchLabels":{"app":"nginx2"}},"template":{"metadata":{"labels":{"app":"nginx2"}},"spec":{"containers":[{"image":"k8s.gcr.io/nginx:1.7.9","name":"nginx","ports":[{"containerPort":80}]}]}}}}'
I0814 10:52:10.838] has:Object 'Kind' is missing
I0814 10:52:10.838] deployment.apps "nginx1-deployment" force deleted
I0814 10:52:10.838] deployment.apps "nginx0-deployment" force deleted
W0814 10:52:10.939] E0814 10:52:10.846573   53119 reflector.go:125] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0814 10:52:11.530] E0814 10:52:11.529618   53119 reflector.go:125] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0814 10:52:11.636] E0814 10:52:11.635523   53119 reflector.go:125] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0814 10:52:11.734] E0814 10:52:11.733935   53119 reflector.go:125] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
I0814 10:52:11.835] generic-resources.sh:426: Successful get rc {{range.items}}{{.metadata.name}}:{{end}}: 
I0814 10:52:11.965] replicationcontroller/busybox0 created
I0814 10:52:11.971] replicationcontroller/busybox1 created
W0814 10:52:12.072] E0814 10:52:11.847923   53119 reflector.go:125] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0814 10:52:12.072] error: error validating "hack/testdata/recursive/rc/rc/busybox-broken.yaml": error validating data: kind not set; if you choose to ignore these errors, turn validation off with --validate=false
W0814 10:52:12.073] I0814 10:52:11.971956   53119 event.go:255] Event(v1.ObjectReference{Kind:"ReplicationController", Namespace:"namespace-1565779922-32587", Name:"busybox0", UID:"48e6953d-97c6-48df-b411-55dd48faf28b", APIVersion:"v1", ResourceVersion:"1067", FieldPath:""}): type: 'Normal' reason: 'SuccessfulCreate' Created pod: busybox0-5b874
W0814 10:52:12.073] I0814 10:52:11.976733   53119 event.go:255] Event(v1.ObjectReference{Kind:"ReplicationController", Namespace:"namespace-1565779922-32587", Name:"busybox1", UID:"4e324b87-1dd6-46d4-b7b1-2de11a0314d9", APIVersion:"v1", ResourceVersion:"1069", FieldPath:""}): type: 'Normal' reason: 'SuccessfulCreate' Created pod: busybox1-bc5hs
I0814 10:52:12.173] generic-resources.sh:430: Successful get rc {{range.items}}{{.metadata.name}}:{{end}}: busybox0:busybox1:
I0814 10:52:12.179] Successful
I0814 10:52:12.180] message:no rollbacker has been implemented for "ReplicationController"
I0814 10:52:12.180] no rollbacker has been implemented for "ReplicationController"
... skipping 3 lines ...
I0814 10:52:12.182] message:no rollbacker has been implemented for "ReplicationController"
I0814 10:52:12.183] no rollbacker has been implemented for "ReplicationController"
I0814 10:52:12.183] unable to decode "hack/testdata/recursive/rc/rc/busybox-broken.yaml": Object 'Kind' is missing in '{"apiVersion":"v1","ind":"ReplicationController","metadata":{"labels":{"app":"busybox2"},"name":"busybox2"},"spec":{"replicas":1,"selector":{"app":"busybox2"},"template":{"metadata":{"labels":{"app":"busybox2"},"name":"busybox2"},"spec":{"containers":[{"command":["sleep","3600"],"image":"busybox","imagePullPolicy":"IfNotPresent","name":"busybox"}],"restartPolicy":"Always"}}}}'
I0814 10:52:12.184] has:Object 'Kind' is missing
I0814 10:52:12.282] Successful
I0814 10:52:12.283] message:unable to decode "hack/testdata/recursive/rc/rc/busybox-broken.yaml": Object 'Kind' is missing in '{"apiVersion":"v1","ind":"ReplicationController","metadata":{"labels":{"app":"busybox2"},"name":"busybox2"},"spec":{"replicas":1,"selector":{"app":"busybox2"},"template":{"metadata":{"labels":{"app":"busybox2"},"name":"busybox2"},"spec":{"containers":[{"command":["sleep","3600"],"image":"busybox","imagePullPolicy":"IfNotPresent","name":"busybox"}],"restartPolicy":"Always"}}}}'
I0814 10:52:12.283] error: replicationcontrollers "busybox0" pausing is not supported
I0814 10:52:12.283] error: replicationcontrollers "busybox1" pausing is not supported
I0814 10:52:12.284] has:Object 'Kind' is missing
I0814 10:52:12.284] Successful
I0814 10:52:12.285] message:unable to decode "hack/testdata/recursive/rc/rc/busybox-broken.yaml": Object 'Kind' is missing in '{"apiVersion":"v1","ind":"ReplicationController","metadata":{"labels":{"app":"busybox2"},"name":"busybox2"},"spec":{"replicas":1,"selector":{"app":"busybox2"},"template":{"metadata":{"labels":{"app":"busybox2"},"name":"busybox2"},"spec":{"containers":[{"command":["sleep","3600"],"image":"busybox","imagePullPolicy":"IfNotPresent","name":"busybox"}],"restartPolicy":"Always"}}}}'
I0814 10:52:12.285] error: replicationcontrollers "busybox0" pausing is not supported
I0814 10:52:12.285] error: replicationcontrollers "busybox1" pausing is not supported
I0814 10:52:12.285] has:replicationcontrollers "busybox0" pausing is not supported
I0814 10:52:12.287] Successful
I0814 10:52:12.287] message:unable to decode "hack/testdata/recursive/rc/rc/busybox-broken.yaml": Object 'Kind' is missing in '{"apiVersion":"v1","ind":"ReplicationController","metadata":{"labels":{"app":"busybox2"},"name":"busybox2"},"spec":{"replicas":1,"selector":{"app":"busybox2"},"template":{"metadata":{"labels":{"app":"busybox2"},"name":"busybox2"},"spec":{"containers":[{"command":["sleep","3600"],"image":"busybox","imagePullPolicy":"IfNotPresent","name":"busybox"}],"restartPolicy":"Always"}}}}'
I0814 10:52:12.288] error: replicationcontrollers "busybox0" pausing is not supported
I0814 10:52:12.288] error: replicationcontrollers "busybox1" pausing is not supported
I0814 10:52:12.288] has:replicationcontrollers "busybox1" pausing is not supported
I0814 10:52:12.381] Successful
I0814 10:52:12.382] message:unable to decode "hack/testdata/recursive/rc/rc/busybox-broken.yaml": Object 'Kind' is missing in '{"apiVersion":"v1","ind":"ReplicationController","metadata":{"labels":{"app":"busybox2"},"name":"busybox2"},"spec":{"replicas":1,"selector":{"app":"busybox2"},"template":{"metadata":{"labels":{"app":"busybox2"},"name":"busybox2"},"spec":{"containers":[{"command":["sleep","3600"],"image":"busybox","imagePullPolicy":"IfNotPresent","name":"busybox"}],"restartPolicy":"Always"}}}}'
I0814 10:52:12.382] error: replicationcontrollers "busybox0" resuming is not supported
I0814 10:52:12.383] error: replicationcontrollers "busybox1" resuming is not supported
I0814 10:52:12.383] has:Object 'Kind' is missing
I0814 10:52:12.383] Successful
I0814 10:52:12.384] message:unable to decode "hack/testdata/recursive/rc/rc/busybox-broken.yaml": Object 'Kind' is missing in '{"apiVersion":"v1","ind":"ReplicationController","metadata":{"labels":{"app":"busybox2"},"name":"busybox2"},"spec":{"replicas":1,"selector":{"app":"busybox2"},"template":{"metadata":{"labels":{"app":"busybox2"},"name":"busybox2"},"spec":{"containers":[{"command":["sleep","3600"],"image":"busybox","imagePullPolicy":"IfNotPresent","name":"busybox"}],"restartPolicy":"Always"}}}}'
I0814 10:52:12.384] error: replicationcontrollers "busybox0" resuming is not supported
I0814 10:52:12.384] error: replicationcontrollers "busybox1" resuming is not supported
I0814 10:52:12.385] has:replicationcontrollers "busybox0" resuming is not supported
I0814 10:52:12.386] Successful
I0814 10:52:12.386] message:unable to decode "hack/testdata/recursive/rc/rc/busybox-broken.yaml": Object 'Kind' is missing in '{"apiVersion":"v1","ind":"ReplicationController","metadata":{"labels":{"app":"busybox2"},"name":"busybox2"},"spec":{"replicas":1,"selector":{"app":"busybox2"},"template":{"metadata":{"labels":{"app":"busybox2"},"name":"busybox2"},"spec":{"containers":[{"command":["sleep","3600"],"image":"busybox","imagePullPolicy":"IfNotPresent","name":"busybox"}],"restartPolicy":"Always"}}}}'
I0814 10:52:12.387] error: replicationcontrollers "busybox0" resuming is not supported
I0814 10:52:12.387] error: replicationcontrollers "busybox1" resuming is not supported
I0814 10:52:12.388] has:replicationcontrollers "busybox0" resuming is not supported
I0814 10:52:12.467] replicationcontroller "busybox0" force deleted
I0814 10:52:12.472] replicationcontroller "busybox1" force deleted
W0814 10:52:12.573] warning: Immediate deletion does not wait for confirmation that the running resource has been terminated. The resource may continue to run on the cluster indefinitely.
W0814 10:52:12.574] error: unable to decode "hack/testdata/recursive/rc/rc/busybox-broken.yaml": Object 'Kind' is missing in '{"apiVersion":"v1","ind":"ReplicationController","metadata":{"labels":{"app":"busybox2"},"name":"busybox2"},"spec":{"replicas":1,"selector":{"app":"busybox2"},"template":{"metadata":{"labels":{"app":"busybox2"},"name":"busybox2"},"spec":{"containers":[{"command":["sleep","3600"],"image":"busybox","imagePullPolicy":"IfNotPresent","name":"busybox"}],"restartPolicy":"Always"}}}}'
W0814 10:52:12.575] E0814 10:52:12.531711   53119 reflector.go:125] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0814 10:52:12.638] E0814 10:52:12.637267   53119 reflector.go:125] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0814 10:52:12.736] E0814 10:52:12.735670   53119 reflector.go:125] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0814 10:52:12.850] E0814 10:52:12.849675   53119 reflector.go:125] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
I0814 10:52:13.479] Recording: run_namespace_tests
I0814 10:52:13.480] Running command: run_namespace_tests
I0814 10:52:13.501] 
I0814 10:52:13.504] +++ Running case: test-cmd.run_namespace_tests 
I0814 10:52:13.506] +++ working dir: /go/src/k8s.io/kubernetes
I0814 10:52:13.508] +++ command: run_namespace_tests
I0814 10:52:13.516] +++ [0814 10:52:13] Testing kubectl(v1:namespaces)
I0814 10:52:13.592] namespace/my-namespace created
I0814 10:52:13.691] core.sh:1308: Successful get namespaces/my-namespace {{.metadata.name}}: my-namespace
I0814 10:52:13.766] namespace "my-namespace" deleted
W0814 10:52:13.866] E0814 10:52:13.533413   53119 reflector.go:125] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0814 10:52:13.867] E0814 10:52:13.638743   53119 reflector.go:125] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0814 10:52:13.867] E0814 10:52:13.737139   53119 reflector.go:125] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0814 10:52:13.868] E0814 10:52:13.851188   53119 reflector.go:125] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0814 10:52:14.535] E0814 10:52:14.535054   53119 reflector.go:125] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0814 10:52:14.641] E0814 10:52:14.640744   53119 reflector.go:125] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0814 10:52:14.739] E0814 10:52:14.738602   53119 reflector.go:125] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0814 10:52:14.853] E0814 10:52:14.853027   53119 reflector.go:125] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0814 10:52:15.537] E0814 10:52:15.536742   53119 reflector.go:125] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0814 10:52:15.643] E0814 10:52:15.642646   53119 reflector.go:125] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0814 10:52:15.740] E0814 10:52:15.740076   53119 reflector.go:125] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0814 10:52:15.855] E0814 10:52:15.854627   53119 reflector.go:125] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0814 10:52:16.539] E0814 10:52:16.538638   53119 reflector.go:125] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0814 10:52:16.644] E0814 10:52:16.644257   53119 reflector.go:125] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0814 10:52:16.742] E0814 10:52:16.741820   53119 reflector.go:125] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0814 10:52:16.856] E0814 10:52:16.856177   53119 reflector.go:125] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0814 10:52:17.541] E0814 10:52:17.540336   53119 reflector.go:125] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0814 10:52:17.646] E0814 10:52:17.646016   53119 reflector.go:125] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0814 10:52:17.744] E0814 10:52:17.743517   53119 reflector.go:125] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0814 10:52:17.858] E0814 10:52:17.857723   53119 reflector.go:125] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0814 10:52:18.542] E0814 10:52:18.542074   53119 reflector.go:125] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0814 10:52:18.648] E0814 10:52:18.647484   53119 reflector.go:125] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0814 10:52:18.746] E0814 10:52:18.745098   53119 reflector.go:125] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
I0814 10:52:18.859] namespace/my-namespace condition met
I0814 10:52:18.943] Successful
I0814 10:52:18.944] message:Error from server (NotFound): namespaces "my-namespace" not found
I0814 10:52:18.944] has: not found
I0814 10:52:19.021] namespace/my-namespace created
I0814 10:52:19.114] core.sh:1317: Successful get namespaces/my-namespace {{.metadata.name}}: my-namespace
I0814 10:52:19.322] Successful
I0814 10:52:19.323] message:warning: deleting cluster-scoped resources, not scoped to the provided namespace
I0814 10:52:19.323] namespace "kube-node-lease" deleted
... skipping 29 lines ...
I0814 10:52:19.334] namespace "namespace-1565779883-5579" deleted
I0814 10:52:19.334] namespace "namespace-1565779884-947" deleted
I0814 10:52:19.334] namespace "namespace-1565779886-11469" deleted
I0814 10:52:19.334] namespace "namespace-1565779888-19118" deleted
I0814 10:52:19.334] namespace "namespace-1565779921-1726" deleted
I0814 10:52:19.334] namespace "namespace-1565779922-32587" deleted
I0814 10:52:19.335] Error from server (Forbidden): namespaces "default" is forbidden: this namespace may not be deleted
I0814 10:52:19.335] Error from server (Forbidden): namespaces "kube-public" is forbidden: this namespace may not be deleted
I0814 10:52:19.335] Error from server (Forbidden): namespaces "kube-system" is forbidden: this namespace may not be deleted
I0814 10:52:19.335] has:warning: deleting cluster-scoped resources
I0814 10:52:19.335] Successful
I0814 10:52:19.335] message:warning: deleting cluster-scoped resources, not scoped to the provided namespace
I0814 10:52:19.336] namespace "kube-node-lease" deleted
I0814 10:52:19.336] namespace "my-namespace" deleted
I0814 10:52:19.336] namespace "namespace-1565779788-2083" deleted
... skipping 27 lines ...
I0814 10:52:19.340] namespace "namespace-1565779883-5579" deleted
I0814 10:52:19.340] namespace "namespace-1565779884-947" deleted
I0814 10:52:19.340] namespace "namespace-1565779886-11469" deleted
I0814 10:52:19.341] namespace "namespace-1565779888-19118" deleted
I0814 10:52:19.341] namespace "namespace-1565779921-1726" deleted
I0814 10:52:19.341] namespace "namespace-1565779922-32587" deleted
I0814 10:52:19.341] Error from server (Forbidden): namespaces "default" is forbidden: this namespace may not be deleted
I0814 10:52:19.341] Error from server (Forbidden): namespaces "kube-public" is forbidden: this namespace may not be deleted
I0814 10:52:19.341] Error from server (Forbidden): namespaces "kube-system" is forbidden: this namespace may not be deleted
I0814 10:52:19.342] has:namespace "my-namespace" deleted
I0814 10:52:19.435] core.sh:1329: Successful get namespaces {{range.items}}{{ if eq .metadata.name \"other\" }}found{{end}}{{end}}:: :
I0814 10:52:19.514] namespace/other created
I0814 10:52:19.605] core.sh:1333: Successful get namespaces/other {{.metadata.name}}: other
I0814 10:52:19.695] core.sh:1337: Successful get pods --namespace=other {{range.items}}{{.metadata.name}}:{{end}}: 
I0814 10:52:19.860] pod/valid-pod created
I0814 10:52:19.964] core.sh:1341: Successful get pods --namespace=other {{range.items}}{{.metadata.name}}:{{end}}: valid-pod:
I0814 10:52:20.061] core.sh:1343: Successful get pods -n other {{range.items}}{{.metadata.name}}:{{end}}: valid-pod:
I0814 10:52:20.145] Successful
I0814 10:52:20.145] message:error: a resource cannot be retrieved by name across all namespaces
I0814 10:52:20.146] has:a resource cannot be retrieved by name across all namespaces
I0814 10:52:20.237] core.sh:1350: Successful get pods --namespace=other {{range.items}}{{.metadata.name}}:{{end}}: valid-pod:
I0814 10:52:20.323] pod "valid-pod" force deleted
I0814 10:52:20.427] core.sh:1354: Successful get pods --namespace=other {{range.items}}{{.metadata.name}}:{{end}}: 
I0814 10:52:20.509] namespace "other" deleted
W0814 10:52:20.610] E0814 10:52:18.858828   53119 reflector.go:125] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0814 10:52:20.611] E0814 10:52:19.543816   53119 reflector.go:125] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0814 10:52:20.611] I0814 10:52:19.571746   53119 controller_utils.go:1029] Waiting for caches to sync for garbage collector controller
W0814 10:52:20.611] I0814 10:52:19.645771   53119 controller_utils.go:1029] Waiting for caches to sync for resource quota controller
W0814 10:52:20.611] E0814 10:52:19.648978   53119 reflector.go:125] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0814 10:52:20.611] I0814 10:52:19.672228   53119 controller_utils.go:1036] Caches are synced for garbage collector controller
W0814 10:52:20.612] I0814 10:52:19.746473   53119 controller_utils.go:1036] Caches are synced for resource quota controller
W0814 10:52:20.612] E0814 10:52:19.746906   53119 reflector.go:125] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0814 10:52:20.612] E0814 10:52:19.860456   53119 reflector.go:125] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0814 10:52:20.612] warning: Immediate deletion does not wait for confirmation that the running resource has been terminated. The resource may continue to run on the cluster indefinitely.
W0814 10:52:20.612] E0814 10:52:20.545666   53119 reflector.go:125] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0814 10:52:20.651] E0814 10:52:20.650715   53119 reflector.go:125] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0814 10:52:20.749] E0814 10:52:20.748794   53119 reflector.go:125] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0814 10:52:20.862] E0814 10:52:20.861983   53119 reflector.go:125] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0814 10:52:21.547] E0814 10:52:21.547236   53119 reflector.go:125] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0814 10:52:21.653] E0814 10:52:21.652420   53119 reflector.go:125] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0814 10:52:21.751] E0814 10:52:21.750766   53119 reflector.go:125] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0814 10:52:21.864] E0814 10:52:21.863707   53119 reflector.go:125] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0814 10:52:22.455] I0814 10:52:22.454330   53119 horizontal.go:341] Horizontal Pod Autoscaler busybox0 has been deleted in namespace-1565779922-32587
W0814 10:52:22.461] I0814 10:52:22.460256   53119 horizontal.go:341] Horizontal Pod Autoscaler busybox1 has been deleted in namespace-1565779922-32587
W0814 10:52:22.549] E0814 10:52:22.548847   53119 reflector.go:125] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0814 10:52:22.655] E0814 10:52:22.654411   53119 reflector.go:125] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0814 10:52:22.753] E0814 10:52:22.752516   53119 reflector.go:125] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0814 10:52:22.865] E0814 10:52:22.865094   53119 reflector.go:125] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0814 10:52:23.551] E0814 10:52:23.550910   53119 reflector.go:125] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0814 10:52:23.656] E0814 10:52:23.655887   53119 reflector.go:125] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0814 10:52:23.755] E0814 10:52:23.754357   53119 reflector.go:125] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0814 10:52:23.867] E0814 10:52:23.866594   53119 reflector.go:125] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0814 10:52:24.552] E0814 10:52:24.552107   53119 reflector.go:125] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0814 10:52:24.657] E0814 10:52:24.656823   53119 reflector.go:125] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0814 10:52:24.755] E0814 10:52:24.755197   53119 reflector.go:125] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0814 10:52:24.868] E0814 10:52:24.868141   53119 reflector.go:125] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0814 10:52:25.555] E0814 10:52:25.554455   53119 reflector.go:125] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
I0814 10:52:25.656] +++ exit code: 0
I0814 10:52:25.669] Recording: run_secrets_test
I0814 10:52:25.670] Running command: run_secrets_test
I0814 10:52:25.692] 
I0814 10:52:25.694] +++ Running case: test-cmd.run_secrets_test 
I0814 10:52:25.696] +++ working dir: /go/src/k8s.io/kubernetes
... skipping 57 lines ...
I0814 10:52:27.538] core.sh:767: Successful get secret/test-secret --namespace=test-secrets {{.type}}: kubernetes.io/tls
I0814 10:52:27.610] secret "test-secret" deleted
I0814 10:52:27.689] secret/test-secret created
I0814 10:52:27.775] core.sh:773: Successful get secret/test-secret --namespace=test-secrets {{.metadata.name}}: test-secret
I0814 10:52:27.857] core.sh:774: Successful get secret/test-secret --namespace=test-secrets {{.type}}: kubernetes.io/tls
I0814 10:52:27.931] secret "test-secret" deleted
W0814 10:52:28.031] E0814 10:52:25.658273   53119 reflector.go:125] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0814 10:52:28.032] E0814 10:52:25.756678   53119 reflector.go:125] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0814 10:52:28.032] E0814 10:52:25.869774   53119 reflector.go:125] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0814 10:52:28.032] I0814 10:52:25.943828   70085 loader.go:375] Config loaded from file:  /tmp/tmp.LD8cK6Yz7f/.kube/config
W0814 10:52:28.033] E0814 10:52:26.555595   53119 reflector.go:125] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0814 10:52:28.033] E0814 10:52:26.660056   53119 reflector.go:125] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0814 10:52:28.033] E0814 10:52:26.757869   53119 reflector.go:125] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0814 10:52:28.033] E0814 10:52:26.871817   53119 reflector.go:125] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0814 10:52:28.033] E0814 10:52:27.557008   53119 reflector.go:125] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0814 10:52:28.034] E0814 10:52:27.661294   53119 reflector.go:125] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0814 10:52:28.034] E0814 10:52:27.759256   53119 reflector.go:125] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0814 10:52:28.034] E0814 10:52:27.873222   53119 reflector.go:125] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
I0814 10:52:28.134] secret/secret-string-data created
I0814 10:52:28.169] core.sh:796: Successful get secret/secret-string-data --namespace=test-secrets  {{.data}}: map[k1:djE= k2:djI=]
I0814 10:52:28.249] core.sh:797: Successful get secret/secret-string-data --namespace=test-secrets  {{.data}}: map[k1:djE= k2:djI=]
I0814 10:52:28.333] core.sh:798: Successful get secret/secret-string-data --namespace=test-secrets  {{.stringData}}: <no value>
I0814 10:52:28.404] secret "secret-string-data" deleted
I0814 10:52:28.495] core.sh:807: Successful get secrets --namespace=test-secrets {{range.items}}{{.metadata.name}}:{{end}}: 
I0814 10:52:28.648] secret "test-secret" deleted
I0814 10:52:28.723] namespace "test-secrets" deleted
W0814 10:52:28.823] E0814 10:52:28.558236   53119 reflector.go:125] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0814 10:52:28.824] E0814 10:52:28.662620   53119 reflector.go:125] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0814 10:52:28.824] E0814 10:52:28.760569   53119 reflector.go:125] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0814 10:52:28.875] E0814 10:52:28.874718   53119 reflector.go:125] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0814 10:52:29.560] E0814 10:52:29.559661   53119 reflector.go:125] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0814 10:52:29.664] E0814 10:52:29.664075   53119 reflector.go:125] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0814 10:52:29.762] E0814 10:52:29.762075   53119 reflector.go:125] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0814 10:52:29.876] E0814 10:52:29.876187   53119 reflector.go:125] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0814 10:52:30.561] E0814 10:52:30.561016   53119 reflector.go:125] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0814 10:52:30.666] E0814 10:52:30.665672   53119 reflector.go:125] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0814 10:52:30.764] E0814 10:52:30.763661   53119 reflector.go:125] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0814 10:52:30.878] E0814 10:52:30.877998   53119 reflector.go:125] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0814 10:52:31.563] E0814 10:52:31.562527   53119 reflector.go:125] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0814 10:52:31.667] E0814 10:52:31.667025   53119 reflector.go:125] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0814 10:52:31.765] E0814 10:52:31.765223   53119 reflector.go:125] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0814 10:52:31.880] E0814 10:52:31.879607   53119 reflector.go:125] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0814 10:52:32.564] E0814 10:52:32.564052   53119 reflector.go:125] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0814 10:52:32.669] E0814 10:52:32.668412   53119 reflector.go:125] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0814 10:52:32.767] E0814 10:52:32.766716   53119 reflector.go:125] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0814 10:52:32.882] E0814 10:52:32.881321   53119 reflector.go:125] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0814 10:52:33.566] E0814 10:52:33.565601   53119 reflector.go:125] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0814 10:52:33.670] E0814 10:52:33.669978   53119 reflector.go:125] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0814 10:52:33.768] E0814 10:52:33.767634   53119 reflector.go:125] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
I0814 10:52:33.868] +++ exit code: 0
I0814 10:52:33.869] Recording: run_configmap_tests
I0814 10:52:33.869] Running command: run_configmap_tests
I0814 10:52:33.869] 
I0814 10:52:33.870] +++ Running case: test-cmd.run_configmap_tests 
I0814 10:52:33.873] +++ working dir: /go/src/k8s.io/kubernetes
I0814 10:52:33.875] +++ command: run_configmap_tests
I0814 10:52:33.886] +++ [0814 10:52:33] Creating namespace namespace-1565779953-26034
I0814 10:52:33.959] namespace/namespace-1565779953-26034 created
I0814 10:52:34.029] Context "test" modified.
I0814 10:52:34.035] +++ [0814 10:52:34] Testing configmaps
W0814 10:52:34.136] E0814 10:52:33.882646   53119 reflector.go:125] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
I0814 10:52:34.237] configmap/test-configmap created
I0814 10:52:34.312] core.sh:28: Successful get configmap/test-configmap {{.metadata.name}}: test-configmap
I0814 10:52:34.386] configmap "test-configmap" deleted
I0814 10:52:34.481] core.sh:33: Successful get namespaces {{range.items}}{{ if eq .metadata.name \"test-configmaps\" }}found{{end}}{{end}}:: :
I0814 10:52:34.551] namespace/test-configmaps created
I0814 10:52:34.636] core.sh:37: Successful get namespaces/test-configmaps {{.metadata.name}}: test-configmaps
I0814 10:52:34.724] core.sh:41: Successful get configmaps {{range.items}}{{ if eq .metadata.name \"test-configmap\" }}found{{end}}{{end}}:: :
I0814 10:52:34.809] core.sh:42: Successful get configmaps {{range.items}}{{ if eq .metadata.name \"test-binary-configmap\" }}found{{end}}{{end}}:: :
I0814 10:52:34.879] configmap/test-configmap created
I0814 10:52:34.956] configmap/test-binary-configmap created
I0814 10:52:35.044] core.sh:48: Successful get configmap/test-configmap --namespace=test-configmaps {{.metadata.name}}: test-configmap
I0814 10:52:35.181] core.sh:49: Successful get configmap/test-binary-configmap --namespace=test-configmaps {{.metadata.name}}: test-binary-configmap
I0814 10:52:35.678] configmap "test-configmap" deleted
W0814 10:52:35.780] E0814 10:52:34.566893   53119 reflector.go:125] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0814 10:52:35.781] E0814 10:52:34.671440   53119 reflector.go:125] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0814 10:52:35.782] E0814 10:52:34.768954   53119 reflector.go:125] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0814 10:52:35.783] E0814 10:52:34.883916   53119 reflector.go:125] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0814 10:52:35.783] E0814 10:52:35.569606   53119 reflector.go:125] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0814 10:52:35.784] E0814 10:52:35.674193   53119 reflector.go:125] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0814 10:52:35.784] E0814 10:52:35.771215   53119 reflector.go:125] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
I0814 10:52:35.885] configmap "test-binary-configmap" deleted
W0814 10:52:35.987] E0814 10:52:35.887134   53119 reflector.go:125] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
I0814 10:52:36.089] namespace "test-configmaps" deleted
W0814 10:52:36.572] E0814 10:52:36.571448   53119 reflector.go:125] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0814 10:52:36.676] E0814 10:52:36.675935   53119 reflector.go:125] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0814 10:52:36.773] E0814 10:52:36.772866   53119 reflector.go:125] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0814 10:52:36.888] E0814 10:52:36.888071   53119 reflector.go:125] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0814 10:52:37.574] E0814 10:52:37.573230   53119 reflector.go:125] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0814 10:52:37.678] E0814 10:52:37.677528   53119 reflector.go:125] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0814 10:52:37.775] E0814 10:52:37.774761   53119 reflector.go:125] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0814 10:52:37.890] E0814 10:52:37.889732   53119 reflector.go:125] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0814 10:52:38.575] E0814 10:52:38.575047   53119 reflector.go:125] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0814 10:52:38.679] E0814 10:52:38.679113   53119 reflector.go:125] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0814 10:52:38.776] E0814 10:52:38.775977   53119 reflector.go:125] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0814 10:52:38.892] E0814 10:52:38.891787   53119 reflector.go:125] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0814 10:52:39.577] E0814 10:52:39.576937   53119 reflector.go:125] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0814 10:52:39.681] E0814 10:52:39.680609   53119 reflector.go:125] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0814 10:52:39.778] E0814 10:52:39.777322   53119 reflector.go:125] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0814 10:52:39.894] E0814 10:52:39.893502   53119 reflector.go:125] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0814 10:52:40.579] E0814 10:52:40.578490   53119 reflector.go:125] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0814 10:52:40.682] E0814 10:52:40.682095   53119 reflector.go:125] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0814 10:52:40.779] E0814 10:52:40.778952   53119 reflector.go:125] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0814 10:52:40.895] E0814 10:52:40.895045   53119 reflector.go:125] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
I0814 10:52:41.157] +++ exit code: 0
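For reference, the run_configmap_tests case above is roughly equivalent to the following kubectl flow; this is a minimal sketch, with the literal data values assumed (core.sh asserts on the same go-template expressions shown in the log):
  kubectl create configmap test-configmap --from-literal=key1=value1    # data is illustrative
  kubectl get configmap/test-configmap -o go-template='{{.metadata.name}}'
  kubectl delete configmap test-configmap
  kubectl create namespace test-configmaps
  kubectl create configmap test-configmap --namespace=test-configmaps --from-literal=key1=value1
  kubectl create configmap test-binary-configmap --namespace=test-configmaps --from-file=binary-file   # file name assumed
  kubectl delete namespace test-configmaps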
I0814 10:52:41.194] Recording: run_client_config_tests
I0814 10:52:41.195] Running command: run_client_config_tests
I0814 10:52:41.214] 
I0814 10:52:41.216] +++ Running case: test-cmd.run_client_config_tests 
I0814 10:52:41.219] +++ working dir: /go/src/k8s.io/kubernetes
I0814 10:52:41.221] +++ command: run_client_config_tests
I0814 10:52:41.234] +++ [0814 10:52:41] Creating namespace namespace-1565779961-27576
I0814 10:52:41.308] namespace/namespace-1565779961-27576 created
I0814 10:52:41.377] Context "test" modified.
I0814 10:52:41.384] +++ [0814 10:52:41] Testing client config
I0814 10:52:41.458] Successful
I0814 10:52:41.459] message:error: stat missing: no such file or directory
I0814 10:52:41.459] has:missing: no such file or directory
I0814 10:52:41.529] Successful
I0814 10:52:41.529] message:error: stat missing: no such file or directory
I0814 10:52:41.529] has:missing: no such file or directory
I0814 10:52:41.602] Successful
I0814 10:52:41.603] message:error: stat missing: no such file or directory
I0814 10:52:41.603] has:missing: no such file or directory
I0814 10:52:41.677] Successful
I0814 10:52:41.678] message:Error in configuration: context was not found for specified context: missing-context
I0814 10:52:41.679] has:context was not found for specified context: missing-context
I0814 10:52:41.748] Successful
I0814 10:52:41.748] message:error: no server found for cluster "missing-cluster"
I0814 10:52:41.748] has:no server found for cluster "missing-cluster"
I0814 10:52:41.824] Successful
I0814 10:52:41.825] message:error: auth info "missing-user" does not exist
I0814 10:52:41.825] has:auth info "missing-user" does not exist
W0814 10:52:41.926] E0814 10:52:41.579973   53119 reflector.go:125] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0814 10:52:41.926] E0814 10:52:41.683575   53119 reflector.go:125] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0814 10:52:41.927] E0814 10:52:41.780642   53119 reflector.go:125] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0814 10:52:41.927] E0814 10:52:41.896641   53119 reflector.go:125] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
I0814 10:52:42.028] Successful
I0814 10:52:42.028] message:error: error loading config file "/tmp/newconfig.yaml": no kind "Config" is registered for version "v-1" in scheme "k8s.io/client-go/tools/clientcmd/api/latest/latest.go:50"
I0814 10:52:42.028] has:error loading config file
I0814 10:52:42.047] Successful
I0814 10:52:42.047] message:error: stat missing-config: no such file or directory
I0814 10:52:42.048] has:no such file or directory
I0814 10:52:42.059] +++ exit code: 0
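The client-config failures above come from pointing kubectl at kubeconfig pieces that do not exist; a sketch of the kinds of invocations that produce them (the flag values match the log, the verb is assumed):
  kubectl get pods --kubeconfig=missing               # "stat missing: no such file or directory"
  kubectl get pods --context=missing-context          # "context was not found for specified context"
  kubectl get pods --cluster=missing-cluster          # "no server found for cluster"
  kubectl get pods --user=missing-user                # "auth info \"missing-user\" does not exist"
  kubectl get pods --kubeconfig=/tmp/newconfig.yaml   # fails when the file declares an unregistered Config version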
I0814 10:52:42.099] Recording: run_service_accounts_tests
I0814 10:52:42.100] Running command: run_service_accounts_tests
I0814 10:52:42.123] 
I0814 10:52:42.125] +++ Running case: test-cmd.run_service_accounts_tests 
... skipping 7 lines ...
I0814 10:52:42.476] namespace/test-service-accounts created
I0814 10:52:42.571] core.sh:832: Successful get namespaces/test-service-accounts {{.metadata.name}}: test-service-accounts
I0814 10:52:42.653] serviceaccount/test-service-account created
I0814 10:52:42.756] core.sh:838: Successful get serviceaccount/test-service-account --namespace=test-service-accounts {{.metadata.name}}: test-service-account
I0814 10:52:42.837] serviceaccount "test-service-account" deleted
I0814 10:52:42.925] namespace "test-service-accounts" deleted
W0814 10:52:43.026] E0814 10:52:42.581644   53119 reflector.go:125] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0814 10:52:43.026] E0814 10:52:42.685173   53119 reflector.go:125] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0814 10:52:43.027] E0814 10:52:42.782128   53119 reflector.go:125] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0814 10:52:43.027] E0814 10:52:42.898199   53119 reflector.go:125] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0814 10:52:43.584] E0814 10:52:43.583144   53119 reflector.go:125] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0814 10:52:43.687] E0814 10:52:43.686787   53119 reflector.go:125] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0814 10:52:43.785] E0814 10:52:43.784049   53119 reflector.go:125] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0814 10:52:43.901] E0814 10:52:43.900136   53119 reflector.go:125] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0814 10:52:44.585] E0814 10:52:44.584897   53119 reflector.go:125] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0814 10:52:44.689] E0814 10:52:44.688413   53119 reflector.go:125] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0814 10:52:44.786] E0814 10:52:44.785762   53119 reflector.go:125] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0814 10:52:44.902] E0814 10:52:44.901995   53119 reflector.go:125] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0814 10:52:45.587] E0814 10:52:45.586781   53119 reflector.go:125] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0814 10:52:45.690] E0814 10:52:45.690072   53119 reflector.go:125] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0814 10:52:45.788] E0814 10:52:45.787495   53119 reflector.go:125] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0814 10:52:45.904] E0814 10:52:45.903513   53119 reflector.go:125] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0814 10:52:46.589] E0814 10:52:46.588544   53119 reflector.go:125] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0814 10:52:46.693] E0814 10:52:46.692559   53119 reflector.go:125] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0814 10:52:46.789] E0814 10:52:46.788979   53119 reflector.go:125] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0814 10:52:46.906] E0814 10:52:46.905569   53119 reflector.go:125] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0814 10:52:47.591] E0814 10:52:47.590323   53119 reflector.go:125] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0814 10:52:47.694] E0814 10:52:47.694190   53119 reflector.go:125] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0814 10:52:47.791] E0814 10:52:47.790760   53119 reflector.go:125] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0814 10:52:47.908] E0814 10:52:47.907182   53119 reflector.go:125] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
I0814 10:52:48.048] +++ exit code: 0
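run_service_accounts_tests exercises the same create/get/delete pattern against ServiceAccounts; roughly (a sketch, not the verbatim script):
  kubectl create namespace test-service-accounts
  kubectl create serviceaccount test-service-account --namespace=test-service-accounts
  kubectl get serviceaccount/test-service-account --namespace=test-service-accounts -o go-template='{{.metadata.name}}'
  kubectl delete serviceaccount test-service-account --namespace=test-service-accounts
  kubectl delete namespace test-service-accounts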
I0814 10:52:48.081] Recording: run_job_tests
I0814 10:52:48.082] Running command: run_job_tests
I0814 10:52:48.100] 
I0814 10:52:48.102] +++ Running case: test-cmd.run_job_tests 
I0814 10:52:48.105] +++ working dir: /go/src/k8s.io/kubernetes
... skipping 14 lines ...
I0814 10:52:48.869] Labels:                        run=pi
I0814 10:52:48.869] Annotations:                   <none>
I0814 10:52:48.869] Schedule:                      59 23 31 2 *
I0814 10:52:48.869] Concurrency Policy:            Allow
I0814 10:52:48.870] Suspend:                       False
I0814 10:52:48.870] Successful Job History Limit:  3
I0814 10:52:48.870] Failed Job History Limit:      1
I0814 10:52:48.870] Starting Deadline Seconds:     <unset>
I0814 10:52:48.870] Selector:                      <unset>
I0814 10:52:48.870] Parallelism:                   <unset>
I0814 10:52:48.870] Completions:                   <unset>
I0814 10:52:48.870] Pod Template:
I0814 10:52:48.870]   Labels:  run=pi
... skipping 32 lines ...
I0814 10:52:49.410]                 run=pi
I0814 10:52:49.410] Annotations:    cronjob.kubernetes.io/instantiate: manual
I0814 10:52:49.410] Controlled By:  CronJob/pi
I0814 10:52:49.410] Parallelism:    1
I0814 10:52:49.411] Completions:    1
I0814 10:52:49.411] Start Time:     Wed, 14 Aug 2019 10:52:49 +0000
I0814 10:52:49.411] Pods Statuses:  1 Running / 0 Succeeded / 0 Failed
I0814 10:52:49.411] Pod Template:
I0814 10:52:49.412]   Labels:  controller-uid=f4dc9c74-26de-4085-a719-dba314e0b84c
I0814 10:52:49.412]            job-name=test-job
I0814 10:52:49.412]            run=pi
I0814 10:52:49.412]   Containers:
I0814 10:52:49.412]    pi:
... skipping 15 lines ...
I0814 10:52:49.416]   Type    Reason            Age   From            Message
I0814 10:52:49.416]   ----    ------            ----  ----            -------
I0814 10:52:49.416]   Normal  SuccessfulCreate  0s    job-controller  Created pod: test-job-n4bzg
I0814 10:52:49.487] job.batch "test-job" deleted
I0814 10:52:49.565] cronjob.batch "pi" deleted
I0814 10:52:49.643] namespace "test-jobs" deleted
W0814 10:52:49.744] E0814 10:52:48.591698   53119 reflector.go:125] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0814 10:52:49.745] kubectl run --generator=cronjob/v1beta1 is DEPRECATED and will be removed in a future version. Use kubectl run --generator=run-pod/v1 or kubectl create instead.
W0814 10:52:49.745] E0814 10:52:48.695454   53119 reflector.go:125] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0814 10:52:49.745] E0814 10:52:48.792278   53119 reflector.go:125] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0814 10:52:49.746] E0814 10:52:48.908760   53119 reflector.go:125] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0814 10:52:49.746] I0814 10:52:49.152281   53119 event.go:255] Event(v1.ObjectReference{Kind:"Job", Namespace:"test-jobs", Name:"test-job", UID:"f4dc9c74-26de-4085-a719-dba314e0b84c", APIVersion:"batch/v1", ResourceVersion:"1349", FieldPath:""}): type: 'Normal' reason: 'SuccessfulCreate' Created pod: test-job-n4bzg
W0814 10:52:49.746] E0814 10:52:49.593271   53119 reflector.go:125] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0814 10:52:49.746] E0814 10:52:49.697198   53119 reflector.go:125] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0814 10:52:49.794] E0814 10:52:49.793865   53119 reflector.go:125] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0814 10:52:49.910] E0814 10:52:49.910187   53119 reflector.go:125] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0814 10:52:50.595] E0814 10:52:50.595099   53119 reflector.go:125] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0814 10:52:50.699] E0814 10:52:50.698775   53119 reflector.go:125] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0814 10:52:50.796] E0814 10:52:50.796118   53119 reflector.go:125] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0814 10:52:50.912] E0814 10:52:50.911873   53119 reflector.go:125] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0814 10:52:51.597] E0814 10:52:51.596661   53119 reflector.go:125] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0814 10:52:51.701] E0814 10:52:51.700302   53119 reflector.go:125] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0814 10:52:51.798] E0814 10:52:51.797933   53119 reflector.go:125] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0814 10:52:51.914] E0814 10:52:51.913601   53119 reflector.go:125] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0814 10:52:52.599] E0814 10:52:52.598652   53119 reflector.go:125] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0814 10:52:52.702] E0814 10:52:52.701904   53119 reflector.go:125] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0814 10:52:52.800] E0814 10:52:52.799388   53119 reflector.go:125] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0814 10:52:52.916] E0814 10:52:52.915306   53119 reflector.go:125] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0814 10:52:53.601] E0814 10:52:53.600237   53119 reflector.go:125] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0814 10:52:53.704] E0814 10:52:53.703693   53119 reflector.go:125] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0814 10:52:53.801] E0814 10:52:53.800888   53119 reflector.go:125] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0814 10:52:53.918] E0814 10:52:53.917324   53119 reflector.go:125] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0814 10:52:54.603] E0814 10:52:54.602285   53119 reflector.go:125] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0814 10:52:54.705] E0814 10:52:54.704914   53119 reflector.go:125] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0814 10:52:54.803] E0814 10:52:54.802926   53119 reflector.go:125] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
I0814 10:52:54.904] +++ exit code: 0
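The job case above creates a CronJob with the deprecated cronjob generator (hence the deprecation warning in the log) and then instantiates a Job from it, which is what produces the cronjob.kubernetes.io/instantiate: manual annotation and the Controlled By: CronJob/pi owner in the describe output. Approximately, with the container image and args assumed:
  kubectl run pi -n test-jobs --generator=cronjob/v1beta1 --schedule="59 23 31 2 *" --image=perl -- perl -Mbignum=bpi -wle 'print bpi(20)'   # image/args are illustrative
  kubectl create job test-job --from=cronjob/pi -n test-jobs
  kubectl describe job test-job -n test-jobs
  kubectl delete job test-job -n test-jobs && kubectl delete cronjob pi -n test-jobs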
I0814 10:52:54.904] Recording: run_create_job_tests
I0814 10:52:54.905] Running command: run_create_job_tests
I0814 10:52:54.905] 
I0814 10:52:54.905] +++ Running case: test-cmd.run_create_job_tests 
I0814 10:52:54.906] +++ working dir: /go/src/k8s.io/kubernetes
... skipping 27 lines ...
I0814 10:52:56.179] +++ [0814 10:52:56] Testing pod templates
I0814 10:52:56.273] core.sh:1415: Successful get podtemplates {{range.items}}{{.metadata.name}}:{{end}}: 
I0814 10:52:56.437] podtemplate/nginx created
I0814 10:52:56.534] core.sh:1419: Successful get podtemplates {{range.items}}{{.metadata.name}}:{{end}}: nginx:
I0814 10:52:56.607] NAME    CONTAINERS   IMAGES   POD LABELS
I0814 10:52:56.608] nginx   nginx        nginx    name=nginx
W0814 10:52:56.708] E0814 10:52:54.918610   53119 reflector.go:125] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0814 10:52:56.709] I0814 10:52:55.058117   53119 event.go:255] Event(v1.ObjectReference{Kind:"Job", Namespace:"namespace-1565779974-29665", Name:"test-job", UID:"abdb499b-8343-4c81-b984-777bde1bb4b1", APIVersion:"batch/v1", ResourceVersion:"1366", FieldPath:""}): type: 'Normal' reason: 'SuccessfulCreate' Created pod: test-job-xpfw5
W0814 10:52:56.710] I0814 10:52:55.323380   53119 event.go:255] Event(v1.ObjectReference{Kind:"Job", Namespace:"namespace-1565779974-29665", Name:"test-job-pi", UID:"51b62ca2-4bfa-4743-ac7e-9263d9e3e326", APIVersion:"batch/v1", ResourceVersion:"1373", FieldPath:""}): type: 'Normal' reason: 'SuccessfulCreate' Created pod: test-job-pi-55k7m
W0814 10:52:56.710] kubectl run --generator=cronjob/v1beta1 is DEPRECATED and will be removed in a future version. Use kubectl run --generator=run-pod/v1 or kubectl create instead.
W0814 10:52:56.711] E0814 10:52:55.603864   53119 reflector.go:125] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0814 10:52:56.711] I0814 10:52:55.676475   53119 event.go:255] Event(v1.ObjectReference{Kind:"Job", Namespace:"namespace-1565779974-29665", Name:"my-pi", UID:"679c8ad0-089f-43f5-9894-c90d6f5006fc", APIVersion:"batch/v1", ResourceVersion:"1381", FieldPath:""}): type: 'Normal' reason: 'SuccessfulCreate' Created pod: my-pi-psc44
W0814 10:52:56.711] E0814 10:52:55.706581   53119 reflector.go:125] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0814 10:52:56.712] E0814 10:52:55.804402   53119 reflector.go:125] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0814 10:52:56.712] E0814 10:52:55.920118   53119 reflector.go:125] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0814 10:52:56.712] I0814 10:52:56.433925   49657 controller.go:606] quota admission added evaluator for: podtemplates
W0814 10:52:56.713] E0814 10:52:56.605215   53119 reflector.go:125] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0814 10:52:56.713] E0814 10:52:56.708197   53119 reflector.go:125] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0814 10:52:56.806] E0814 10:52:56.806062   53119 reflector.go:125] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
I0814 10:52:56.907] core.sh:1427: Successful get podtemplates {{range.items}}{{.metadata.name}}:{{end}}: nginx:
I0814 10:52:56.908] podtemplate "nginx" deleted
I0814 10:52:56.978] core.sh:1431: Successful get podtemplate {{range.items}}{{.metadata.name}}:{{end}}: 
I0814 10:52:56.992] +++ exit code: 0
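PodTemplate is a plain v1 resource with no dedicated create subcommand, so the case above applies a small manifest (kind: PodTemplate with a single nginx container labelled name=nginx, matching the listing above) and then lists and deletes it; a sketch with the manifest file name assumed:
  kubectl apply -f podtemplate.yaml    # file name assumed; manifest carries the nginx container and name=nginx label
  kubectl get podtemplates
  kubectl get podtemplates -o go-template='{{range.items}}{{.metadata.name}}:{{end}}'
  kubectl delete podtemplate nginx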
I0814 10:52:57.030] Recording: run_service_tests
I0814 10:52:57.030] Running command: run_service_tests
... skipping 65 lines ...
I0814 10:52:57.923] Port:              <unset>  6379/TCP
I0814 10:52:57.923] TargetPort:        6379/TCP
I0814 10:52:57.923] Endpoints:         <none>
I0814 10:52:57.923] Session Affinity:  None
I0814 10:52:57.923] Events:            <none>
I0814 10:52:57.923] 
W0814 10:52:58.024] E0814 10:52:56.921588   53119 reflector.go:125] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0814 10:52:58.024] E0814 10:52:57.606775   53119 reflector.go:125] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0814 10:52:58.025] E0814 10:52:57.709780   53119 reflector.go:125] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0814 10:52:58.025] E0814 10:52:57.807395   53119 reflector.go:125] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0814 10:52:58.025] E0814 10:52:57.923270   53119 reflector.go:125] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
I0814 10:52:58.126] Successful describe services:
I0814 10:52:58.126] Name:              kubernetes
I0814 10:52:58.126] Namespace:         default
I0814 10:52:58.126] Labels:            component=apiserver
I0814 10:52:58.127]                    provider=kubernetes
I0814 10:52:58.127] Annotations:       <none>
... skipping 238 lines ...
I0814 10:52:59.051]   selector:
I0814 10:52:59.051]     role: padawan
I0814 10:52:59.051]   sessionAffinity: None
I0814 10:52:59.051]   type: ClusterIP
I0814 10:52:59.051] status:
I0814 10:52:59.051]   loadBalancer: {}
W0814 10:52:59.152] E0814 10:52:58.609121   53119 reflector.go:125] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0814 10:52:59.152] E0814 10:52:58.711185   53119 reflector.go:125] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0814 10:52:59.153] E0814 10:52:58.809077   53119 reflector.go:125] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0814 10:52:59.153] E0814 10:52:58.924990   53119 reflector.go:125] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0814 10:52:59.153] error: you must specify resources by --filename when --local is set.
W0814 10:52:59.153] Example resource specifications include:
W0814 10:52:59.154]    '-f rsrc.yaml'
W0814 10:52:59.154]    '--filename=rsrc.json'
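That warning is kubectl refusing to run a --local mutation without a file to act on: with --local the object must be supplied via --filename and the result is only printed, never sent to the server. For example (file name assumed), the padawan selector dump above can be produced with:
  kubectl set selector -f redis-master-service.yaml role=padawan --local -o yaml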
I0814 10:52:59.254] core.sh:898: Successful get services redis-master {{range.spec.selector}}{{.}}:{{end}}: redis:master:backend:
I0814 10:52:59.393] core.sh:905: Successful get services {{range.items}}{{.metadata.name}}:{{end}}: kubernetes:redis-master:
I0814 10:52:59.474] service "redis-master" deleted
... skipping 8 lines ...
I0814 10:53:00.546] core.sh:952: Successful get services {{range.items}}{{.metadata.name}}:{{end}}: kubernetes:redis-master:service-v1-test:
I0814 10:53:00.626] service "redis-master" deleted
I0814 10:53:00.717] service "service-v1-test" deleted
I0814 10:53:00.818] core.sh:960: Successful get services {{range.items}}{{.metadata.name}}:{{end}}: kubernetes:
I0814 10:53:00.910] core.sh:964: Successful get services {{range.items}}{{.metadata.name}}:{{end}}: kubernetes:
I0814 10:53:01.069] service/redis-master created
W0814 10:53:01.170] E0814 10:52:59.610543   53119 reflector.go:125] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0814 10:53:01.171] E0814 10:52:59.712971   53119 reflector.go:125] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0814 10:53:01.171] E0814 10:52:59.810691   53119 reflector.go:125] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0814 10:53:01.172] E0814 10:52:59.926309   53119 reflector.go:125] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0814 10:53:01.172] E0814 10:53:00.611843   53119 reflector.go:125] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0814 10:53:01.172] E0814 10:53:00.714341   53119 reflector.go:125] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0814 10:53:01.173] E0814 10:53:00.812165   53119 reflector.go:125] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0814 10:53:01.173] E0814 10:53:00.927742   53119 reflector.go:125] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
I0814 10:53:01.273] service/redis-slave created
I0814 10:53:01.337] core.sh:969: Successful get services {{range.items}}{{.metadata.name}}:{{end}}: kubernetes:redis-master:redis-slave:
I0814 10:53:01.424] Successful
I0814 10:53:01.424] message:NAME           RSRC
I0814 10:53:01.424] kubernetes     144
I0814 10:53:01.424] redis-master   1416
... skipping 35 lines ...
I0814 10:53:03.620] apps.sh:34: Successful get daemonsets bind {{.metadata.generation}}: 1
I0814 10:53:03.782] daemonset.apps/bind configured
I0814 10:53:03.883] apps.sh:37: Successful get daemonsets bind {{.metadata.generation}}: 1
I0814 10:53:03.976] daemonset.apps/bind image updated
I0814 10:53:04.076] apps.sh:40: Successful get daemonsets bind {{.metadata.generation}}: 2
I0814 10:53:04.171] daemonset.apps/bind env updated
W0814 10:53:04.272] E0814 10:53:01.612722   53119 reflector.go:125] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0814 10:53:04.273] E0814 10:53:01.715747   53119 reflector.go:125] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0814 10:53:04.273] E0814 10:53:01.813426   53119 reflector.go:125] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0814 10:53:04.273] E0814 10:53:01.929285   53119 reflector.go:125] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0814 10:53:04.274] kubectl run --generator=deployment/apps.v1 is DEPRECATED and will be removed in a future version. Use kubectl run --generator=run-pod/v1 or kubectl create instead.
W0814 10:53:04.274] I0814 10:53:02.415504   53119 event.go:255] Event(v1.ObjectReference{Kind:"Deployment", Namespace:"default", Name:"testmetadata", UID:"1aec9fea-8437-4177-8238-44bfc64c5f73", APIVersion:"apps/v1", ResourceVersion:"1431", FieldPath:""}): type: 'Normal' reason: 'ScalingReplicaSet' Scaled up replica set testmetadata-6cdd84c77d to 2
W0814 10:53:04.274] I0814 10:53:02.422846   53119 event.go:255] Event(v1.ObjectReference{Kind:"ReplicaSet", Namespace:"default", Name:"testmetadata-6cdd84c77d", UID:"ccdbd07b-400c-48f6-8171-96973841d58f", APIVersion:"apps/v1", ResourceVersion:"1432", FieldPath:""}): type: 'Normal' reason: 'SuccessfulCreate' Created pod: testmetadata-6cdd84c77d-8txjn
W0814 10:53:04.275] I0814 10:53:02.426545   53119 event.go:255] Event(v1.ObjectReference{Kind:"ReplicaSet", Namespace:"default", Name:"testmetadata-6cdd84c77d", UID:"ccdbd07b-400c-48f6-8171-96973841d58f", APIVersion:"apps/v1", ResourceVersion:"1432", FieldPath:""}): type: 'Normal' reason: 'SuccessfulCreate' Created pod: testmetadata-6cdd84c77d-kgz7w
W0814 10:53:04.275] E0814 10:53:02.616839   53119 reflector.go:125] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0814 10:53:04.275] E0814 10:53:02.717411   53119 reflector.go:125] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0814 10:53:04.276] E0814 10:53:02.815042   53119 reflector.go:125] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0814 10:53:04.276] E0814 10:53:02.931029   53119 reflector.go:125] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0814 10:53:04.276] I0814 10:53:03.518078   49657 controller.go:606] quota admission added evaluator for: daemonsets.apps
W0814 10:53:04.276] I0814 10:53:03.533168   49657 controller.go:606] quota admission added evaluator for: controllerrevisions.apps
W0814 10:53:04.277] E0814 10:53:03.618507   53119 reflector.go:125] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0814 10:53:04.277] E0814 10:53:03.719209   53119 reflector.go:125] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0814 10:53:04.277] E0814 10:53:03.816763   53119 reflector.go:125] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0814 10:53:04.277] E0814 10:53:03.932265   53119 reflector.go:125] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
I0814 10:53:04.378] apps.sh:42: Successful get daemonsets bind {{.metadata.generation}}: 3
I0814 10:53:04.378] daemonset.apps/bind resource requirements updated
I0814 10:53:04.475] apps.sh:44: Successful get daemonsets bind {{.metadata.generation}}: 4
I0814 10:53:04.570] daemonset.apps/bind restarted
I0814 10:53:04.673] apps.sh:48: Successful get daemonsets bind {{.metadata.generation}}: 5
I0814 10:53:04.754] daemonset.apps "bind" deleted
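The generation bumps checked above (1 through 5) come from successive mutations of the DaemonSet spec followed by a rollout restart; roughly, using the kubernetes-pause container name that appears in the controller dump further down (image, env and resource values are assumed):
  kubectl set image daemonset/bind kubernetes-pause=k8s.gcr.io/pause:latest
  kubectl set env daemonset/bind FOO=bar
  kubectl set resources daemonset/bind --limits=cpu=200m,memory=512Mi
  kubectl rollout restart daemonset/bind
  kubectl get daemonset bind -o go-template='{{.metadata.generation}}'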
... skipping 37 lines ...
I0814 10:53:06.543] apps.sh:84: Successful get daemonset {{range.items}}{{(index .spec.template.spec.containers 1).image}}:{{end}}: k8s.gcr.io/nginx:test-cmd:
I0814 10:53:06.640] apps.sh:85: Successful get daemonset {{range.items}}{{(len .spec.template.spec.containers)}}{{end}}: 2
I0814 10:53:06.743] daemonset.apps/bind rolled back
I0814 10:53:06.849] apps.sh:88: Successful get daemonset {{range.items}}{{(index .spec.template.spec.containers 0).image}}:{{end}}: k8s.gcr.io/pause:2.0:
I0814 10:53:06.944] apps.sh:89: Successful get daemonset {{range.items}}{{(len .spec.template.spec.containers)}}{{end}}: 1
I0814 10:53:07.052] Successful
I0814 10:53:07.052] message:error: unable to find specified revision 1000000 in history
I0814 10:53:07.052] has:unable to find specified revision
I0814 10:53:07.149] apps.sh:93: Successful get daemonset {{range.items}}{{(index .spec.template.spec.containers 0).image}}:{{end}}: k8s.gcr.io/pause:2.0:
I0814 10:53:07.250] apps.sh:94: Successful get daemonset {{range.items}}{{(len .spec.template.spec.containers)}}{{end}}: 1
I0814 10:53:07.353] daemonset.apps/bind rolled back
I0814 10:53:07.454] apps.sh:97: Successful get daemonset {{range.items}}{{(index .spec.template.spec.containers 0).image}}:{{end}}: k8s.gcr.io/pause:latest:
I0814 10:53:07.547] apps.sh:98: Successful get daemonset {{range.items}}{{(index .spec.template.spec.containers 1).image}}:{{end}}: k8s.gcr.io/nginx:test-cmd:
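The rollback checks alternate the DaemonSet between pause:2.0 and pause:latest by undoing to specific revisions, and also verify the failure path for a revision that does not exist; a sketch (revision numbers other than the failing one are assumed):
  kubectl rollout history daemonset/bind
  kubectl rollout undo daemonset/bind --to-revision=1
  kubectl rollout undo daemonset/bind --to-revision=1000000   # fails: "unable to find specified revision 1000000 in history"
  kubectl rollout undo daemonset/bind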
... skipping 22 lines ...
I0814 10:53:08.875] Namespace:    namespace-1565779987-689
I0814 10:53:08.875] Selector:     app=guestbook,tier=frontend
I0814 10:53:08.876] Labels:       app=guestbook
I0814 10:53:08.876]               tier=frontend
I0814 10:53:08.876] Annotations:  <none>
I0814 10:53:08.876] Replicas:     3 current / 3 desired
I0814 10:53:08.876] Pods Status:  0 Running / 3 Waiting / 0 Succeeded / 0 Failed
I0814 10:53:08.876] Pod Template:
I0814 10:53:08.876]   Labels:  app=guestbook
I0814 10:53:08.876]            tier=frontend
I0814 10:53:08.876]   Containers:
I0814 10:53:08.876]    php-redis:
I0814 10:53:08.876]     Image:      gcr.io/google_samples/gb-frontend:v4
... skipping 17 lines ...
I0814 10:53:08.983] Namespace:    namespace-1565779987-689
I0814 10:53:08.983] Selector:     app=guestbook,tier=frontend
I0814 10:53:08.983] Labels:       app=guestbook
I0814 10:53:08.983]               tier=frontend
I0814 10:53:08.984] Annotations:  <none>
I0814 10:53:08.984] Replicas:     3 current / 3 desired
I0814 10:53:08.984] Pods Status:  0 Running / 3 Waiting / 0 Succeeded / 0 Failed
I0814 10:53:08.985] Pod Template:
I0814 10:53:08.985]   Labels:  app=guestbook
I0814 10:53:08.985]            tier=frontend
I0814 10:53:08.985]   Containers:
I0814 10:53:08.986]    php-redis:
I0814 10:53:08.986]     Image:      gcr.io/google_samples/gb-frontend:v4
... skipping 18 lines ...
I0814 10:53:09.095] Namespace:    namespace-1565779987-689
I0814 10:53:09.095] Selector:     app=guestbook,tier=frontend
I0814 10:53:09.096] Labels:       app=guestbook
I0814 10:53:09.096]               tier=frontend
I0814 10:53:09.096] Annotations:  <none>
I0814 10:53:09.097] Replicas:     3 current / 3 desired
I0814 10:53:09.097] Pods Status:  0 Running / 3 Waiting / 0 Succeeded / 0 Failed
I0814 10:53:09.097] Pod Template:
I0814 10:53:09.098]   Labels:  app=guestbook
I0814 10:53:09.098]            tier=frontend
I0814 10:53:09.099]   Containers:
I0814 10:53:09.099]    php-redis:
I0814 10:53:09.099]     Image:      gcr.io/google_samples/gb-frontend:v4
... skipping 4 lines ...
I0814 10:53:09.101]       memory:  100Mi
I0814 10:53:09.101]     Environment:
I0814 10:53:09.102]       GET_HOSTS_FROM:  dns
I0814 10:53:09.102]     Mounts:            <none>
I0814 10:53:09.103]   Volumes:             <none>
I0814 10:53:09.103] 
W0814 10:53:09.204] E0814 10:53:04.620296   53119 reflector.go:125] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0814 10:53:09.205] E0814 10:53:04.720903   53119 reflector.go:125] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0814 10:53:09.205] E0814 10:53:04.818326   53119 reflector.go:125] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0814 10:53:09.206] E0814 10:53:04.934148   53119 reflector.go:125] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0814 10:53:09.206] E0814 10:53:05.621937   53119 reflector.go:125] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0814 10:53:09.207] E0814 10:53:05.722528   53119 reflector.go:125] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0814 10:53:09.207] E0814 10:53:05.819769   53119 reflector.go:125] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0814 10:53:09.208] E0814 10:53:05.936102   53119 reflector.go:125] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0814 10:53:09.208] E0814 10:53:06.623623   53119 reflector.go:125] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0814 10:53:09.208] E0814 10:53:06.724033   53119 reflector.go:125] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0814 10:53:09.213] E0814 10:53:06.772818   53119 daemon_controller.go:302] namespace-1565779984-15782/bind failed with : error storing status for daemon set &v1.DaemonSet{TypeMeta:v1.TypeMeta{Kind:"", APIVersion:""}, ObjectMeta:v1.ObjectMeta{Name:"bind", GenerateName:"", Namespace:"namespace-1565779984-15782", SelfLink:"/apis/apps/v1/namespaces/namespace-1565779984-15782/daemonsets/bind", UID:"cca1facc-2e62-4a44-822c-340b61588063", ResourceVersion:"1498", Generation:3, CreationTimestamp:v1.Time{Time:time.Time{wall:0x0, ext:63701376785, loc:(*time.Location)(0x7213220)}}, DeletionTimestamp:(*v1.Time)(nil), DeletionGracePeriodSeconds:(*int64)(nil), Labels:map[string]string{"service":"bind"}, Annotations:map[string]string{"deprecated.daemonset.template.generation":"3", "kubectl.kubernetes.io/last-applied-configuration":"{\"apiVersion\":\"apps/v1\",\"kind\":\"DaemonSet\",\"metadata\":{\"annotations\":{\"kubernetes.io/change-cause\":\"kubectl apply --filename=hack/testdata/rollingupdate-daemonset-rv2.yaml --record=true --server=http://127.0.0.1:8080 --match-server-version=true\"},\"labels\":{\"service\":\"bind\"},\"name\":\"bind\",\"namespace\":\"namespace-1565779984-15782\"},\"spec\":{\"selector\":{\"matchLabels\":{\"service\":\"bind\"}},\"template\":{\"metadata\":{\"labels\":{\"service\":\"bind\"}},\"spec\":{\"affinity\":{\"podAntiAffinity\":{\"requiredDuringSchedulingIgnoredDuringExecution\":[{\"labelSelector\":{\"matchExpressions\":[{\"key\":\"service\",\"operator\":\"In\",\"values\":[\"bind\"]}]},\"namespaces\":[],\"topologyKey\":\"kubernetes.io/hostname\"}]}},\"containers\":[{\"image\":\"k8s.gcr.io/pause:latest\",\"name\":\"kubernetes-pause\"},{\"image\":\"k8s.gcr.io/nginx:test-cmd\",\"name\":\"app\"}]}},\"updateStrategy\":{\"rollingUpdate\":{\"maxUnavailable\":\"10%\"},\"type\":\"RollingUpdate\"}}}\n", "kubernetes.io/change-cause":"kubectl apply --filename=hack/testdata/rollingupdate-daemonset-rv2.yaml --record=true --server=http://127.0.0.1:8080 --match-server-version=true"}, OwnerReferences:[]v1.OwnerReference(nil), Finalizers:[]string(nil), ClusterName:"", ManagedFields:[]v1.ManagedFieldsEntry{v1.ManagedFieldsEntry{Manager:"kube-controller-manager", Operation:"Update", APIVersion:"apps/v1", Time:(*v1.Time)(0xc001d4d880), Fields:(*v1.Fields)(0xc001d4d8a0)}, v1.ManagedFieldsEntry{Manager:"kubectl", Operation:"Update", APIVersion:"apps/v1", Time:(*v1.Time)(0xc001d4d8c0), Fields:(*v1.Fields)(0xc001d4d8e0)}, v1.ManagedFieldsEntry{Manager:"kubectl", Operation:"Update", APIVersion:"apps/v1", Time:(*v1.Time)(0xc001d4d900), Fields:(*v1.Fields)(0xc001d4d940)}}}, Spec:v1.DaemonSetSpec{Selector:(*v1.LabelSelector)(0xc001d4d9a0), Template:v1.PodTemplateSpec{ObjectMeta:v1.ObjectMeta{Name:"", GenerateName:"", Namespace:"", SelfLink:"", UID:"", ResourceVersion:"", Generation:0, CreationTimestamp:v1.Time{Time:time.Time{wall:0x0, ext:0, loc:(*time.Location)(nil)}}, DeletionTimestamp:(*v1.Time)(nil), DeletionGracePeriodSeconds:(*int64)(nil), Labels:map[string]string{"service":"bind"}, Annotations:map[string]string(nil), OwnerReferences:[]v1.OwnerReference(nil), Finalizers:[]string(nil), ClusterName:"", ManagedFields:[]v1.ManagedFieldsEntry(nil)}, Spec:v1.PodSpec{Volumes:[]v1.Volume(nil), InitContainers:[]v1.Container(nil), Containers:[]v1.Container{v1.Container{Name:"kubernetes-pause", Image:"k8s.gcr.io/pause:2.0", Command:[]string(nil), Args:[]string(nil), WorkingDir:"", Ports:[]v1.ContainerPort(nil), EnvFrom:[]v1.EnvFromSource(nil), Env:[]v1.EnvVar(nil), 
Resources:v1.ResourceRequirements{Limits:v1.ResourceList(nil), Requests:v1.ResourceList(nil)}, VolumeMounts:[]v1.VolumeMount(nil), VolumeDevices:[]v1.VolumeDevice(nil), LivenessProbe:(*v1.Probe)(nil), ReadinessProbe:(*v1.Probe)(nil), Lifecycle:(*v1.Lifecycle)(nil), TerminationMessagePath:"/dev/termination-log", TerminationMessagePolicy:"File", ImagePullPolicy:"IfNotPresent", SecurityContext:(*v1.SecurityContext)(nil), Stdin:false, StdinOnce:false, TTY:false}}, EphemeralContainers:[]v1.EphemeralContainer(nil), RestartPolicy:"Always", TerminationGracePeriodSeconds:(*int64)(0xc0009c4868), ActiveDeadlineSeconds:(*int64)(nil), DNSPolicy:"ClusterFirst", NodeSelector:map[string]string(nil), ServiceAccountName:"", DeprecatedServiceAccount:"", AutomountServiceAccountToken:(*bool)(nil), NodeName:"", HostNetwork:false, HostPID:false, HostIPC:false, ShareProcessNamespace:(*bool)(nil), SecurityContext:(*v1.PodSecurityContext)(0xc000706120), ImagePullSecrets:[]v1.LocalObjectReference(nil), Hostname:"", Subdomain:"", Affinity:(*v1.Affinity)(0xc001d4d9c0), SchedulerName:"default-scheduler", Tolerations:[]v1.Toleration(nil), HostAliases:[]v1.HostAlias(nil), PriorityClassName:"", Priority:(*int32)(nil), DNSConfig:(*v1.PodDNSConfig)(nil), ReadinessGates:[]v1.PodReadinessGate(nil), RuntimeClassName:(*string)(nil), EnableServiceLinks:(*bool)(nil), PreemptionPolicy:(*v1.PreemptionPolicy)(nil), Overhead:v1.ResourceList(nil), TopologySpreadConstraints:[]v1.TopologySpreadConstraint(nil)}}, UpdateStrategy:v1.DaemonSetUpdateStrategy{Type:"RollingUpdate", RollingUpdate:(*v1.RollingUpdateDaemonSet)(0xc00221e338)}, MinReadySeconds:0, RevisionHistoryLimit:(*int32)(0xc0009c495c)}, Status:v1.DaemonSetStatus{CurrentNumberScheduled:0, NumberMisscheduled:0, DesiredNumberSched