This job view page is being replaced by Spyglass soon. Check out the new job view.
Result: FAILURE
Tests: 1 failed / 622 succeeded
Started: 2019-02-12 07:05
Elapsed: 27m5s
Revision:
Builder: gke-prow-containerd-pool-99179761-3k8h
pod: 5b873ee8-2e94-11e9-8fd4-0a580a6c0716
infra-commit: 490a26c55
pod: 5b873ee8-2e94-11e9-8fd4-0a580a6c0716
repo: k8s.io/kubernetes
repo-commit: 04802149032be3335ec560b8b16095b096999c79
repos: {u'k8s.io/kubernetes': u'master'}

Test Failures


k8s.io/kubernetes/test/integration/scheduler TestPreemptionRaces 21s

go test -v k8s.io/kubernetes/test/integration/scheduler -run TestPreemptionRaces$
I0212 07:24:27.167292  124047 services.go:33] Network range for service cluster IPs is unspecified. Defaulting to {10.0.0.0 ffffff00}.
I0212 07:24:27.167318  124047 services.go:45] Setting service IP to "10.0.0.1" (read-write).
I0212 07:24:27.167335  124047 master.go:272] Node port range unspecified. Defaulting to 30000-32767.
I0212 07:24:27.167355  124047 master.go:228] Using reconciler: 
I0212 07:24:27.168731  124047 storage_factory.go:285] storing podtemplates in v1, reading as __internal from storagebackend.Config{Type:"", Prefix:"b2a57c8f-de33-4469-a89c-6a6a6fdee16f", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Quorum:false, Paging:true, Codec:runtime.Codec(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0212 07:24:27.168881  124047 clientconn.go:551] parsed scheme: ""
I0212 07:24:27.168901  124047 clientconn.go:557] scheme "" not registered, fallback to default scheme
I0212 07:24:27.168939  124047 resolver_conn_wrapper.go:116] ccResolverWrapper: sending new addresses to cc: [{127.0.0.1:2379 0  <nil>}]
I0212 07:24:27.168981  124047 balancer_v1_wrapper.go:125] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0212 07:24:27.169889  124047 balancer_v1_wrapper.go:245] clientv3/balancer: pin "127.0.0.1:2379"
I0212 07:24:27.169974  124047 balancer_v1_wrapper.go:125] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0212 07:24:27.170375  124047 store.go:1310] Monitoring podtemplates count at <storage-prefix>//podtemplates
I0212 07:24:27.170420  124047 storage_factory.go:285] storing events in v1, reading as __internal from storagebackend.Config{Type:"", Prefix:"b2a57c8f-de33-4469-a89c-6a6a6fdee16f", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Quorum:false, Paging:true, Codec:runtime.Codec(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0212 07:24:27.170536  124047 reflector.go:170] Listing and watching *core.PodTemplate from storage/cacher.go:/podtemplates
I0212 07:24:27.171564  124047 clientconn.go:551] parsed scheme: ""
I0212 07:24:27.171628  124047 clientconn.go:557] scheme "" not registered, fallback to default scheme
I0212 07:24:27.171719  124047 resolver_conn_wrapper.go:116] ccResolverWrapper: sending new addresses to cc: [{127.0.0.1:2379 0  <nil>}]
I0212 07:24:27.171878  124047 balancer_v1_wrapper.go:125] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0212 07:24:27.172273  124047 balancer_v1_wrapper.go:245] clientv3/balancer: pin "127.0.0.1:2379"
I0212 07:24:27.172378  124047 store.go:1310] Monitoring events count at <storage-prefix>//events
I0212 07:24:27.172481  124047 storage_factory.go:285] storing limitranges in v1, reading as __internal from storagebackend.Config{Type:"", Prefix:"b2a57c8f-de33-4469-a89c-6a6a6fdee16f", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Quorum:false, Paging:true, Codec:runtime.Codec(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0212 07:24:27.172689  124047 clientconn.go:551] parsed scheme: ""
I0212 07:24:27.172697  124047 balancer_v1_wrapper.go:125] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0212 07:24:27.172717  124047 clientconn.go:557] scheme "" not registered, fallback to default scheme
I0212 07:24:27.172775  124047 resolver_conn_wrapper.go:116] ccResolverWrapper: sending new addresses to cc: [{127.0.0.1:2379 0  <nil>}]
I0212 07:24:27.172882  124047 balancer_v1_wrapper.go:125] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0212 07:24:27.173596  124047 balancer_v1_wrapper.go:245] clientv3/balancer: pin "127.0.0.1:2379"
I0212 07:24:27.173676  124047 balancer_v1_wrapper.go:125] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0212 07:24:27.173985  124047 store.go:1310] Monitoring limitranges count at <storage-prefix>//limitranges
I0212 07:24:27.174037  124047 storage_factory.go:285] storing resourcequotas in v1, reading as __internal from storagebackend.Config{Type:"", Prefix:"b2a57c8f-de33-4469-a89c-6a6a6fdee16f", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Quorum:false, Paging:true, Codec:runtime.Codec(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0212 07:24:27.174118  124047 reflector.go:170] Listing and watching *core.LimitRange from storage/cacher.go:/limitranges
I0212 07:24:27.174199  124047 clientconn.go:551] parsed scheme: ""
I0212 07:24:27.174274  124047 clientconn.go:557] scheme "" not registered, fallback to default scheme
I0212 07:24:27.174360  124047 resolver_conn_wrapper.go:116] ccResolverWrapper: sending new addresses to cc: [{127.0.0.1:2379 0  <nil>}]
I0212 07:24:27.174411  124047 balancer_v1_wrapper.go:125] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0212 07:24:27.175208  124047 balancer_v1_wrapper.go:245] clientv3/balancer: pin "127.0.0.1:2379"
I0212 07:24:27.175373  124047 balancer_v1_wrapper.go:125] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0212 07:24:27.175939  124047 store.go:1310] Monitoring resourcequotas count at <storage-prefix>//resourcequotas
I0212 07:24:27.176020  124047 reflector.go:170] Listing and watching *core.ResourceQuota from storage/cacher.go:/resourcequotas
I0212 07:24:27.176121  124047 storage_factory.go:285] storing secrets in v1, reading as __internal from storagebackend.Config{Type:"", Prefix:"b2a57c8f-de33-4469-a89c-6a6a6fdee16f", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Quorum:false, Paging:true, Codec:runtime.Codec(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0212 07:24:27.176198  124047 clientconn.go:551] parsed scheme: ""
I0212 07:24:27.176212  124047 clientconn.go:557] scheme "" not registered, fallback to default scheme
I0212 07:24:27.176235  124047 resolver_conn_wrapper.go:116] ccResolverWrapper: sending new addresses to cc: [{127.0.0.1:2379 0  <nil>}]
I0212 07:24:27.176267  124047 balancer_v1_wrapper.go:125] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0212 07:24:27.176582  124047 balancer_v1_wrapper.go:245] clientv3/balancer: pin "127.0.0.1:2379"
I0212 07:24:27.176783  124047 balancer_v1_wrapper.go:125] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0212 07:24:27.176793  124047 store.go:1310] Monitoring secrets count at <storage-prefix>//secrets
I0212 07:24:27.176847  124047 reflector.go:170] Listing and watching *core.Secret from storage/cacher.go:/secrets
I0212 07:24:27.176971  124047 storage_factory.go:285] storing persistentvolumes in v1, reading as __internal from storagebackend.Config{Type:"", Prefix:"b2a57c8f-de33-4469-a89c-6a6a6fdee16f", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Quorum:false, Paging:true, Codec:runtime.Codec(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0212 07:24:27.177071  124047 clientconn.go:551] parsed scheme: ""
I0212 07:24:27.177093  124047 clientconn.go:557] scheme "" not registered, fallback to default scheme
I0212 07:24:27.177121  124047 resolver_conn_wrapper.go:116] ccResolverWrapper: sending new addresses to cc: [{127.0.0.1:2379 0  <nil>}]
I0212 07:24:27.177167  124047 balancer_v1_wrapper.go:125] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0212 07:24:27.177431  124047 balancer_v1_wrapper.go:245] clientv3/balancer: pin "127.0.0.1:2379"
I0212 07:24:27.177761  124047 store.go:1310] Monitoring persistentvolumes count at <storage-prefix>//persistentvolumes
I0212 07:24:27.177920  124047 balancer_v1_wrapper.go:125] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0212 07:24:27.178031  124047 reflector.go:170] Listing and watching *core.PersistentVolume from storage/cacher.go:/persistentvolumes
I0212 07:24:27.178060  124047 storage_factory.go:285] storing persistentvolumeclaims in v1, reading as __internal from storagebackend.Config{Type:"", Prefix:"b2a57c8f-de33-4469-a89c-6a6a6fdee16f", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Quorum:false, Paging:true, Codec:runtime.Codec(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0212 07:24:27.178287  124047 clientconn.go:551] parsed scheme: ""
I0212 07:24:27.178341  124047 clientconn.go:557] scheme "" not registered, fallback to default scheme
I0212 07:24:27.178387  124047 resolver_conn_wrapper.go:116] ccResolverWrapper: sending new addresses to cc: [{127.0.0.1:2379 0  <nil>}]
I0212 07:24:27.178479  124047 balancer_v1_wrapper.go:125] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0212 07:24:27.178862  124047 balancer_v1_wrapper.go:245] clientv3/balancer: pin "127.0.0.1:2379"
I0212 07:24:27.178907  124047 balancer_v1_wrapper.go:125] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0212 07:24:27.179126  124047 store.go:1310] Monitoring persistentvolumeclaims count at <storage-prefix>//persistentvolumeclaims
I0212 07:24:27.179264  124047 reflector.go:170] Listing and watching *core.PersistentVolumeClaim from storage/cacher.go:/persistentvolumeclaims
I0212 07:24:27.179341  124047 storage_factory.go:285] storing configmaps in v1, reading as __internal from storagebackend.Config{Type:"", Prefix:"b2a57c8f-de33-4469-a89c-6a6a6fdee16f", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Quorum:false, Paging:true, Codec:runtime.Codec(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0212 07:24:27.179416  124047 clientconn.go:551] parsed scheme: ""
I0212 07:24:27.179429  124047 clientconn.go:557] scheme "" not registered, fallback to default scheme
I0212 07:24:27.179454  124047 resolver_conn_wrapper.go:116] ccResolverWrapper: sending new addresses to cc: [{127.0.0.1:2379 0  <nil>}]
I0212 07:24:27.179541  124047 balancer_v1_wrapper.go:125] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0212 07:24:27.180030  124047 balancer_v1_wrapper.go:245] clientv3/balancer: pin "127.0.0.1:2379"
I0212 07:24:27.180264  124047 store.go:1310] Monitoring configmaps count at <storage-prefix>//configmaps
I0212 07:24:27.180285  124047 balancer_v1_wrapper.go:125] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0212 07:24:27.180289  124047 reflector.go:170] Listing and watching *core.ConfigMap from storage/cacher.go:/configmaps
I0212 07:24:27.180400  124047 storage_factory.go:285] storing namespaces in v1, reading as __internal from storagebackend.Config{Type:"", Prefix:"b2a57c8f-de33-4469-a89c-6a6a6fdee16f", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Quorum:false, Paging:true, Codec:runtime.Codec(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0212 07:24:27.180474  124047 clientconn.go:551] parsed scheme: ""
I0212 07:24:27.180486  124047 clientconn.go:557] scheme "" not registered, fallback to default scheme
I0212 07:24:27.180548  124047 resolver_conn_wrapper.go:116] ccResolverWrapper: sending new addresses to cc: [{127.0.0.1:2379 0  <nil>}]
I0212 07:24:27.180595  124047 balancer_v1_wrapper.go:125] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0212 07:24:27.181103  124047 balancer_v1_wrapper.go:245] clientv3/balancer: pin "127.0.0.1:2379"
I0212 07:24:27.181242  124047 balancer_v1_wrapper.go:125] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0212 07:24:27.181617  124047 store.go:1310] Monitoring namespaces count at <storage-prefix>//namespaces
I0212 07:24:27.181727  124047 reflector.go:170] Listing and watching *core.Namespace from storage/cacher.go:/namespaces
I0212 07:24:27.181946  124047 storage_factory.go:285] storing endpoints in v1, reading as __internal from storagebackend.Config{Type:"", Prefix:"b2a57c8f-de33-4469-a89c-6a6a6fdee16f", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Quorum:false, Paging:true, Codec:runtime.Codec(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0212 07:24:27.182421  124047 clientconn.go:551] parsed scheme: ""
I0212 07:24:27.182449  124047 clientconn.go:557] scheme "" not registered, fallback to default scheme
I0212 07:24:27.182516  124047 resolver_conn_wrapper.go:116] ccResolverWrapper: sending new addresses to cc: [{127.0.0.1:2379 0  <nil>}]
I0212 07:24:27.182574  124047 balancer_v1_wrapper.go:125] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0212 07:24:27.183117  124047 balancer_v1_wrapper.go:245] clientv3/balancer: pin "127.0.0.1:2379"
I0212 07:24:27.183417  124047 store.go:1310] Monitoring endpoints count at <storage-prefix>//endpoints
I0212 07:24:27.183474  124047 reflector.go:170] Listing and watching *core.Endpoints from storage/cacher.go:/endpoints
I0212 07:24:27.183751  124047 storage_factory.go:285] storing nodes in v1, reading as __internal from storagebackend.Config{Type:"", Prefix:"b2a57c8f-de33-4469-a89c-6a6a6fdee16f", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Quorum:false, Paging:true, Codec:runtime.Codec(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0212 07:24:27.183871  124047 clientconn.go:551] parsed scheme: ""
I0212 07:24:27.183924  124047 clientconn.go:557] scheme "" not registered, fallback to default scheme
I0212 07:24:27.183433  124047 balancer_v1_wrapper.go:125] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0212 07:24:27.183995  124047 resolver_conn_wrapper.go:116] ccResolverWrapper: sending new addresses to cc: [{127.0.0.1:2379 0  <nil>}]
I0212 07:24:27.184089  124047 balancer_v1_wrapper.go:125] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0212 07:24:27.184616  124047 balancer_v1_wrapper.go:245] clientv3/balancer: pin "127.0.0.1:2379"
I0212 07:24:27.184922  124047 balancer_v1_wrapper.go:125] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0212 07:24:27.185032  124047 store.go:1310] Monitoring nodes count at <storage-prefix>//nodes
I0212 07:24:27.185087  124047 reflector.go:170] Listing and watching *core.Node from storage/cacher.go:/nodes
I0212 07:24:27.185228  124047 storage_factory.go:285] storing pods in v1, reading as __internal from storagebackend.Config{Type:"", Prefix:"b2a57c8f-de33-4469-a89c-6a6a6fdee16f", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Quorum:false, Paging:true, Codec:runtime.Codec(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0212 07:24:27.185329  124047 clientconn.go:551] parsed scheme: ""
I0212 07:24:27.185347  124047 clientconn.go:557] scheme "" not registered, fallback to default scheme
I0212 07:24:27.185377  124047 resolver_conn_wrapper.go:116] ccResolverWrapper: sending new addresses to cc: [{127.0.0.1:2379 0  <nil>}]
I0212 07:24:27.185422  124047 balancer_v1_wrapper.go:125] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0212 07:24:27.185734  124047 balancer_v1_wrapper.go:245] clientv3/balancer: pin "127.0.0.1:2379"
I0212 07:24:27.185867  124047 balancer_v1_wrapper.go:125] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0212 07:24:27.186049  124047 store.go:1310] Monitoring pods count at <storage-prefix>//pods
I0212 07:24:27.186108  124047 reflector.go:170] Listing and watching *core.Pod from storage/cacher.go:/pods
I0212 07:24:27.186270  124047 storage_factory.go:285] storing serviceaccounts in v1, reading as __internal from storagebackend.Config{Type:"", Prefix:"b2a57c8f-de33-4469-a89c-6a6a6fdee16f", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Quorum:false, Paging:true, Codec:runtime.Codec(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0212 07:24:27.186525  124047 clientconn.go:551] parsed scheme: ""
I0212 07:24:27.186550  124047 clientconn.go:557] scheme "" not registered, fallback to default scheme
I0212 07:24:27.186587  124047 resolver_conn_wrapper.go:116] ccResolverWrapper: sending new addresses to cc: [{127.0.0.1:2379 0  <nil>}]
I0212 07:24:27.186703  124047 balancer_v1_wrapper.go:125] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0212 07:24:27.186985  124047 balancer_v1_wrapper.go:245] clientv3/balancer: pin "127.0.0.1:2379"
I0212 07:24:27.187087  124047 balancer_v1_wrapper.go:125] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0212 07:24:27.187249  124047 store.go:1310] Monitoring serviceaccounts count at <storage-prefix>//serviceaccounts
I0212 07:24:27.187433  124047 storage_factory.go:285] storing services in v1, reading as __internal from storagebackend.Config{Type:"", Prefix:"b2a57c8f-de33-4469-a89c-6a6a6fdee16f", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Quorum:false, Paging:true, Codec:runtime.Codec(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0212 07:24:27.187572  124047 clientconn.go:551] parsed scheme: ""
I0212 07:24:27.187596  124047 clientconn.go:557] scheme "" not registered, fallback to default scheme
I0212 07:24:27.187636  124047 resolver_conn_wrapper.go:116] ccResolverWrapper: sending new addresses to cc: [{127.0.0.1:2379 0  <nil>}]
I0212 07:24:27.187668  124047 reflector.go:170] Listing and watching *core.ServiceAccount from storage/cacher.go:/serviceaccounts
I0212 07:24:27.187813  124047 balancer_v1_wrapper.go:125] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0212 07:24:27.188014  124047 balancer_v1_wrapper.go:245] clientv3/balancer: pin "127.0.0.1:2379"
I0212 07:24:27.188109  124047 balancer_v1_wrapper.go:125] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0212 07:24:27.188383  124047 store.go:1310] Monitoring services count at <storage-prefix>//services
I0212 07:24:27.188426  124047 storage_factory.go:285] storing services in v1, reading as __internal from storagebackend.Config{Type:"", Prefix:"b2a57c8f-de33-4469-a89c-6a6a6fdee16f", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Quorum:false, Paging:true, Codec:runtime.Codec(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0212 07:24:27.188551  124047 clientconn.go:551] parsed scheme: ""
I0212 07:24:27.188570  124047 clientconn.go:557] scheme "" not registered, fallback to default scheme
I0212 07:24:27.188569  124047 reflector.go:170] Listing and watching *core.Service from storage/cacher.go:/services
I0212 07:24:27.188599  124047 resolver_conn_wrapper.go:116] ccResolverWrapper: sending new addresses to cc: [{127.0.0.1:2379 0  <nil>}]
I0212 07:24:27.188722  124047 balancer_v1_wrapper.go:125] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0212 07:24:27.189369  124047 balancer_v1_wrapper.go:245] clientv3/balancer: pin "127.0.0.1:2379"
I0212 07:24:27.189523  124047 clientconn.go:551] parsed scheme: ""
I0212 07:24:27.189542  124047 clientconn.go:557] scheme "" not registered, fallback to default scheme
I0212 07:24:27.189570  124047 resolver_conn_wrapper.go:116] ccResolverWrapper: sending new addresses to cc: [{127.0.0.1:2379 0  <nil>}]
I0212 07:24:27.189582  124047 balancer_v1_wrapper.go:125] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0212 07:24:27.189629  124047 balancer_v1_wrapper.go:125] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0212 07:24:27.189834  124047 balancer_v1_wrapper.go:245] clientv3/balancer: pin "127.0.0.1:2379"
I0212 07:24:27.189869  124047 balancer_v1_wrapper.go:125] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0212 07:24:27.190092  124047 storage_factory.go:285] storing replicationcontrollers in v1, reading as __internal from storagebackend.Config{Type:"", Prefix:"b2a57c8f-de33-4469-a89c-6a6a6fdee16f", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Quorum:false, Paging:true, Codec:runtime.Codec(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0212 07:24:27.190174  124047 clientconn.go:551] parsed scheme: ""
I0212 07:24:27.190196  124047 clientconn.go:557] scheme "" not registered, fallback to default scheme
I0212 07:24:27.190230  124047 resolver_conn_wrapper.go:116] ccResolverWrapper: sending new addresses to cc: [{127.0.0.1:2379 0  <nil>}]
I0212 07:24:27.190277  124047 balancer_v1_wrapper.go:125] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0212 07:24:27.190822  124047 balancer_v1_wrapper.go:245] clientv3/balancer: pin "127.0.0.1:2379"
I0212 07:24:27.190889  124047 balancer_v1_wrapper.go:125] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0212 07:24:27.191075  124047 store.go:1310] Monitoring replicationcontrollers count at <storage-prefix>//replicationcontrollers
I0212 07:24:27.191259  124047 reflector.go:170] Listing and watching *core.ReplicationController from storage/cacher.go:/replicationcontrollers
I0212 07:24:27.208615  124047 master.go:407] Skipping disabled API group "auditregistration.k8s.io".
I0212 07:24:27.208672  124047 master.go:415] Enabling API group "authentication.k8s.io".
I0212 07:24:27.208693  124047 master.go:415] Enabling API group "authorization.k8s.io".
I0212 07:24:27.208889  124047 storage_factory.go:285] storing horizontalpodautoscalers.autoscaling in autoscaling/v1, reading as autoscaling/__internal from storagebackend.Config{Type:"", Prefix:"b2a57c8f-de33-4469-a89c-6a6a6fdee16f", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Quorum:false, Paging:true, Codec:runtime.Codec(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0212 07:24:27.209057  124047 clientconn.go:551] parsed scheme: ""
I0212 07:24:27.209115  124047 clientconn.go:557] scheme "" not registered, fallback to default scheme
I0212 07:24:27.209234  124047 resolver_conn_wrapper.go:116] ccResolverWrapper: sending new addresses to cc: [{127.0.0.1:2379 0  <nil>}]
I0212 07:24:27.209314  124047 balancer_v1_wrapper.go:125] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0212 07:24:27.209862  124047 balancer_v1_wrapper.go:245] clientv3/balancer: pin "127.0.0.1:2379"
I0212 07:24:27.210007  124047 balancer_v1_wrapper.go:125] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0212 07:24:27.210186  124047 store.go:1310] Monitoring horizontalpodautoscalers.autoscaling count at <storage-prefix>//horizontalpodautoscalers
I0212 07:24:27.210762  124047 storage_factory.go:285] storing horizontalpodautoscalers.autoscaling in autoscaling/v1, reading as autoscaling/__internal from storagebackend.Config{Type:"", Prefix:"b2a57c8f-de33-4469-a89c-6a6a6fdee16f", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Quorum:false, Paging:true, Codec:runtime.Codec(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0212 07:24:27.210883  124047 clientconn.go:551] parsed scheme: ""
I0212 07:24:27.210908  124047 clientconn.go:557] scheme "" not registered, fallback to default scheme
I0212 07:24:27.210964  124047 resolver_conn_wrapper.go:116] ccResolverWrapper: sending new addresses to cc: [{127.0.0.1:2379 0  <nil>}]
I0212 07:24:27.210218  124047 reflector.go:170] Listing and watching *autoscaling.HorizontalPodAutoscaler from storage/cacher.go:/horizontalpodautoscalers
I0212 07:24:27.211304  124047 balancer_v1_wrapper.go:125] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0212 07:24:27.211650  124047 balancer_v1_wrapper.go:245] clientv3/balancer: pin "127.0.0.1:2379"
I0212 07:24:27.211734  124047 balancer_v1_wrapper.go:125] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0212 07:24:27.211987  124047 store.go:1310] Monitoring horizontalpodautoscalers.autoscaling count at <storage-prefix>//horizontalpodautoscalers
I0212 07:24:27.212094  124047 reflector.go:170] Listing and watching *autoscaling.HorizontalPodAutoscaler from storage/cacher.go:/horizontalpodautoscalers
I0212 07:24:27.212240  124047 storage_factory.go:285] storing horizontalpodautoscalers.autoscaling in autoscaling/v1, reading as autoscaling/__internal from storagebackend.Config{Type:"", Prefix:"b2a57c8f-de33-4469-a89c-6a6a6fdee16f", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Quorum:false, Paging:true, Codec:runtime.Codec(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0212 07:24:27.212745  124047 clientconn.go:551] parsed scheme: ""
I0212 07:24:27.212765  124047 clientconn.go:557] scheme "" not registered, fallback to default scheme
I0212 07:24:27.212816  124047 resolver_conn_wrapper.go:116] ccResolverWrapper: sending new addresses to cc: [{127.0.0.1:2379 0  <nil>}]
I0212 07:24:27.212880  124047 balancer_v1_wrapper.go:125] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0212 07:24:27.213179  124047 balancer_v1_wrapper.go:245] clientv3/balancer: pin "127.0.0.1:2379"
I0212 07:24:27.213259  124047 balancer_v1_wrapper.go:125] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0212 07:24:27.213534  124047 store.go:1310] Monitoring horizontalpodautoscalers.autoscaling count at <storage-prefix>//horizontalpodautoscalers
I0212 07:24:27.213559  124047 master.go:415] Enabling API group "autoscaling".
I0212 07:24:27.213570  124047 reflector.go:170] Listing and watching *autoscaling.HorizontalPodAutoscaler from storage/cacher.go:/horizontalpodautoscalers
I0212 07:24:27.213759  124047 storage_factory.go:285] storing jobs.batch in batch/v1, reading as batch/__internal from storagebackend.Config{Type:"", Prefix:"b2a57c8f-de33-4469-a89c-6a6a6fdee16f", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Quorum:false, Paging:true, Codec:runtime.Codec(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0212 07:24:27.213861  124047 clientconn.go:551] parsed scheme: ""
I0212 07:24:27.213888  124047 clientconn.go:557] scheme "" not registered, fallback to default scheme
I0212 07:24:27.213944  124047 resolver_conn_wrapper.go:116] ccResolverWrapper: sending new addresses to cc: [{127.0.0.1:2379 0  <nil>}]
I0212 07:24:27.214024  124047 balancer_v1_wrapper.go:125] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0212 07:24:27.214385  124047 balancer_v1_wrapper.go:245] clientv3/balancer: pin "127.0.0.1:2379"
I0212 07:24:27.214678  124047 store.go:1310] Monitoring jobs.batch count at <storage-prefix>//jobs
I0212 07:24:27.214832  124047 balancer_v1_wrapper.go:125] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0212 07:24:27.214872  124047 storage_factory.go:285] storing cronjobs.batch in batch/v1beta1, reading as batch/__internal from storagebackend.Config{Type:"", Prefix:"b2a57c8f-de33-4469-a89c-6a6a6fdee16f", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Quorum:false, Paging:true, Codec:runtime.Codec(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0212 07:24:27.215009  124047 reflector.go:170] Listing and watching *batch.Job from storage/cacher.go:/jobs
I0212 07:24:27.215166  124047 clientconn.go:551] parsed scheme: ""
I0212 07:24:27.215563  124047 clientconn.go:557] scheme "" not registered, fallback to default scheme
I0212 07:24:27.215645  124047 resolver_conn_wrapper.go:116] ccResolverWrapper: sending new addresses to cc: [{127.0.0.1:2379 0  <nil>}]
I0212 07:24:27.215726  124047 balancer_v1_wrapper.go:125] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0212 07:24:27.216913  124047 balancer_v1_wrapper.go:245] clientv3/balancer: pin "127.0.0.1:2379"
I0212 07:24:27.217286  124047 balancer_v1_wrapper.go:125] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0212 07:24:27.217292  124047 store.go:1310] Monitoring cronjobs.batch count at <storage-prefix>//cronjobs
I0212 07:24:27.217326  124047 reflector.go:170] Listing and watching *batch.CronJob from storage/cacher.go:/cronjobs
I0212 07:24:27.217336  124047 master.go:415] Enabling API group "batch".
I0212 07:24:27.217563  124047 storage_factory.go:285] storing certificatesigningrequests.certificates.k8s.io in certificates.k8s.io/v1beta1, reading as certificates.k8s.io/__internal from storagebackend.Config{Type:"", Prefix:"b2a57c8f-de33-4469-a89c-6a6a6fdee16f", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Quorum:false, Paging:true, Codec:runtime.Codec(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0212 07:24:27.217650  124047 clientconn.go:551] parsed scheme: ""
I0212 07:24:27.217662  124047 clientconn.go:557] scheme "" not registered, fallback to default scheme
I0212 07:24:27.217694  124047 resolver_conn_wrapper.go:116] ccResolverWrapper: sending new addresses to cc: [{127.0.0.1:2379 0  <nil>}]
I0212 07:24:27.217861  124047 balancer_v1_wrapper.go:125] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0212 07:24:27.218100  124047 balancer_v1_wrapper.go:245] clientv3/balancer: pin "127.0.0.1:2379"
I0212 07:24:27.218457  124047 balancer_v1_wrapper.go:125] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0212 07:24:27.218634  124047 store.go:1310] Monitoring certificatesigningrequests.certificates.k8s.io count at <storage-prefix>//certificatesigningrequests
I0212 07:24:27.218673  124047 master.go:415] Enabling API group "certificates.k8s.io".
I0212 07:24:27.218909  124047 storage_factory.go:285] storing leases.coordination.k8s.io in coordination.k8s.io/v1beta1, reading as coordination.k8s.io/__internal from storagebackend.Config{Type:"", Prefix:"b2a57c8f-de33-4469-a89c-6a6a6fdee16f", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Quorum:false, Paging:true, Codec:runtime.Codec(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0212 07:24:27.218984  124047 clientconn.go:551] parsed scheme: ""
I0212 07:24:27.218996  124047 clientconn.go:557] scheme "" not registered, fallback to default scheme
I0212 07:24:27.219124  124047 resolver_conn_wrapper.go:116] ccResolverWrapper: sending new addresses to cc: [{127.0.0.1:2379 0  <nil>}]
I0212 07:24:27.219193  124047 reflector.go:170] Listing and watching *certificates.CertificateSigningRequest from storage/cacher.go:/certificatesigningrequests
I0212 07:24:27.220084  124047 balancer_v1_wrapper.go:125] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0212 07:24:27.220742  124047 balancer_v1_wrapper.go:245] clientv3/balancer: pin "127.0.0.1:2379"
I0212 07:24:27.221003  124047 balancer_v1_wrapper.go:125] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0212 07:24:27.221004  124047 store.go:1310] Monitoring leases.coordination.k8s.io count at <storage-prefix>//leases
I0212 07:24:27.221074  124047 reflector.go:170] Listing and watching *coordination.Lease from storage/cacher.go:/leases
I0212 07:24:27.221222  124047 storage_factory.go:285] storing leases.coordination.k8s.io in coordination.k8s.io/v1beta1, reading as coordination.k8s.io/__internal from storagebackend.Config{Type:"", Prefix:"b2a57c8f-de33-4469-a89c-6a6a6fdee16f", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Quorum:false, Paging:true, Codec:runtime.Codec(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0212 07:24:27.221300  124047 clientconn.go:551] parsed scheme: ""
I0212 07:24:27.221311  124047 clientconn.go:557] scheme "" not registered, fallback to default scheme
I0212 07:24:27.221349  124047 resolver_conn_wrapper.go:116] ccResolverWrapper: sending new addresses to cc: [{127.0.0.1:2379 0  <nil>}]
I0212 07:24:27.221415  124047 balancer_v1_wrapper.go:125] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0212 07:24:27.222256  124047 balancer_v1_wrapper.go:245] clientv3/balancer: pin "127.0.0.1:2379"
I0212 07:24:27.222543  124047 store.go:1310] Monitoring leases.coordination.k8s.io count at <storage-prefix>//leases
I0212 07:24:27.222562  124047 master.go:415] Enabling API group "coordination.k8s.io".
I0212 07:24:27.222670  124047 balancer_v1_wrapper.go:125] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0212 07:24:27.222742  124047 reflector.go:170] Listing and watching *coordination.Lease from storage/cacher.go:/leases
I0212 07:24:27.222765  124047 storage_factory.go:285] storing replicationcontrollers in v1, reading as __internal from storagebackend.Config{Type:"", Prefix:"b2a57c8f-de33-4469-a89c-6a6a6fdee16f", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Quorum:false, Paging:true, Codec:runtime.Codec(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0212 07:24:27.222842  124047 clientconn.go:551] parsed scheme: ""
I0212 07:24:27.222875  124047 clientconn.go:557] scheme "" not registered, fallback to default scheme
I0212 07:24:27.222927  124047 resolver_conn_wrapper.go:116] ccResolverWrapper: sending new addresses to cc: [{127.0.0.1:2379 0  <nil>}]
I0212 07:24:27.222997  124047 balancer_v1_wrapper.go:125] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0212 07:24:27.223398  124047 balancer_v1_wrapper.go:245] clientv3/balancer: pin "127.0.0.1:2379"
I0212 07:24:27.223650  124047 store.go:1310] Monitoring replicationcontrollers count at <storage-prefix>//replicationcontrollers
I0212 07:24:27.223992  124047 storage_factory.go:285] storing daemonsets.apps in apps/v1, reading as apps/__internal from storagebackend.Config{Type:"", Prefix:"b2a57c8f-de33-4469-a89c-6a6a6fdee16f", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Quorum:false, Paging:true, Codec:runtime.Codec(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0212 07:24:27.224134  124047 clientconn.go:551] parsed scheme: ""
I0212 07:24:27.224175  124047 clientconn.go:557] scheme "" not registered, fallback to default scheme
I0212 07:24:27.224217  124047 resolver_conn_wrapper.go:116] ccResolverWrapper: sending new addresses to cc: [{127.0.0.1:2379 0  <nil>}]
I0212 07:24:27.224346  124047 balancer_v1_wrapper.go:125] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0212 07:24:27.224367  124047 reflector.go:170] Listing and watching *core.ReplicationController from storage/cacher.go:/replicationcontrollers
I0212 07:24:27.224449  124047 balancer_v1_wrapper.go:125] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0212 07:24:27.224722  124047 balancer_v1_wrapper.go:245] clientv3/balancer: pin "127.0.0.1:2379"
I0212 07:24:27.225012  124047 store.go:1310] Monitoring daemonsets.apps count at <storage-prefix>//daemonsets
I0212 07:24:27.225117  124047 balancer_v1_wrapper.go:125] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0212 07:24:27.225127  124047 reflector.go:170] Listing and watching *apps.DaemonSet from storage/cacher.go:/daemonsets
I0212 07:24:27.225253  124047 storage_factory.go:285] storing deployments.apps in apps/v1, reading as apps/__internal from storagebackend.Config{Type:"", Prefix:"b2a57c8f-de33-4469-a89c-6a6a6fdee16f", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Quorum:false, Paging:true, Codec:runtime.Codec(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0212 07:24:27.225350  124047 clientconn.go:551] parsed scheme: ""
I0212 07:24:27.225364  124047 clientconn.go:557] scheme "" not registered, fallback to default scheme
I0212 07:24:27.225391  124047 resolver_conn_wrapper.go:116] ccResolverWrapper: sending new addresses to cc: [{127.0.0.1:2379 0  <nil>}]
I0212 07:24:27.225594  124047 balancer_v1_wrapper.go:125] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0212 07:24:27.225894  124047 balancer_v1_wrapper.go:245] clientv3/balancer: pin "127.0.0.1:2379"
I0212 07:24:27.225916  124047 balancer_v1_wrapper.go:125] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0212 07:24:27.226180  124047 store.go:1310] Monitoring deployments.apps count at <storage-prefix>//deployments
I0212 07:24:27.226276  124047 reflector.go:170] Listing and watching *apps.Deployment from storage/cacher.go:/deployments
I0212 07:24:27.226339  124047 storage_factory.go:285] storing ingresses.extensions in extensions/v1beta1, reading as extensions/__internal from storagebackend.Config{Type:"", Prefix:"b2a57c8f-de33-4469-a89c-6a6a6fdee16f", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Quorum:false, Paging:true, Codec:runtime.Codec(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0212 07:24:27.226425  124047 clientconn.go:551] parsed scheme: ""
I0212 07:24:27.226437  124047 clientconn.go:557] scheme "" not registered, fallback to default scheme
I0212 07:24:27.226465  124047 resolver_conn_wrapper.go:116] ccResolverWrapper: sending new addresses to cc: [{127.0.0.1:2379 0  <nil>}]
I0212 07:24:27.226525  124047 balancer_v1_wrapper.go:125] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0212 07:24:27.226907  124047 balancer_v1_wrapper.go:245] clientv3/balancer: pin "127.0.0.1:2379"
I0212 07:24:27.226939  124047 balancer_v1_wrapper.go:125] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0212 07:24:27.227153  124047 store.go:1310] Monitoring ingresses.extensions count at <storage-prefix>//ingresses
I0212 07:24:27.227281  124047 reflector.go:170] Listing and watching *extensions.Ingress from storage/cacher.go:/ingresses
I0212 07:24:27.227339  124047 storage_factory.go:285] storing podsecuritypolicies.policy in policy/v1beta1, reading as policy/__internal from storagebackend.Config{Type:"", Prefix:"b2a57c8f-de33-4469-a89c-6a6a6fdee16f", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Quorum:false, Paging:true, Codec:runtime.Codec(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0212 07:24:27.227426  124047 clientconn.go:551] parsed scheme: ""
I0212 07:24:27.227457  124047 clientconn.go:557] scheme "" not registered, fallback to default scheme
I0212 07:24:27.227545  124047 resolver_conn_wrapper.go:116] ccResolverWrapper: sending new addresses to cc: [{127.0.0.1:2379 0  <nil>}]
I0212 07:24:27.227600  124047 balancer_v1_wrapper.go:125] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0212 07:24:27.227820  124047 balancer_v1_wrapper.go:245] clientv3/balancer: pin "127.0.0.1:2379"
I0212 07:24:27.227858  124047 balancer_v1_wrapper.go:125] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0212 07:24:27.228035  124047 store.go:1310] Monitoring podsecuritypolicies.policy count at <storage-prefix>//podsecuritypolicies
I0212 07:24:27.228148  124047 reflector.go:170] Listing and watching *policy.PodSecurityPolicy from storage/cacher.go:/podsecuritypolicies
I0212 07:24:27.228205  124047 storage_factory.go:285] storing replicasets.apps in apps/v1, reading as apps/__internal from storagebackend.Config{Type:"", Prefix:"b2a57c8f-de33-4469-a89c-6a6a6fdee16f", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Quorum:false, Paging:true, Codec:runtime.Codec(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0212 07:24:27.228297  124047 clientconn.go:551] parsed scheme: ""
I0212 07:24:27.228310  124047 clientconn.go:557] scheme "" not registered, fallback to default scheme
I0212 07:24:27.228380  124047 resolver_conn_wrapper.go:116] ccResolverWrapper: sending new addresses to cc: [{127.0.0.1:2379 0  <nil>}]
I0212 07:24:27.228456  124047 balancer_v1_wrapper.go:125] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0212 07:24:27.228677  124047 balancer_v1_wrapper.go:245] clientv3/balancer: pin "127.0.0.1:2379"
I0212 07:24:27.228749  124047 balancer_v1_wrapper.go:125] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0212 07:24:27.228876  124047 store.go:1310] Monitoring replicasets.apps count at <storage-prefix>//replicasets
I0212 07:24:27.228975  124047 reflector.go:170] Listing and watching *apps.ReplicaSet from storage/cacher.go:/replicasets
I0212 07:24:27.229036  124047 storage_factory.go:285] storing networkpolicies.networking.k8s.io in networking.k8s.io/v1, reading as networking.k8s.io/__internal from storagebackend.Config{Type:"", Prefix:"b2a57c8f-de33-4469-a89c-6a6a6fdee16f", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Quorum:false, Paging:true, Codec:runtime.Codec(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0212 07:24:27.229117  124047 clientconn.go:551] parsed scheme: ""
I0212 07:24:27.229126  124047 clientconn.go:557] scheme "" not registered, fallback to default scheme
I0212 07:24:27.229144  124047 resolver_conn_wrapper.go:116] ccResolverWrapper: sending new addresses to cc: [{127.0.0.1:2379 0  <nil>}]
I0212 07:24:27.229178  124047 balancer_v1_wrapper.go:125] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0212 07:24:27.229443  124047 balancer_v1_wrapper.go:245] clientv3/balancer: pin "127.0.0.1:2379"
I0212 07:24:27.229631  124047 balancer_v1_wrapper.go:125] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0212 07:24:27.229791  124047 store.go:1310] Monitoring networkpolicies.networking.k8s.io count at <storage-prefix>//networkpolicies
I0212 07:24:27.229817  124047 reflector.go:170] Listing and watching *networking.NetworkPolicy from storage/cacher.go:/networkpolicies
I0212 07:24:27.229817  124047 master.go:415] Enabling API group "extensions".
I0212 07:24:27.230066  124047 storage_factory.go:285] storing networkpolicies.networking.k8s.io in networking.k8s.io/v1, reading as networking.k8s.io/__internal from storagebackend.Config{Type:"", Prefix:"b2a57c8f-de33-4469-a89c-6a6a6fdee16f", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Quorum:false, Paging:true, Codec:runtime.Codec(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0212 07:24:27.230136  124047 clientconn.go:551] parsed scheme: ""
I0212 07:24:27.230152  124047 clientconn.go:557] scheme "" not registered, fallback to default scheme
I0212 07:24:27.230178  124047 resolver_conn_wrapper.go:116] ccResolverWrapper: sending new addresses to cc: [{127.0.0.1:2379 0  <nil>}]
I0212 07:24:27.230247  124047 balancer_v1_wrapper.go:125] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0212 07:24:27.230566  124047 balancer_v1_wrapper.go:245] clientv3/balancer: pin "127.0.0.1:2379"
I0212 07:24:27.230638  124047 balancer_v1_wrapper.go:125] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0212 07:24:27.230807  124047 store.go:1310] Monitoring networkpolicies.networking.k8s.io count at <storage-prefix>//networkpolicies
I0212 07:24:27.230829  124047 master.go:415] Enabling API group "networking.k8s.io".
I0212 07:24:27.230981  124047 reflector.go:170] Listing and watching *networking.NetworkPolicy from storage/cacher.go:/networkpolicies
I0212 07:24:27.231051  124047 storage_factory.go:285] storing poddisruptionbudgets.policy in policy/v1beta1, reading as policy/__internal from storagebackend.Config{Type:"", Prefix:"b2a57c8f-de33-4469-a89c-6a6a6fdee16f", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Quorum:false, Paging:true, Codec:runtime.Codec(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0212 07:24:27.231144  124047 clientconn.go:551] parsed scheme: ""
I0212 07:24:27.231159  124047 clientconn.go:557] scheme "" not registered, fallback to default scheme
I0212 07:24:27.231194  124047 resolver_conn_wrapper.go:116] ccResolverWrapper: sending new addresses to cc: [{127.0.0.1:2379 0  <nil>}]
I0212 07:24:27.231268  124047 balancer_v1_wrapper.go:125] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0212 07:24:27.231547  124047 balancer_v1_wrapper.go:245] clientv3/balancer: pin "127.0.0.1:2379"
I0212 07:24:27.231645  124047 balancer_v1_wrapper.go:125] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0212 07:24:27.231731  124047 store.go:1310] Monitoring poddisruptionbudgets.policy count at <storage-prefix>//poddisruptionbudgets
I0212 07:24:27.231866  124047 reflector.go:170] Listing and watching *policy.PodDisruptionBudget from storage/cacher.go:/poddisruptionbudgets
I0212 07:24:27.231923  124047 storage_factory.go:285] storing podsecuritypolicies.policy in policy/v1beta1, reading as policy/__internal from storagebackend.Config{Type:"", Prefix:"b2a57c8f-de33-4469-a89c-6a6a6fdee16f", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Quorum:false, Paging:true, Codec:runtime.Codec(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0212 07:24:27.231998  124047 clientconn.go:551] parsed scheme: ""
I0212 07:24:27.232011  124047 clientconn.go:557] scheme "" not registered, fallback to default scheme
I0212 07:24:27.232058  124047 resolver_conn_wrapper.go:116] ccResolverWrapper: sending new addresses to cc: [{127.0.0.1:2379 0  <nil>}]
I0212 07:24:27.232161  124047 balancer_v1_wrapper.go:125] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0212 07:24:27.232417  124047 balancer_v1_wrapper.go:245] clientv3/balancer: pin "127.0.0.1:2379"
I0212 07:24:27.232444  124047 balancer_v1_wrapper.go:125] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0212 07:24:27.232652  124047 store.go:1310] Monitoring podsecuritypolicies.policy count at <storage-prefix>//podsecuritypolicies
I0212 07:24:27.232668  124047 master.go:415] Enabling API group "policy".
I0212 07:24:27.232699  124047 storage_factory.go:285] storing roles.rbac.authorization.k8s.io in rbac.authorization.k8s.io/v1, reading as rbac.authorization.k8s.io/__internal from storagebackend.Config{Type:"", Prefix:"b2a57c8f-de33-4469-a89c-6a6a6fdee16f", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Quorum:false, Paging:true, Codec:runtime.Codec(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0212 07:24:27.232714  124047 reflector.go:170] Listing and watching *policy.PodSecurityPolicy from storage/cacher.go:/podsecuritypolicies
I0212 07:24:27.232900  124047 clientconn.go:551] parsed scheme: ""
I0212 07:24:27.232914  124047 clientconn.go:557] scheme "" not registered, fallback to default scheme
I0212 07:24:27.232943  124047 resolver_conn_wrapper.go:116] ccResolverWrapper: sending new addresses to cc: [{127.0.0.1:2379 0  <nil>}]
I0212 07:24:27.232989  124047 balancer_v1_wrapper.go:125] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0212 07:24:27.233242  124047 balancer_v1_wrapper.go:245] clientv3/balancer: pin "127.0.0.1:2379"
I0212 07:24:27.233285  124047 balancer_v1_wrapper.go:125] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0212 07:24:27.233424  124047 store.go:1310] Monitoring roles.rbac.authorization.k8s.io count at <storage-prefix>//roles
I0212 07:24:27.233656  124047 storage_factory.go:285] storing rolebindings.rbac.authorization.k8s.io in rbac.authorization.k8s.io/v1, reading as rbac.authorization.k8s.io/__internal from storagebackend.Config{Type:"", Prefix:"b2a57c8f-de33-4469-a89c-6a6a6fdee16f", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Quorum:false, Paging:true, Codec:runtime.Codec(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0212 07:24:27.233774  124047 reflector.go:170] Listing and watching *rbac.Role from storage/cacher.go:/roles
I0212 07:24:27.233779  124047 clientconn.go:551] parsed scheme: ""
I0212 07:24:27.233827  124047 clientconn.go:557] scheme "" not registered, fallback to default scheme
I0212 07:24:27.233945  124047 resolver_conn_wrapper.go:116] ccResolverWrapper: sending new addresses to cc: [{127.0.0.1:2379 0  <nil>}]
I0212 07:24:27.234017  124047 balancer_v1_wrapper.go:125] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0212 07:24:27.234401  124047 balancer_v1_wrapper.go:245] clientv3/balancer: pin "127.0.0.1:2379"
I0212 07:24:27.234437  124047 balancer_v1_wrapper.go:125] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0212 07:24:27.234642  124047 store.go:1310] Monitoring rolebindings.rbac.authorization.k8s.io count at <storage-prefix>//rolebindings
I0212 07:24:27.234683  124047 storage_factory.go:285] storing clusterroles.rbac.authorization.k8s.io in rbac.authorization.k8s.io/v1, reading as rbac.authorization.k8s.io/__internal from storagebackend.Config{Type:"", Prefix:"b2a57c8f-de33-4469-a89c-6a6a6fdee16f", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Quorum:false, Paging:true, Codec:runtime.Codec(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0212 07:24:27.234766  124047 clientconn.go:551] parsed scheme: ""
I0212 07:24:27.235444  124047 clientconn.go:557] scheme "" not registered, fallback to default scheme
I0212 07:24:27.234946  124047 reflector.go:170] Listing and watching *rbac.RoleBinding from storage/cacher.go:/rolebindings
I0212 07:24:27.235547  124047 resolver_conn_wrapper.go:116] ccResolverWrapper: sending new addresses to cc: [{127.0.0.1:2379 0  <nil>}]
I0212 07:24:27.235599  124047 balancer_v1_wrapper.go:125] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0212 07:24:27.235914  124047 balancer_v1_wrapper.go:245] clientv3/balancer: pin "127.0.0.1:2379"
I0212 07:24:27.235991  124047 balancer_v1_wrapper.go:125] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0212 07:24:27.236482  124047 store.go:1310] Monitoring clusterroles.rbac.authorization.k8s.io count at <storage-prefix>//clusterroles
I0212 07:24:27.236640  124047 reflector.go:170] Listing and watching *rbac.ClusterRole from storage/cacher.go:/clusterroles
I0212 07:24:27.236658  124047 storage_factory.go:285] storing clusterrolebindings.rbac.authorization.k8s.io in rbac.authorization.k8s.io/v1, reading as rbac.authorization.k8s.io/__internal from storagebackend.Config{Type:"", Prefix:"b2a57c8f-de33-4469-a89c-6a6a6fdee16f", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Quorum:false, Paging:true, Codec:runtime.Codec(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0212 07:24:27.236744  124047 clientconn.go:551] parsed scheme: ""
I0212 07:24:27.236757  124047 clientconn.go:557] scheme "" not registered, fallback to default scheme
I0212 07:24:27.236787  124047 resolver_conn_wrapper.go:116] ccResolverWrapper: sending new addresses to cc: [{127.0.0.1:2379 0  <nil>}]
I0212 07:24:27.236969  124047 balancer_v1_wrapper.go:125] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0212 07:24:27.237647  124047 balancer_v1_wrapper.go:245] clientv3/balancer: pin "127.0.0.1:2379"
I0212 07:24:27.237789  124047 balancer_v1_wrapper.go:125] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0212 07:24:27.238143  124047 store.go:1310] Monitoring clusterrolebindings.rbac.authorization.k8s.io count at <storage-prefix>//clusterrolebindings
I0212 07:24:27.238203  124047 reflector.go:170] Listing and watching *rbac.ClusterRoleBinding from storage/cacher.go:/clusterrolebindings
I0212 07:24:27.238197  124047 storage_factory.go:285] storing roles.rbac.authorization.k8s.io in rbac.authorization.k8s.io/v1, reading as rbac.authorization.k8s.io/__internal from storagebackend.Config{Type:"", Prefix:"b2a57c8f-de33-4469-a89c-6a6a6fdee16f", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Quorum:false, Paging:true, Codec:runtime.Codec(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0212 07:24:27.238440  124047 clientconn.go:551] parsed scheme: ""
I0212 07:24:27.238461  124047 clientconn.go:557] scheme "" not registered, fallback to default scheme
I0212 07:24:27.238513  124047 resolver_conn_wrapper.go:116] ccResolverWrapper: sending new addresses to cc: [{127.0.0.1:2379 0  <nil>}]
I0212 07:24:27.238951  124047 balancer_v1_wrapper.go:125] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0212 07:24:27.239292  124047 balancer_v1_wrapper.go:245] clientv3/balancer: pin "127.0.0.1:2379"
I0212 07:24:27.239407  124047 balancer_v1_wrapper.go:125] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0212 07:24:27.239477  124047 store.go:1310] Monitoring roles.rbac.authorization.k8s.io count at <storage-prefix>//roles
I0212 07:24:27.239540  124047 reflector.go:170] Listing and watching *rbac.Role from storage/cacher.go:/roles
I0212 07:24:27.239732  124047 storage_factory.go:285] storing rolebindings.rbac.authorization.k8s.io in rbac.authorization.k8s.io/v1, reading as rbac.authorization.k8s.io/__internal from storagebackend.Config{Type:"", Prefix:"b2a57c8f-de33-4469-a89c-6a6a6fdee16f", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Quorum:false, Paging:true, Codec:runtime.Codec(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0212 07:24:27.239840  124047 clientconn.go:551] parsed scheme: ""
I0212 07:24:27.239865  124047 clientconn.go:557] scheme "" not registered, fallback to default scheme
I0212 07:24:27.239905  124047 resolver_conn_wrapper.go:116] ccResolverWrapper: sending new addresses to cc: [{127.0.0.1:2379 0  <nil>}]
I0212 07:24:27.239989  124047 balancer_v1_wrapper.go:125] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0212 07:24:27.240414  124047 balancer_v1_wrapper.go:245] clientv3/balancer: pin "127.0.0.1:2379"
I0212 07:24:27.240526  124047 balancer_v1_wrapper.go:125] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0212 07:24:27.240630  124047 store.go:1310] Monitoring rolebindings.rbac.authorization.k8s.io count at <storage-prefix>//rolebindings
I0212 07:24:27.240656  124047 reflector.go:170] Listing and watching *rbac.RoleBinding from storage/cacher.go:/rolebindings
I0212 07:24:27.240682  124047 storage_factory.go:285] storing clusterroles.rbac.authorization.k8s.io in rbac.authorization.k8s.io/v1, reading as rbac.authorization.k8s.io/__internal from storagebackend.Config{Type:"", Prefix:"b2a57c8f-de33-4469-a89c-6a6a6fdee16f", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Quorum:false, Paging:true, Codec:runtime.Codec(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0212 07:24:27.240887  124047 clientconn.go:551] parsed scheme: ""
I0212 07:24:27.240930  124047 clientconn.go:557] scheme "" not registered, fallback to default scheme
I0212 07:24:27.241003  124047 resolver_conn_wrapper.go:116] ccResolverWrapper: sending new addresses to cc: [{127.0.0.1:2379 0  <nil>}]
I0212 07:24:27.241096  124047 balancer_v1_wrapper.go:125] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0212 07:24:27.241343  124047 balancer_v1_wrapper.go:245] clientv3/balancer: pin "127.0.0.1:2379"
I0212 07:24:27.241397  124047 balancer_v1_wrapper.go:125] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0212 07:24:27.241608  124047 store.go:1310] Monitoring clusterroles.rbac.authorization.k8s.io count at <storage-prefix>//clusterroles
I0212 07:24:27.241673  124047 reflector.go:170] Listing and watching *rbac.ClusterRole from storage/cacher.go:/clusterroles
I0212 07:24:27.241795  124047 storage_factory.go:285] storing clusterrolebindings.rbac.authorization.k8s.io in rbac.authorization.k8s.io/v1, reading as rbac.authorization.k8s.io/__internal from storagebackend.Config{Type:"", Prefix:"b2a57c8f-de33-4469-a89c-6a6a6fdee16f", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Quorum:false, Paging:true, Codec:runtime.Codec(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0212 07:24:27.241871  124047 clientconn.go:551] parsed scheme: ""
I0212 07:24:27.241884  124047 clientconn.go:557] scheme "" not registered, fallback to default scheme
I0212 07:24:27.241911  124047 resolver_conn_wrapper.go:116] ccResolverWrapper: sending new addresses to cc: [{127.0.0.1:2379 0  <nil>}]
I0212 07:24:27.241956  124047 balancer_v1_wrapper.go:125] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0212 07:24:27.242353  124047 balancer_v1_wrapper.go:245] clientv3/balancer: pin "127.0.0.1:2379"
I0212 07:24:27.242403  124047 balancer_v1_wrapper.go:125] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0212 07:24:27.243073  124047 store.go:1310] Monitoring clusterrolebindings.rbac.authorization.k8s.io count at <storage-prefix>//clusterrolebindings
I0212 07:24:27.243120  124047 master.go:415] Enabling API group "rbac.authorization.k8s.io".
I0212 07:24:27.243129  124047 reflector.go:170] Listing and watching *rbac.ClusterRoleBinding from storage/cacher.go:/clusterrolebindings
I0212 07:24:27.245554  124047 storage_factory.go:285] storing priorityclasses.scheduling.k8s.io in scheduling.k8s.io/v1beta1, reading as scheduling.k8s.io/__internal from storagebackend.Config{Type:"", Prefix:"b2a57c8f-de33-4469-a89c-6a6a6fdee16f", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Quorum:false, Paging:true, Codec:runtime.Codec(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0212 07:24:27.245659  124047 clientconn.go:551] parsed scheme: ""
I0212 07:24:27.245683  124047 clientconn.go:557] scheme "" not registered, fallback to default scheme
I0212 07:24:27.245714  124047 resolver_conn_wrapper.go:116] ccResolverWrapper: sending new addresses to cc: [{127.0.0.1:2379 0  <nil>}]
I0212 07:24:27.245784  124047 balancer_v1_wrapper.go:125] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0212 07:24:27.246150  124047 balancer_v1_wrapper.go:245] clientv3/balancer: pin "127.0.0.1:2379"
I0212 07:24:27.246216  124047 balancer_v1_wrapper.go:125] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0212 07:24:27.246407  124047 store.go:1310] Monitoring priorityclasses.scheduling.k8s.io count at <storage-prefix>//priorityclasses
I0212 07:24:27.246463  124047 master.go:415] Enabling API group "scheduling.k8s.io".
I0212 07:24:27.246514  124047 master.go:407] Skipping disabled API group "settings.k8s.io".
I0212 07:24:27.246545  124047 reflector.go:170] Listing and watching *scheduling.PriorityClass from storage/cacher.go:/priorityclasses
I0212 07:24:27.246675  124047 storage_factory.go:285] storing storageclasses.storage.k8s.io in storage.k8s.io/v1, reading as storage.k8s.io/__internal from storagebackend.Config{Type:"", Prefix:"b2a57c8f-de33-4469-a89c-6a6a6fdee16f", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Quorum:false, Paging:true, Codec:runtime.Codec(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0212 07:24:27.246780  124047 clientconn.go:551] parsed scheme: ""
I0212 07:24:27.246804  124047 clientconn.go:557] scheme "" not registered, fallback to default scheme
I0212 07:24:27.246863  124047 resolver_conn_wrapper.go:116] ccResolverWrapper: sending new addresses to cc: [{127.0.0.1:2379 0  <nil>}]
I0212 07:24:27.247036  124047 balancer_v1_wrapper.go:125] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0212 07:24:27.247360  124047 balancer_v1_wrapper.go:245] clientv3/balancer: pin "127.0.0.1:2379"
I0212 07:24:27.247722  124047 store.go:1310] Monitoring storageclasses.storage.k8s.io count at <storage-prefix>//storageclasses
I0212 07:24:27.247759  124047 storage_factory.go:285] storing volumeattachments.storage.k8s.io in storage.k8s.io/v1beta1, reading as storage.k8s.io/__internal from storagebackend.Config{Type:"", Prefix:"b2a57c8f-de33-4469-a89c-6a6a6fdee16f", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Quorum:false, Paging:true, Codec:runtime.Codec(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0212 07:24:27.247774  124047 balancer_v1_wrapper.go:125] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0212 07:24:27.247780  124047 reflector.go:170] Listing and watching *storage.StorageClass from storage/cacher.go:/storageclasses
I0212 07:24:27.247919  124047 clientconn.go:551] parsed scheme: ""
I0212 07:24:27.247931  124047 clientconn.go:557] scheme "" not registered, fallback to default scheme
I0212 07:24:27.248082  124047 resolver_conn_wrapper.go:116] ccResolverWrapper: sending new addresses to cc: [{127.0.0.1:2379 0  <nil>}]
I0212 07:24:27.248158  124047 balancer_v1_wrapper.go:125] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0212 07:24:27.249154  124047 balancer_v1_wrapper.go:245] clientv3/balancer: pin "127.0.0.1:2379"
I0212 07:24:27.249241  124047 balancer_v1_wrapper.go:125] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0212 07:24:27.249363  124047 store.go:1310] Monitoring volumeattachments.storage.k8s.io count at <storage-prefix>//volumeattachments
I0212 07:24:27.249559  124047 storage_factory.go:285] storing storageclasses.storage.k8s.io in storage.k8s.io/v1, reading as storage.k8s.io/__internal from storagebackend.Config{Type:"", Prefix:"b2a57c8f-de33-4469-a89c-6a6a6fdee16f", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Quorum:false, Paging:true, Codec:runtime.Codec(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0212 07:24:27.249604  124047 reflector.go:170] Listing and watching *storage.VolumeAttachment from storage/cacher.go:/volumeattachments
I0212 07:24:27.249681  124047 clientconn.go:551] parsed scheme: ""
I0212 07:24:27.249705  124047 clientconn.go:557] scheme "" not registered, fallback to default scheme
I0212 07:24:27.249769  124047 resolver_conn_wrapper.go:116] ccResolverWrapper: sending new addresses to cc: [{127.0.0.1:2379 0  <nil>}]
I0212 07:24:27.249894  124047 balancer_v1_wrapper.go:125] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0212 07:24:27.250166  124047 balancer_v1_wrapper.go:245] clientv3/balancer: pin "127.0.0.1:2379"
I0212 07:24:27.250319  124047 store.go:1310] Monitoring storageclasses.storage.k8s.io count at <storage-prefix>//storageclasses
I0212 07:24:27.250353  124047 storage_factory.go:285] storing volumeattachments.storage.k8s.io in storage.k8s.io/v1beta1, reading as storage.k8s.io/__internal from storagebackend.Config{Type:"", Prefix:"b2a57c8f-de33-4469-a89c-6a6a6fdee16f", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Quorum:false, Paging:true, Codec:runtime.Codec(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0212 07:24:27.250441  124047 clientconn.go:551] parsed scheme: ""
I0212 07:24:27.250456  124047 clientconn.go:557] scheme "" not registered, fallback to default scheme
I0212 07:24:27.250483  124047 resolver_conn_wrapper.go:116] ccResolverWrapper: sending new addresses to cc: [{127.0.0.1:2379 0  <nil>}]
I0212 07:24:27.250592  124047 reflector.go:170] Listing and watching *storage.StorageClass from storage/cacher.go:/storageclasses
I0212 07:24:27.250655  124047 balancer_v1_wrapper.go:125] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0212 07:24:27.250868  124047 balancer_v1_wrapper.go:125] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0212 07:24:27.251791  124047 balancer_v1_wrapper.go:245] clientv3/balancer: pin "127.0.0.1:2379"
I0212 07:24:27.252014  124047 balancer_v1_wrapper.go:125] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0212 07:24:27.252024  124047 store.go:1310] Monitoring volumeattachments.storage.k8s.io count at <storage-prefix>//volumeattachments
I0212 07:24:27.252063  124047 master.go:415] Enabling API group "storage.k8s.io".
I0212 07:24:27.252257  124047 storage_factory.go:285] storing deployments.apps in apps/v1, reading as apps/__internal from storagebackend.Config{Type:"", Prefix:"b2a57c8f-de33-4469-a89c-6a6a6fdee16f", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Quorum:false, Paging:true, Codec:runtime.Codec(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0212 07:24:27.252286  124047 reflector.go:170] Listing and watching *storage.VolumeAttachment from storage/cacher.go:/volumeattachments
I0212 07:24:27.252352  124047 clientconn.go:551] parsed scheme: ""
I0212 07:24:27.252397  124047 clientconn.go:557] scheme "" not registered, fallback to default scheme
I0212 07:24:27.252433  124047 resolver_conn_wrapper.go:116] ccResolverWrapper: sending new addresses to cc: [{127.0.0.1:2379 0  <nil>}]
I0212 07:24:27.252474  124047 balancer_v1_wrapper.go:125] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0212 07:24:27.252760  124047 balancer_v1_wrapper.go:245] clientv3/balancer: pin "127.0.0.1:2379"
I0212 07:24:27.252809  124047 balancer_v1_wrapper.go:125] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0212 07:24:27.253165  124047 store.go:1310] Monitoring deployments.apps count at <storage-prefix>//deployments
I0212 07:24:27.253249  124047 reflector.go:170] Listing and watching *apps.Deployment from storage/cacher.go:/deployments
I0212 07:24:27.253348  124047 storage_factory.go:285] storing statefulsets.apps in apps/v1, reading as apps/__internal from storagebackend.Config{Type:"", Prefix:"b2a57c8f-de33-4469-a89c-6a6a6fdee16f", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Quorum:false, Paging:true, Codec:runtime.Codec(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0212 07:24:27.253440  124047 clientconn.go:551] parsed scheme: ""
I0212 07:24:27.253462  124047 clientconn.go:557] scheme "" not registered, fallback to default scheme
I0212 07:24:27.253578  124047 resolver_conn_wrapper.go:116] ccResolverWrapper: sending new addresses to cc: [{127.0.0.1:2379 0  <nil>}]
I0212 07:24:27.253626  124047 balancer_v1_wrapper.go:125] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0212 07:24:27.253837  124047 balancer_v1_wrapper.go:245] clientv3/balancer: pin "127.0.0.1:2379"
I0212 07:24:27.253883  124047 balancer_v1_wrapper.go:125] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0212 07:24:27.254428  124047 store.go:1310] Monitoring statefulsets.apps count at <storage-prefix>//statefulsets
I0212 07:24:27.254545  124047 reflector.go:170] Listing and watching *apps.StatefulSet from storage/cacher.go:/statefulsets
I0212 07:24:27.254674  124047 storage_factory.go:285] storing controllerrevisions.apps in apps/v1, reading as apps/__internal from storagebackend.Config{Type:"", Prefix:"b2a57c8f-de33-4469-a89c-6a6a6fdee16f", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Quorum:false, Paging:true, Codec:runtime.Codec(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0212 07:24:27.254766  124047 clientconn.go:551] parsed scheme: ""
I0212 07:24:27.254790  124047 clientconn.go:557] scheme "" not registered, fallback to default scheme
I0212 07:24:27.254829  124047 resolver_conn_wrapper.go:116] ccResolverWrapper: sending new addresses to cc: [{127.0.0.1:2379 0  <nil>}]
I0212 07:24:27.254941  124047 balancer_v1_wrapper.go:125] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0212 07:24:27.255249  124047 balancer_v1_wrapper.go:245] clientv3/balancer: pin "127.0.0.1:2379"
I0212 07:24:27.255478  124047 store.go:1310] Monitoring controllerrevisions.apps count at <storage-prefix>//controllerrevisions
I0212 07:24:27.255597  124047 balancer_v1_wrapper.go:125] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0212 07:24:27.255690  124047 reflector.go:170] Listing and watching *apps.ControllerRevision from storage/cacher.go:/controllerrevisions
I0212 07:24:27.255697  124047 storage_factory.go:285] storing deployments.apps in apps/v1, reading as apps/__internal from storagebackend.Config{Type:"", Prefix:"b2a57c8f-de33-4469-a89c-6a6a6fdee16f", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Quorum:false, Paging:true, Codec:runtime.Codec(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0212 07:24:27.255810  124047 clientconn.go:551] parsed scheme: ""
I0212 07:24:27.255848  124047 clientconn.go:557] scheme "" not registered, fallback to default scheme
I0212 07:24:27.255894  124047 resolver_conn_wrapper.go:116] ccResolverWrapper: sending new addresses to cc: [{127.0.0.1:2379 0  <nil>}]
I0212 07:24:27.255950  124047 balancer_v1_wrapper.go:125] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0212 07:24:27.256175  124047 balancer_v1_wrapper.go:245] clientv3/balancer: pin "127.0.0.1:2379"
I0212 07:24:27.256425  124047 store.go:1310] Monitoring deployments.apps count at <storage-prefix>//deployments
I0212 07:24:27.256478  124047 balancer_v1_wrapper.go:125] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0212 07:24:27.256609  124047 storage_factory.go:285] storing statefulsets.apps in apps/v1, reading as apps/__internal from storagebackend.Config{Type:"", Prefix:"b2a57c8f-de33-4469-a89c-6a6a6fdee16f", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Quorum:false, Paging:true, Codec:runtime.Codec(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0212 07:24:27.256659  124047 reflector.go:170] Listing and watching *apps.Deployment from storage/cacher.go:/deployments
I0212 07:24:27.256724  124047 clientconn.go:551] parsed scheme: ""
I0212 07:24:27.256749  124047 clientconn.go:557] scheme "" not registered, fallback to default scheme
I0212 07:24:27.256809  124047 resolver_conn_wrapper.go:116] ccResolverWrapper: sending new addresses to cc: [{127.0.0.1:2379 0  <nil>}]
I0212 07:24:27.256897  124047 balancer_v1_wrapper.go:125] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0212 07:24:27.257118  124047 balancer_v1_wrapper.go:245] clientv3/balancer: pin "127.0.0.1:2379"
I0212 07:24:27.257149  124047 balancer_v1_wrapper.go:125] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0212 07:24:27.257474  124047 store.go:1310] Monitoring statefulsets.apps count at <storage-prefix>//statefulsets
I0212 07:24:27.257529  124047 reflector.go:170] Listing and watching *apps.StatefulSet from storage/cacher.go:/statefulsets
I0212 07:24:27.257650  124047 storage_factory.go:285] storing daemonsets.apps in apps/v1, reading as apps/__internal from storagebackend.Config{Type:"", Prefix:"b2a57c8f-de33-4469-a89c-6a6a6fdee16f", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Quorum:false, Paging:true, Codec:runtime.Codec(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0212 07:24:27.257717  124047 clientconn.go:551] parsed scheme: ""
I0212 07:24:27.257741  124047 clientconn.go:557] scheme "" not registered, fallback to default scheme
I0212 07:24:27.257770  124047 resolver_conn_wrapper.go:116] ccResolverWrapper: sending new addresses to cc: [{127.0.0.1:2379 0  <nil>}]
I0212 07:24:27.257978  124047 balancer_v1_wrapper.go:125] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0212 07:24:27.258385  124047 balancer_v1_wrapper.go:245] clientv3/balancer: pin "127.0.0.1:2379"
I0212 07:24:27.258452  124047 balancer_v1_wrapper.go:125] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0212 07:24:27.258694  124047 store.go:1310] Monitoring daemonsets.apps count at <storage-prefix>//daemonsets
I0212 07:24:27.258829  124047 reflector.go:170] Listing and watching *apps.DaemonSet from storage/cacher.go:/daemonsets
I0212 07:24:27.258879  124047 storage_factory.go:285] storing replicasets.apps in apps/v1, reading as apps/__internal from storagebackend.Config{Type:"", Prefix:"b2a57c8f-de33-4469-a89c-6a6a6fdee16f", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Quorum:false, Paging:true, Codec:runtime.Codec(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0212 07:24:27.258997  124047 clientconn.go:551] parsed scheme: ""
I0212 07:24:27.259032  124047 clientconn.go:557] scheme "" not registered, fallback to default scheme
I0212 07:24:27.259111  124047 resolver_conn_wrapper.go:116] ccResolverWrapper: sending new addresses to cc: [{127.0.0.1:2379 0  <nil>}]
I0212 07:24:27.259189  124047 balancer_v1_wrapper.go:125] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0212 07:24:27.259424  124047 balancer_v1_wrapper.go:245] clientv3/balancer: pin "127.0.0.1:2379"
I0212 07:24:27.259708  124047 store.go:1310] Monitoring replicasets.apps count at <storage-prefix>//replicasets
I0212 07:24:27.259868  124047 balancer_v1_wrapper.go:125] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0212 07:24:27.259889  124047 reflector.go:170] Listing and watching *apps.ReplicaSet from storage/cacher.go:/replicasets
I0212 07:24:27.259894  124047 storage_factory.go:285] storing controllerrevisions.apps in apps/v1, reading as apps/__internal from storagebackend.Config{Type:"", Prefix:"b2a57c8f-de33-4469-a89c-6a6a6fdee16f", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Quorum:false, Paging:true, Codec:runtime.Codec(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0212 07:24:27.259958  124047 clientconn.go:551] parsed scheme: ""
I0212 07:24:27.259980  124047 clientconn.go:557] scheme "" not registered, fallback to default scheme
I0212 07:24:27.260021  124047 resolver_conn_wrapper.go:116] ccResolverWrapper: sending new addresses to cc: [{127.0.0.1:2379 0  <nil>}]
I0212 07:24:27.260117  124047 balancer_v1_wrapper.go:125] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0212 07:24:27.260654  124047 balancer_v1_wrapper.go:245] clientv3/balancer: pin "127.0.0.1:2379"
I0212 07:24:27.260753  124047 balancer_v1_wrapper.go:125] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0212 07:24:27.260896  124047 store.go:1310] Monitoring controllerrevisions.apps count at <storage-prefix>//controllerrevisions
I0212 07:24:27.260992  124047 reflector.go:170] Listing and watching *apps.ControllerRevision from storage/cacher.go:/controllerrevisions
I0212 07:24:27.261099  124047 storage_factory.go:285] storing deployments.apps in apps/v1, reading as apps/__internal from storagebackend.Config{Type:"", Prefix:"b2a57c8f-de33-4469-a89c-6a6a6fdee16f", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Quorum:false, Paging:true, Codec:runtime.Codec(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0212 07:24:27.261175  124047 clientconn.go:551] parsed scheme: ""
I0212 07:24:27.261200  124047 clientconn.go:557] scheme "" not registered, fallback to default scheme
I0212 07:24:27.261240  124047 resolver_conn_wrapper.go:116] ccResolverWrapper: sending new addresses to cc: [{127.0.0.1:2379 0  <nil>}]
I0212 07:24:27.261315  124047 balancer_v1_wrapper.go:125] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0212 07:24:27.261707  124047 balancer_v1_wrapper.go:245] clientv3/balancer: pin "127.0.0.1:2379"
I0212 07:24:27.261731  124047 balancer_v1_wrapper.go:125] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0212 07:24:27.261962  124047 store.go:1310] Monitoring deployments.apps count at <storage-prefix>//deployments
I0212 07:24:27.262081  124047 reflector.go:170] Listing and watching *apps.Deployment from storage/cacher.go:/deployments
I0212 07:24:27.262157  124047 storage_factory.go:285] storing statefulsets.apps in apps/v1, reading as apps/__internal from storagebackend.Config{Type:"", Prefix:"b2a57c8f-de33-4469-a89c-6a6a6fdee16f", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Quorum:false, Paging:true, Codec:runtime.Codec(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0212 07:24:27.262264  124047 clientconn.go:551] parsed scheme: ""
I0212 07:24:27.262287  124047 clientconn.go:557] scheme "" not registered, fallback to default scheme
I0212 07:24:27.262315  124047 resolver_conn_wrapper.go:116] ccResolverWrapper: sending new addresses to cc: [{127.0.0.1:2379 0  <nil>}]
I0212 07:24:27.262372  124047 balancer_v1_wrapper.go:125] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0212 07:24:27.262615  124047 balancer_v1_wrapper.go:245] clientv3/balancer: pin "127.0.0.1:2379"
I0212 07:24:27.262763  124047 balancer_v1_wrapper.go:125] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0212 07:24:27.262833  124047 store.go:1310] Monitoring statefulsets.apps count at <storage-prefix>//statefulsets
I0212 07:24:27.262875  124047 reflector.go:170] Listing and watching *apps.StatefulSet from storage/cacher.go:/statefulsets
I0212 07:24:27.263001  124047 storage_factory.go:285] storing daemonsets.apps in apps/v1, reading as apps/__internal from storagebackend.Config{Type:"", Prefix:"b2a57c8f-de33-4469-a89c-6a6a6fdee16f", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Quorum:false, Paging:true, Codec:runtime.Codec(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0212 07:24:27.263101  124047 clientconn.go:551] parsed scheme: ""
I0212 07:24:27.263115  124047 clientconn.go:557] scheme "" not registered, fallback to default scheme
I0212 07:24:27.263164  124047 resolver_conn_wrapper.go:116] ccResolverWrapper: sending new addresses to cc: [{127.0.0.1:2379 0  <nil>}]
I0212 07:24:27.263210  124047 balancer_v1_wrapper.go:125] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0212 07:24:27.263526  124047 balancer_v1_wrapper.go:245] clientv3/balancer: pin "127.0.0.1:2379"
I0212 07:24:27.263549  124047 balancer_v1_wrapper.go:125] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0212 07:24:27.263777  124047 store.go:1310] Monitoring daemonsets.apps count at <storage-prefix>//daemonsets
I0212 07:24:27.263970  124047 reflector.go:170] Listing and watching *apps.DaemonSet from storage/cacher.go:/daemonsets
I0212 07:24:27.263968  124047 storage_factory.go:285] storing replicasets.apps in apps/v1, reading as apps/__internal from storagebackend.Config{Type:"", Prefix:"b2a57c8f-de33-4469-a89c-6a6a6fdee16f", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Quorum:false, Paging:true, Codec:runtime.Codec(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0212 07:24:27.264076  124047 clientconn.go:551] parsed scheme: ""
I0212 07:24:27.264089  124047 clientconn.go:557] scheme "" not registered, fallback to default scheme
I0212 07:24:27.264129  124047 resolver_conn_wrapper.go:116] ccResolverWrapper: sending new addresses to cc: [{127.0.0.1:2379 0  <nil>}]
I0212 07:24:27.264192  124047 balancer_v1_wrapper.go:125] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0212 07:24:27.264566  124047 balancer_v1_wrapper.go:245] clientv3/balancer: pin "127.0.0.1:2379"
I0212 07:24:27.264762  124047 store.go:1310] Monitoring replicasets.apps count at <storage-prefix>//replicasets
I0212 07:24:27.264798  124047 balancer_v1_wrapper.go:125] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0212 07:24:27.264884  124047 reflector.go:170] Listing and watching *apps.ReplicaSet from storage/cacher.go:/replicasets
I0212 07:24:27.265008  124047 storage_factory.go:285] storing controllerrevisions.apps in apps/v1, reading as apps/__internal from storagebackend.Config{Type:"", Prefix:"b2a57c8f-de33-4469-a89c-6a6a6fdee16f", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Quorum:false, Paging:true, Codec:runtime.Codec(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0212 07:24:27.265112  124047 clientconn.go:551] parsed scheme: ""
I0212 07:24:27.265125  124047 clientconn.go:557] scheme "" not registered, fallback to default scheme
I0212 07:24:27.265152  124047 resolver_conn_wrapper.go:116] ccResolverWrapper: sending new addresses to cc: [{127.0.0.1:2379 0  <nil>}]
I0212 07:24:27.265774  124047 balancer_v1_wrapper.go:125] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0212 07:24:27.266098  124047 balancer_v1_wrapper.go:245] clientv3/balancer: pin "127.0.0.1:2379"
I0212 07:24:27.266337  124047 store.go:1310] Monitoring controllerrevisions.apps count at <storage-prefix>//controllerrevisions
I0212 07:24:27.266388  124047 master.go:415] Enabling API group "apps".
I0212 07:24:27.266423  124047 storage_factory.go:285] storing validatingwebhookconfigurations.admissionregistration.k8s.io in admissionregistration.k8s.io/v1beta1, reading as admissionregistration.k8s.io/__internal from storagebackend.Config{Type:"", Prefix:"b2a57c8f-de33-4469-a89c-6a6a6fdee16f", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Quorum:false, Paging:true, Codec:runtime.Codec(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0212 07:24:27.266475  124047 balancer_v1_wrapper.go:125] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0212 07:24:27.266527  124047 clientconn.go:551] parsed scheme: ""
I0212 07:24:27.266541  124047 clientconn.go:557] scheme "" not registered, fallback to default scheme
I0212 07:24:27.266542  124047 reflector.go:170] Listing and watching *apps.ControllerRevision from storage/cacher.go:/controllerrevisions
I0212 07:24:27.266569  124047 resolver_conn_wrapper.go:116] ccResolverWrapper: sending new addresses to cc: [{127.0.0.1:2379 0  <nil>}]
I0212 07:24:27.266711  124047 balancer_v1_wrapper.go:125] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0212 07:24:27.267575  124047 balancer_v1_wrapper.go:245] clientv3/balancer: pin "127.0.0.1:2379"
I0212 07:24:27.267659  124047 balancer_v1_wrapper.go:125] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0212 07:24:27.267778  124047 store.go:1310] Monitoring validatingwebhookconfigurations.admissionregistration.k8s.io count at <storage-prefix>//validatingwebhookconfigurations
I0212 07:24:27.267809  124047 storage_factory.go:285] storing mutatingwebhookconfigurations.admissionregistration.k8s.io in admissionregistration.k8s.io/v1beta1, reading as admissionregistration.k8s.io/__internal from storagebackend.Config{Type:"", Prefix:"b2a57c8f-de33-4469-a89c-6a6a6fdee16f", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Quorum:false, Paging:true, Codec:runtime.Codec(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0212 07:24:27.267910  124047 clientconn.go:551] parsed scheme: ""
I0212 07:24:27.267938  124047 clientconn.go:557] scheme "" not registered, fallback to default scheme
I0212 07:24:27.267835  124047 reflector.go:170] Listing and watching *admissionregistration.ValidatingWebhookConfiguration from storage/cacher.go:/validatingwebhookconfigurations
I0212 07:24:27.267979  124047 resolver_conn_wrapper.go:116] ccResolverWrapper: sending new addresses to cc: [{127.0.0.1:2379 0  <nil>}]
I0212 07:24:27.268017  124047 balancer_v1_wrapper.go:125] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0212 07:24:27.268363  124047 balancer_v1_wrapper.go:245] clientv3/balancer: pin "127.0.0.1:2379"
I0212 07:24:27.268619  124047 balancer_v1_wrapper.go:125] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0212 07:24:27.268694  124047 store.go:1310] Monitoring mutatingwebhookconfigurations.admissionregistration.k8s.io count at <storage-prefix>//mutatingwebhookconfigurations
I0212 07:24:27.268770  124047 reflector.go:170] Listing and watching *admissionregistration.MutatingWebhookConfiguration from storage/cacher.go:/mutatingwebhookconfigurations
I0212 07:24:27.268807  124047 master.go:415] Enabling API group "admissionregistration.k8s.io".
I0212 07:24:27.268856  124047 storage_factory.go:285] storing events in v1, reading as __internal from storagebackend.Config{Type:"", Prefix:"b2a57c8f-de33-4469-a89c-6a6a6fdee16f", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Quorum:false, Paging:true, Codec:runtime.Codec(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0212 07:24:27.269674  124047 clientconn.go:551] parsed scheme: ""
I0212 07:24:27.269698  124047 clientconn.go:557] scheme "" not registered, fallback to default scheme
I0212 07:24:27.269754  124047 resolver_conn_wrapper.go:116] ccResolverWrapper: sending new addresses to cc: [{127.0.0.1:2379 0  <nil>}]
I0212 07:24:27.269796  124047 balancer_v1_wrapper.go:125] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0212 07:24:27.270002  124047 balancer_v1_wrapper.go:245] clientv3/balancer: pin "127.0.0.1:2379"
I0212 07:24:27.270057  124047 store.go:1310] Monitoring events count at <storage-prefix>//events
I0212 07:24:27.270072  124047 master.go:415] Enabling API group "events.k8s.io".
I0212 07:24:27.270182  124047 balancer_v1_wrapper.go:125] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
W0212 07:24:27.277816  124047 genericapiserver.go:330] Skipping API batch/v2alpha1 because it has no resources.
W0212 07:24:27.293602  124047 genericapiserver.go:330] Skipping API rbac.authorization.k8s.io/v1alpha1 because it has no resources.
W0212 07:24:27.294262  124047 genericapiserver.go:330] Skipping API scheduling.k8s.io/v1alpha1 because it has no resources.
W0212 07:24:27.296864  124047 genericapiserver.go:330] Skipping API storage.k8s.io/v1alpha1 because it has no resources.
I0212 07:24:27.316000  124047 healthz.go:170] healthz check etcd failed: etcd client connection not yet established
I0212 07:24:27.316037  124047 healthz.go:170] healthz check poststarthook/bootstrap-controller failed: not finished
I0212 07:24:27.316068  124047 healthz.go:170] healthz check poststarthook/rbac/bootstrap-roles failed: not finished
I0212 07:24:27.316088  124047 healthz.go:170] healthz check poststarthook/scheduling/bootstrap-system-priority-classes failed: not finished
I0212 07:24:27.316109  124047 healthz.go:170] healthz check poststarthook/ca-registration failed: not finished
I0212 07:24:27.316287  124047 wrap.go:47] GET /healthz: (383.6µs) 500
goroutine 28273 [running]:
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/httplog.(*respLogger).recordStatus(0xc01dd550a0, 0x1f4)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/httplog.(*respLogger).WriteHeader(0xc01dd550a0, 0x1f4)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.(*baseTimeoutWriter).WriteHeader(0xc01dd492e0, 0x1f4)
net/http.Error(0x7f0a860eda60, 0xc01dd34290, 0xc0191351e0, 0x18a, 0x1f4)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/healthz.handleRootHealthz.func1(0x7f0a860eda60, 0xc01dd34290, 0xc01dcf3a00)
net/http.HandlerFunc.ServeHTTP(0xc01dd48b60, 0x7f0a860eda60, 0xc01dd34290, 0xc01dcf3a00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/mux.(*pathHandler).ServeHTTP(0xc01dd50800, 0x7f0a860eda60, 0xc01dd34290, 0xc01dcf3a00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/mux.(*PathRecorderMux).ServeHTTP(0xc013d3e380, 0x7f0a860eda60, 0xc01dd34290, 0xc01dcf3a00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server.director.ServeHTTP(0x41bac8d, 0xe, 0xc01ab93320, 0xc013d3e380, 0x7f0a860eda60, 0xc01dd34290, 0xc01dcf3a00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/filters.WithAuthorization.func1(0x7f0a860eda60, 0xc01dd34290, 0xc01dcf3a00)
net/http.HandlerFunc.ServeHTTP(0xc01abb9140, 0x7f0a860eda60, 0xc01dd34290, 0xc01dcf3a00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.WithMaxInFlightLimit.func1(0x7f0a860eda60, 0xc01dd34290, 0xc01dcf3a00)
net/http.HandlerFunc.ServeHTTP(0xc013d3ca20, 0x7f0a860eda60, 0xc01dd34290, 0xc01dcf3a00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/filters.WithImpersonation.func1(0x7f0a860eda60, 0xc01dd34290, 0xc01dcf3a00)
net/http.HandlerFunc.ServeHTTP(0xc01abb9180, 0x7f0a860eda60, 0xc01dd34290, 0xc01dcf3a00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/filters.WithAuthentication.func1(0x7f0a860eda60, 0xc01dd34290, 0xc01dcf3900)
net/http.HandlerFunc.ServeHTTP(0xc01abc7360, 0x7f0a860eda60, 0xc01dd34290, 0xc01dcf3900)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.(*timeoutHandler).ServeHTTP.func1(0xc01d7cdf20, 0xc01ac164e0, 0x60d7720, 0xc01dd34290, 0xc01dcf3900)
created by k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.(*timeoutHandler).ServeHTTP

logging error output: "[+]ping ok\n[+]log ok\n[-]etcd failed: reason withheld\n[+]poststarthook/generic-apiserver-start-informers ok\n[-]poststarthook/bootstrap-controller failed: reason withheld\n[-]poststarthook/rbac/bootstrap-roles failed: reason withheld\n[-]poststarthook/scheduling/bootstrap-system-priority-classes failed: reason withheld\n[-]poststarthook/ca-registration failed: reason withheld\nhealthz check failed\n"
 [Go-http-client/1.1 127.0.0.1:34390]
I0212 07:24:27.318539  124047 wrap.go:47] GET /api/v1/services: (1.308673ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:34390]
I0212 07:24:27.322302  124047 wrap.go:47] GET /api/v1/services: (1.00865ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:34390]
I0212 07:24:27.325525  124047 wrap.go:47] GET /api/v1/namespaces/default: (1.080844ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:34390]
I0212 07:24:27.327814  124047 wrap.go:47] POST /api/v1/namespaces: (1.817724ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:34390]
I0212 07:24:27.329456  124047 wrap.go:47] GET /api/v1/namespaces/default/services/kubernetes: (1.187729ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:34390]
I0212 07:24:27.334009  124047 wrap.go:47] POST /api/v1/namespaces/default/services: (4.097004ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:34390]
I0212 07:24:27.340128  124047 wrap.go:47] GET /api/v1/namespaces/default/endpoints/kubernetes: (3.287214ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:34390]
I0212 07:24:27.343981  124047 wrap.go:47] POST /api/v1/namespaces/default/endpoints: (3.289008ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:34390]
I0212 07:24:27.346899  124047 wrap.go:47] GET /api/v1/namespaces/kube-system: (992.152µs) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:34396]
I0212 07:24:27.348773  124047 wrap.go:47] GET /api/v1/namespaces/default: (3.836356ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:34390]
I0212 07:24:27.348812  124047 wrap.go:47] GET /api/v1/services: (2.409233ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:34398]
I0212 07:24:27.349769  124047 wrap.go:47] GET /api/v1/services: (1.148874ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:34400]
I0212 07:24:27.350913  124047 wrap.go:47] POST /api/v1/namespaces: (1.949457ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:34396]
I0212 07:24:27.352017  124047 wrap.go:47] GET /api/v1/namespaces/kube-public: (708.055µs) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:34396]
I0212 07:24:27.352205  124047 wrap.go:47] GET /api/v1/namespaces/default/services/kubernetes: (1.265763ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:34400]
I0212 07:24:27.353638  124047 wrap.go:47] GET /api/v1/namespaces/default/endpoints/kubernetes: (1.037257ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:34398]
I0212 07:24:27.353728  124047 wrap.go:47] POST /api/v1/namespaces: (1.306158ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:34396]
I0212 07:24:27.355006  124047 wrap.go:47] GET /api/v1/namespaces/kube-node-lease: (915.75µs) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:34396]
I0212 07:24:27.357021  124047 wrap.go:47] POST /api/v1/namespaces: (1.629314ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:34396]
I0212 07:24:27.417088  124047 healthz.go:170] healthz check etcd failed: etcd client connection not yet established
I0212 07:24:27.417117  124047 healthz.go:170] healthz check poststarthook/rbac/bootstrap-roles failed: not finished
I0212 07:24:27.417126  124047 healthz.go:170] healthz check poststarthook/scheduling/bootstrap-system-priority-classes failed: not finished
I0212 07:24:27.417132  124047 healthz.go:170] healthz check poststarthook/ca-registration failed: not finished
I0212 07:24:27.417308  124047 wrap.go:47] GET /healthz: (331.58µs) 500
goroutine 28325 [running]:
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/httplog.(*respLogger).recordStatus(0xc01de98930, 0x1f4)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/httplog.(*respLogger).WriteHeader(0xc01de98930, 0x1f4)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.(*baseTimeoutWriter).WriteHeader(0xc01de9d400, 0x1f4)
net/http.Error(0x7f0a860eda60, 0xc01aa86be8, 0xc014e3e900, 0x175, 0x1f4)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/healthz.handleRootHealthz.func1(0x7f0a860eda60, 0xc01aa86be8, 0xc01de51600)
net/http.HandlerFunc.ServeHTTP(0xc01dd48b60, 0x7f0a860eda60, 0xc01aa86be8, 0xc01de51600)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/mux.(*pathHandler).ServeHTTP(0xc01dd50800, 0x7f0a860eda60, 0xc01aa86be8, 0xc01de51600)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/mux.(*PathRecorderMux).ServeHTTP(0xc013d3e380, 0x7f0a860eda60, 0xc01aa86be8, 0xc01de51600)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server.director.ServeHTTP(0x41bac8d, 0xe, 0xc01ab93320, 0xc013d3e380, 0x7f0a860eda60, 0xc01aa86be8, 0xc01de51600)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/filters.WithAuthorization.func1(0x7f0a860eda60, 0xc01aa86be8, 0xc01de51600)
net/http.HandlerFunc.ServeHTTP(0xc01abb9140, 0x7f0a860eda60, 0xc01aa86be8, 0xc01de51600)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.WithMaxInFlightLimit.func1(0x7f0a860eda60, 0xc01aa86be8, 0xc01de51600)
net/http.HandlerFunc.ServeHTTP(0xc013d3ca20, 0x7f0a860eda60, 0xc01aa86be8, 0xc01de51600)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/filters.WithImpersonation.func1(0x7f0a860eda60, 0xc01aa86be8, 0xc01de51600)
net/http.HandlerFunc.ServeHTTP(0xc01abb9180, 0x7f0a860eda60, 0xc01aa86be8, 0xc01de51600)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/filters.WithAuthentication.func1(0x7f0a860eda60, 0xc01aa86be8, 0xc01de51500)
net/http.HandlerFunc.ServeHTTP(0xc01abc7360, 0x7f0a860eda60, 0xc01aa86be8, 0xc01de51500)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.(*timeoutHandler).ServeHTTP.func1(0xc01deca360, 0xc01ac164e0, 0x60d7720, 0xc01aa86be8, 0xc01de51500)
created by k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.(*timeoutHandler).ServeHTTP

logging error output: "[+]ping ok\n[+]log ok\n[-]etcd failed: reason withheld\n[+]poststarthook/generic-apiserver-start-informers ok\n[+]poststarthook/bootstrap-controller ok\n[-]poststarthook/rbac/bootstrap-roles failed: reason withheld\n[-]poststarthook/scheduling/bootstrap-system-priority-classes failed: reason withheld\n[-]poststarthook/ca-registration failed: reason withheld\nhealthz check failed\n"
 [Go-http-client/1.1 127.0.0.1:34396]
I0212 07:24:27.517137  124047 healthz.go:170] healthz check etcd failed: etcd client connection not yet established
I0212 07:24:27.517177  124047 healthz.go:170] healthz check poststarthook/rbac/bootstrap-roles failed: not finished
I0212 07:24:27.517188  124047 healthz.go:170] healthz check poststarthook/scheduling/bootstrap-system-priority-classes failed: not finished
I0212 07:24:27.517195  124047 healthz.go:170] healthz check poststarthook/ca-registration failed: not finished
I0212 07:24:27.517370  124047 wrap.go:47] GET /healthz: (372.737µs) 500
goroutine 28327 [running]:
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/httplog.(*respLogger).recordStatus(0xc01de98a10, 0x1f4)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/httplog.(*respLogger).WriteHeader(0xc01de98a10, 0x1f4)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.(*baseTimeoutWriter).WriteHeader(0xc01de9d4a0, 0x1f4)
net/http.Error(0x7f0a860eda60, 0xc01aa86bf0, 0xc014e3ed80, 0x175, 0x1f4)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/healthz.handleRootHealthz.func1(0x7f0a860eda60, 0xc01aa86bf0, 0xc01de51a00)
net/http.HandlerFunc.ServeHTTP(0xc01dd48b60, 0x7f0a860eda60, 0xc01aa86bf0, 0xc01de51a00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/mux.(*pathHandler).ServeHTTP(0xc01dd50800, 0x7f0a860eda60, 0xc01aa86bf0, 0xc01de51a00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/mux.(*PathRecorderMux).ServeHTTP(0xc013d3e380, 0x7f0a860eda60, 0xc01aa86bf0, 0xc01de51a00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server.director.ServeHTTP(0x41bac8d, 0xe, 0xc01ab93320, 0xc013d3e380, 0x7f0a860eda60, 0xc01aa86bf0, 0xc01de51a00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/filters.WithAuthorization.func1(0x7f0a860eda60, 0xc01aa86bf0, 0xc01de51a00)
net/http.HandlerFunc.ServeHTTP(0xc01abb9140, 0x7f0a860eda60, 0xc01aa86bf0, 0xc01de51a00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.WithMaxInFlightLimit.func1(0x7f0a860eda60, 0xc01aa86bf0, 0xc01de51a00)
net/http.HandlerFunc.ServeHTTP(0xc013d3ca20, 0x7f0a860eda60, 0xc01aa86bf0, 0xc01de51a00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/filters.WithImpersonation.func1(0x7f0a860eda60, 0xc01aa86bf0, 0xc01de51a00)
net/http.HandlerFunc.ServeHTTP(0xc01abb9180, 0x7f0a860eda60, 0xc01aa86bf0, 0xc01de51a00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/filters.WithAuthentication.func1(0x7f0a860eda60, 0xc01aa86bf0, 0xc01de51900)
net/http.HandlerFunc.ServeHTTP(0xc01abc7360, 0x7f0a860eda60, 0xc01aa86bf0, 0xc01de51900)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.(*timeoutHandler).ServeHTTP.func1(0xc01deca420, 0xc01ac164e0, 0x60d7720, 0xc01aa86bf0, 0xc01de51900)
created by k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.(*timeoutHandler).ServeHTTP

logging error output: "[+]ping ok\n[+]log ok\n[-]etcd failed: reason withheld\n[+]poststarthook/generic-apiserver-start-informers ok\n[+]poststarthook/bootstrap-controller ok\n[-]poststarthook/rbac/bootstrap-roles failed: reason withheld\n[-]poststarthook/scheduling/bootstrap-system-priority-classes failed: reason withheld\n[-]poststarthook/ca-registration failed: reason withheld\nhealthz check failed\n"
 [Go-http-client/1.1 127.0.0.1:34396]
I0212 07:24:27.617271  124047 healthz.go:170] healthz check etcd failed: etcd client connection not yet established
I0212 07:24:27.617322  124047 healthz.go:170] healthz check poststarthook/rbac/bootstrap-roles failed: not finished
I0212 07:24:27.617335  124047 healthz.go:170] healthz check poststarthook/scheduling/bootstrap-system-priority-classes failed: not finished
I0212 07:24:27.617342  124047 healthz.go:170] healthz check poststarthook/ca-registration failed: not finished
I0212 07:24:27.617550  124047 wrap.go:47] GET /healthz: (419.23µs) 500
goroutine 28354 [running]:
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/httplog.(*respLogger).recordStatus(0xc01d2a16c0, 0x1f4)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/httplog.(*respLogger).WriteHeader(0xc01d2a16c0, 0x1f4)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.(*baseTimeoutWriter).WriteHeader(0xc01de2b5e0, 0x1f4)
net/http.Error(0x7f0a860eda60, 0xc0155ddc78, 0xc01443fc80, 0x175, 0x1f4)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/healthz.handleRootHealthz.func1(0x7f0a860eda60, 0xc0155ddc78, 0xc01ddd9700)
net/http.HandlerFunc.ServeHTTP(0xc01dd48b60, 0x7f0a860eda60, 0xc0155ddc78, 0xc01ddd9700)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/mux.(*pathHandler).ServeHTTP(0xc01dd50800, 0x7f0a860eda60, 0xc0155ddc78, 0xc01ddd9700)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/mux.(*PathRecorderMux).ServeHTTP(0xc013d3e380, 0x7f0a860eda60, 0xc0155ddc78, 0xc01ddd9700)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server.director.ServeHTTP(0x41bac8d, 0xe, 0xc01ab93320, 0xc013d3e380, 0x7f0a860eda60, 0xc0155ddc78, 0xc01ddd9700)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/filters.WithAuthorization.func1(0x7f0a860eda60, 0xc0155ddc78, 0xc01ddd9700)
net/http.HandlerFunc.ServeHTTP(0xc01abb9140, 0x7f0a860eda60, 0xc0155ddc78, 0xc01ddd9700)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.WithMaxInFlightLimit.func1(0x7f0a860eda60, 0xc0155ddc78, 0xc01ddd9700)
net/http.HandlerFunc.ServeHTTP(0xc013d3ca20, 0x7f0a860eda60, 0xc0155ddc78, 0xc01ddd9700)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/filters.WithImpersonation.func1(0x7f0a860eda60, 0xc0155ddc78, 0xc01ddd9700)
net/http.HandlerFunc.ServeHTTP(0xc01abb9180, 0x7f0a860eda60, 0xc0155ddc78, 0xc01ddd9700)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/filters.WithAuthentication.func1(0x7f0a860eda60, 0xc0155ddc78, 0xc01ddd9600)
net/http.HandlerFunc.ServeHTTP(0xc01abc7360, 0x7f0a860eda60, 0xc0155ddc78, 0xc01ddd9600)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.(*timeoutHandler).ServeHTTP.func1(0xc01d0f9d40, 0xc01ac164e0, 0x60d7720, 0xc0155ddc78, 0xc01ddd9600)
created by k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.(*timeoutHandler).ServeHTTP

logging error output: "[+]ping ok\n[+]log ok\n[-]etcd failed: reason withheld\n[+]poststarthook/generic-apiserver-start-informers ok\n[+]poststarthook/bootstrap-controller ok\n[-]poststarthook/rbac/bootstrap-roles failed: reason withheld\n[-]poststarthook/scheduling/bootstrap-system-priority-classes failed: reason withheld\n[-]poststarthook/ca-registration failed: reason withheld\nhealthz check failed\n"
 [Go-http-client/1.1 127.0.0.1:34396]
I0212 07:24:27.717343  124047 healthz.go:170] healthz check etcd failed: etcd client connection not yet established
I0212 07:24:27.717378  124047 healthz.go:170] healthz check poststarthook/rbac/bootstrap-roles failed: not finished
I0212 07:24:27.717388  124047 healthz.go:170] healthz check poststarthook/scheduling/bootstrap-system-priority-classes failed: not finished
I0212 07:24:27.717396  124047 healthz.go:170] healthz check poststarthook/ca-registration failed: not finished
I0212 07:24:27.717594  124047 wrap.go:47] GET /healthz: (390.097µs) 500
goroutine 28329 [running]:
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/httplog.(*respLogger).recordStatus(0xc01de98b60, 0x1f4)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/httplog.(*respLogger).WriteHeader(0xc01de98b60, 0x1f4)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.(*baseTimeoutWriter).WriteHeader(0xc01de9d6c0, 0x1f4)
net/http.Error(0x7f0a860eda60, 0xc01aa86bf8, 0xc014e3f380, 0x175, 0x1f4)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/healthz.handleRootHealthz.func1(0x7f0a860eda60, 0xc01aa86bf8, 0xc01de51e00)
net/http.HandlerFunc.ServeHTTP(0xc01dd48b60, 0x7f0a860eda60, 0xc01aa86bf8, 0xc01de51e00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/mux.(*pathHandler).ServeHTTP(0xc01dd50800, 0x7f0a860eda60, 0xc01aa86bf8, 0xc01de51e00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/mux.(*PathRecorderMux).ServeHTTP(0xc013d3e380, 0x7f0a860eda60, 0xc01aa86bf8, 0xc01de51e00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server.director.ServeHTTP(0x41bac8d, 0xe, 0xc01ab93320, 0xc013d3e380, 0x7f0a860eda60, 0xc01aa86bf8, 0xc01de51e00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/filters.WithAuthorization.func1(0x7f0a860eda60, 0xc01aa86bf8, 0xc01de51e00)
net/http.HandlerFunc.ServeHTTP(0xc01abb9140, 0x7f0a860eda60, 0xc01aa86bf8, 0xc01de51e00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.WithMaxInFlightLimit.func1(0x7f0a860eda60, 0xc01aa86bf8, 0xc01de51e00)
net/http.HandlerFunc.ServeHTTP(0xc013d3ca20, 0x7f0a860eda60, 0xc01aa86bf8, 0xc01de51e00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/filters.WithImpersonation.func1(0x7f0a860eda60, 0xc01aa86bf8, 0xc01de51e00)
net/http.HandlerFunc.ServeHTTP(0xc01abb9180, 0x7f0a860eda60, 0xc01aa86bf8, 0xc01de51e00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/filters.WithAuthentication.func1(0x7f0a860eda60, 0xc01aa86bf8, 0xc01de51d00)
net/http.HandlerFunc.ServeHTTP(0xc01abc7360, 0x7f0a860eda60, 0xc01aa86bf8, 0xc01de51d00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.(*timeoutHandler).ServeHTTP.func1(0xc01deca540, 0xc01ac164e0, 0x60d7720, 0xc01aa86bf8, 0xc01de51d00)
created by k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.(*timeoutHandler).ServeHTTP

logging error output: "[+]ping ok\n[+]log ok\n[-]etcd failed: reason withheld\n[+]poststarthook/generic-apiserver-start-informers ok\n[+]poststarthook/bootstrap-controller ok\n[-]poststarthook/rbac/bootstrap-roles failed: reason withheld\n[-]poststarthook/scheduling/bootstrap-system-priority-classes failed: reason withheld\n[-]poststarthook/ca-registration failed: reason withheld\nhealthz check failed\n"
 [Go-http-client/1.1 127.0.0.1:34396]
I0212 07:24:27.821186  124047 healthz.go:170] healthz check etcd failed: etcd client connection not yet established
I0212 07:24:27.821215  124047 healthz.go:170] healthz check poststarthook/rbac/bootstrap-roles failed: not finished
I0212 07:24:27.821223  124047 healthz.go:170] healthz check poststarthook/scheduling/bootstrap-system-priority-classes failed: not finished
I0212 07:24:27.821229  124047 healthz.go:170] healthz check poststarthook/ca-registration failed: not finished
I0212 07:24:27.821381  124047 wrap.go:47] GET /healthz: (327.378µs) 500
goroutine 28191 [running]:
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/httplog.(*respLogger).recordStatus(0xc01dee0380, 0x1f4)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/httplog.(*respLogger).WriteHeader(0xc01dee0380, 0x1f4)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.(*baseTimeoutWriter).WriteHeader(0xc01dee63e0, 0x1f4)
net/http.Error(0x7f0a860eda60, 0xc01deba0d8, 0xc01a240a80, 0x175, 0x1f4)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/healthz.handleRootHealthz.func1(0x7f0a860eda60, 0xc01deba0d8, 0xc01cf9bc00)
net/http.HandlerFunc.ServeHTTP(0xc01dd48b60, 0x7f0a860eda60, 0xc01deba0d8, 0xc01cf9bc00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/mux.(*pathHandler).ServeHTTP(0xc01dd50800, 0x7f0a860eda60, 0xc01deba0d8, 0xc01cf9bc00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/mux.(*PathRecorderMux).ServeHTTP(0xc013d3e380, 0x7f0a860eda60, 0xc01deba0d8, 0xc01cf9bc00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server.director.ServeHTTP(0x41bac8d, 0xe, 0xc01ab93320, 0xc013d3e380, 0x7f0a860eda60, 0xc01deba0d8, 0xc01cf9bc00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/filters.WithAuthorization.func1(0x7f0a860eda60, 0xc01deba0d8, 0xc01cf9bc00)
net/http.HandlerFunc.ServeHTTP(0xc01abb9140, 0x7f0a860eda60, 0xc01deba0d8, 0xc01cf9bc00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.WithMaxInFlightLimit.func1(0x7f0a860eda60, 0xc01deba0d8, 0xc01cf9bc00)
net/http.HandlerFunc.ServeHTTP(0xc013d3ca20, 0x7f0a860eda60, 0xc01deba0d8, 0xc01cf9bc00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/filters.WithImpersonation.func1(0x7f0a860eda60, 0xc01deba0d8, 0xc01cf9bc00)
net/http.HandlerFunc.ServeHTTP(0xc01abb9180, 0x7f0a860eda60, 0xc01deba0d8, 0xc01cf9bc00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/filters.WithAuthentication.func1(0x7f0a860eda60, 0xc01deba0d8, 0xc01cf9bb00)
net/http.HandlerFunc.ServeHTTP(0xc01abc7360, 0x7f0a860eda60, 0xc01deba0d8, 0xc01cf9bb00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.(*timeoutHandler).ServeHTTP.func1(0xc01cecfe00, 0xc01ac164e0, 0x60d7720, 0xc01deba0d8, 0xc01cf9bb00)
created by k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.(*timeoutHandler).ServeHTTP

logging error output: "[+]ping ok\n[+]log ok\n[-]etcd failed: reason withheld\n[+]poststarthook/generic-apiserver-start-informers ok\n[+]poststarthook/bootstrap-controller ok\n[-]poststarthook/rbac/bootstrap-roles failed: reason withheld\n[-]poststarthook/scheduling/bootstrap-system-priority-classes failed: reason withheld\n[-]poststarthook/ca-registration failed: reason withheld\nhealthz check failed\n"
 [Go-http-client/1.1 127.0.0.1:34396]
I0212 07:24:27.917174  124047 healthz.go:170] healthz check etcd failed: etcd client connection not yet established
I0212 07:24:27.917216  124047 healthz.go:170] healthz check poststarthook/rbac/bootstrap-roles failed: not finished
I0212 07:24:27.917232  124047 healthz.go:170] healthz check poststarthook/scheduling/bootstrap-system-priority-classes failed: not finished
I0212 07:24:27.917239  124047 healthz.go:170] healthz check poststarthook/ca-registration failed: not finished
I0212 07:24:27.917382  124047 wrap.go:47] GET /healthz: (376.24µs) 500
goroutine 28356 [running]:
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/httplog.(*respLogger).recordStatus(0xc01d2a1810, 0x1f4)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/httplog.(*respLogger).WriteHeader(0xc01d2a1810, 0x1f4)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.(*baseTimeoutWriter).WriteHeader(0xc01de2b920, 0x1f4)
net/http.Error(0x7f0a860eda60, 0xc0155ddce0, 0xc01df40300, 0x175, 0x1f4)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/healthz.handleRootHealthz.func1(0x7f0a860eda60, 0xc0155ddce0, 0xc01df64100)
net/http.HandlerFunc.ServeHTTP(0xc01dd48b60, 0x7f0a860eda60, 0xc0155ddce0, 0xc01df64100)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/mux.(*pathHandler).ServeHTTP(0xc01dd50800, 0x7f0a860eda60, 0xc0155ddce0, 0xc01df64100)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/mux.(*PathRecorderMux).ServeHTTP(0xc013d3e380, 0x7f0a860eda60, 0xc0155ddce0, 0xc01df64100)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server.director.ServeHTTP(0x41bac8d, 0xe, 0xc01ab93320, 0xc013d3e380, 0x7f0a860eda60, 0xc0155ddce0, 0xc01df64100)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/filters.WithAuthorization.func1(0x7f0a860eda60, 0xc0155ddce0, 0xc01df64100)
net/http.HandlerFunc.ServeHTTP(0xc01abb9140, 0x7f0a860eda60, 0xc0155ddce0, 0xc01df64100)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.WithMaxInFlightLimit.func1(0x7f0a860eda60, 0xc0155ddce0, 0xc01df64100)
net/http.HandlerFunc.ServeHTTP(0xc013d3ca20, 0x7f0a860eda60, 0xc0155ddce0, 0xc01df64100)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/filters.WithImpersonation.func1(0x7f0a860eda60, 0xc0155ddce0, 0xc01df64100)
net/http.HandlerFunc.ServeHTTP(0xc01abb9180, 0x7f0a860eda60, 0xc0155ddce0, 0xc01df64100)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/filters.WithAuthentication.func1(0x7f0a860eda60, 0xc0155ddce0, 0xc01df64000)
net/http.HandlerFunc.ServeHTTP(0xc01abc7360, 0x7f0a860eda60, 0xc0155ddce0, 0xc01df64000)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.(*timeoutHandler).ServeHTTP.func1(0xc01df620c0, 0xc01ac164e0, 0x60d7720, 0xc0155ddce0, 0xc01df64000)
created by k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.(*timeoutHandler).ServeHTTP

logging error output: "[+]ping ok\n[+]log ok\n[-]etcd failed: reason withheld\n[+]poststarthook/generic-apiserver-start-informers ok\n[+]poststarthook/bootstrap-controller ok\n[-]poststarthook/rbac/bootstrap-roles failed: reason withheld\n[-]poststarthook/scheduling/bootstrap-system-priority-classes failed: reason withheld\n[-]poststarthook/ca-registration failed: reason withheld\nhealthz check failed\n"
 [Go-http-client/1.1 127.0.0.1:34396]
I0212 07:24:28.017341  124047 healthz.go:170] healthz check etcd failed: etcd client connection not yet established
I0212 07:24:28.017392  124047 healthz.go:170] healthz check poststarthook/rbac/bootstrap-roles failed: not finished
I0212 07:24:28.017403  124047 healthz.go:170] healthz check poststarthook/scheduling/bootstrap-system-priority-classes failed: not finished
I0212 07:24:28.017410  124047 healthz.go:170] healthz check poststarthook/ca-registration failed: not finished
I0212 07:24:28.017652  124047 wrap.go:47] GET /healthz: (521.969µs) 500
goroutine 28358 [running]:
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/httplog.(*respLogger).recordStatus(0xc01d2a1960, 0x1f4)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/httplog.(*respLogger).WriteHeader(0xc01d2a1960, 0x1f4)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.(*baseTimeoutWriter).WriteHeader(0xc01de2bb40, 0x1f4)
net/http.Error(0x7f0a860eda60, 0xc0155ddce8, 0xc01df40900, 0x175, 0x1f4)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/healthz.handleRootHealthz.func1(0x7f0a860eda60, 0xc0155ddce8, 0xc01df64500)
net/http.HandlerFunc.ServeHTTP(0xc01dd48b60, 0x7f0a860eda60, 0xc0155ddce8, 0xc01df64500)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/mux.(*pathHandler).ServeHTTP(0xc01dd50800, 0x7f0a860eda60, 0xc0155ddce8, 0xc01df64500)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/mux.(*PathRecorderMux).ServeHTTP(0xc013d3e380, 0x7f0a860eda60, 0xc0155ddce8, 0xc01df64500)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server.director.ServeHTTP(0x41bac8d, 0xe, 0xc01ab93320, 0xc013d3e380, 0x7f0a860eda60, 0xc0155ddce8, 0xc01df64500)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/filters.WithAuthorization.func1(0x7f0a860eda60, 0xc0155ddce8, 0xc01df64500)
net/http.HandlerFunc.ServeHTTP(0xc01abb9140, 0x7f0a860eda60, 0xc0155ddce8, 0xc01df64500)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.WithMaxInFlightLimit.func1(0x7f0a860eda60, 0xc0155ddce8, 0xc01df64500)
net/http.HandlerFunc.ServeHTTP(0xc013d3ca20, 0x7f0a860eda60, 0xc0155ddce8, 0xc01df64500)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/filters.WithImpersonation.func1(0x7f0a860eda60, 0xc0155ddce8, 0xc01df64500)
net/http.HandlerFunc.ServeHTTP(0xc01abb9180, 0x7f0a860eda60, 0xc0155ddce8, 0xc01df64500)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/filters.WithAuthentication.func1(0x7f0a860eda60, 0xc0155ddce8, 0xc01df64400)
net/http.HandlerFunc.ServeHTTP(0xc01abc7360, 0x7f0a860eda60, 0xc0155ddce8, 0xc01df64400)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.(*timeoutHandler).ServeHTTP.func1(0xc01df621e0, 0xc01ac164e0, 0x60d7720, 0xc0155ddce8, 0xc01df64400)
created by k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.(*timeoutHandler).ServeHTTP

logging error output: "[+]ping ok\n[+]log ok\n[-]etcd failed: reason withheld\n[+]poststarthook/generic-apiserver-start-informers ok\n[+]poststarthook/bootstrap-controller ok\n[-]poststarthook/rbac/bootstrap-roles failed: reason withheld\n[-]poststarthook/scheduling/bootstrap-system-priority-classes failed: reason withheld\n[-]poststarthook/ca-registration failed: reason withheld\nhealthz check failed\n"
 [Go-http-client/1.1 127.0.0.1:34396]
I0212 07:24:28.117272  124047 healthz.go:170] healthz check etcd failed: etcd client connection not yet established
I0212 07:24:28.117315  124047 healthz.go:170] healthz check poststarthook/rbac/bootstrap-roles failed: not finished
I0212 07:24:28.117336  124047 healthz.go:170] healthz check poststarthook/scheduling/bootstrap-system-priority-classes failed: not finished
I0212 07:24:28.117357  124047 healthz.go:170] healthz check poststarthook/ca-registration failed: not finished
I0212 07:24:28.117605  124047 wrap.go:47] GET /healthz: (476.572µs) 500
goroutine 28331 [running]:
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/httplog.(*respLogger).recordStatus(0xc01de98c40, 0x1f4)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/httplog.(*respLogger).WriteHeader(0xc01de98c40, 0x1f4)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.(*baseTimeoutWriter).WriteHeader(0xc01de9d760, 0x1f4)
net/http.Error(0x7f0a860eda60, 0xc01aa86c00, 0xc014e3f800, 0x175, 0x1f4)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/healthz.handleRootHealthz.func1(0x7f0a860eda60, 0xc01aa86c00, 0xc01dfa0200)
net/http.HandlerFunc.ServeHTTP(0xc01dd48b60, 0x7f0a860eda60, 0xc01aa86c00, 0xc01dfa0200)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/mux.(*pathHandler).ServeHTTP(0xc01dd50800, 0x7f0a860eda60, 0xc01aa86c00, 0xc01dfa0200)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/mux.(*PathRecorderMux).ServeHTTP(0xc013d3e380, 0x7f0a860eda60, 0xc01aa86c00, 0xc01dfa0200)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server.director.ServeHTTP(0x41bac8d, 0xe, 0xc01ab93320, 0xc013d3e380, 0x7f0a860eda60, 0xc01aa86c00, 0xc01dfa0200)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/filters.WithAuthorization.func1(0x7f0a860eda60, 0xc01aa86c00, 0xc01dfa0200)
net/http.HandlerFunc.ServeHTTP(0xc01abb9140, 0x7f0a860eda60, 0xc01aa86c00, 0xc01dfa0200)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.WithMaxInFlightLimit.func1(0x7f0a860eda60, 0xc01aa86c00, 0xc01dfa0200)
net/http.HandlerFunc.ServeHTTP(0xc013d3ca20, 0x7f0a860eda60, 0xc01aa86c00, 0xc01dfa0200)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/filters.WithImpersonation.func1(0x7f0a860eda60, 0xc01aa86c00, 0xc01dfa0200)
net/http.HandlerFunc.ServeHTTP(0xc01abb9180, 0x7f0a860eda60, 0xc01aa86c00, 0xc01dfa0200)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/filters.WithAuthentication.func1(0x7f0a860eda60, 0xc01aa86c00, 0xc01dfa0100)
net/http.HandlerFunc.ServeHTTP(0xc01abc7360, 0x7f0a860eda60, 0xc01aa86c00, 0xc01dfa0100)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.(*timeoutHandler).ServeHTTP.func1(0xc01deca600, 0xc01ac164e0, 0x60d7720, 0xc01aa86c00, 0xc01dfa0100)
created by k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.(*timeoutHandler).ServeHTTP

logging error output: "[+]ping ok\n[+]log ok\n[-]etcd failed: reason withheld\n[+]poststarthook/generic-apiserver-start-informers ok\n[+]poststarthook/bootstrap-controller ok\n[-]poststarthook/rbac/bootstrap-roles failed: reason withheld\n[-]poststarthook/scheduling/bootstrap-system-priority-classes failed: reason withheld\n[-]poststarthook/ca-registration failed: reason withheld\nhealthz check failed\n"
 [Go-http-client/1.1 127.0.0.1:34396]
I0212 07:24:28.167221  124047 clientconn.go:551] parsed scheme: ""
I0212 07:24:28.167266  124047 clientconn.go:557] scheme "" not registered, fallback to default scheme
I0212 07:24:28.167323  124047 resolver_conn_wrapper.go:116] ccResolverWrapper: sending new addresses to cc: [{127.0.0.1:2379 0  <nil>}]
I0212 07:24:28.167408  124047 balancer_v1_wrapper.go:125] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0212 07:24:28.167799  124047 balancer_v1_wrapper.go:245] clientv3/balancer: pin "127.0.0.1:2379"
I0212 07:24:28.167889  124047 balancer_v1_wrapper.go:125] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0212 07:24:28.218337  124047 healthz.go:170] healthz check poststarthook/rbac/bootstrap-roles failed: not finished
I0212 07:24:28.218372  124047 healthz.go:170] healthz check poststarthook/scheduling/bootstrap-system-priority-classes failed: not finished
I0212 07:24:28.218382  124047 healthz.go:170] healthz check poststarthook/ca-registration failed: not finished
I0212 07:24:28.218568  124047 wrap.go:47] GET /healthz: (1.382496ms) 500
goroutine 28193 [running]:
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/httplog.(*respLogger).recordStatus(0xc01dee0540, 0x1f4)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/httplog.(*respLogger).WriteHeader(0xc01dee0540, 0x1f4)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.(*baseTimeoutWriter).WriteHeader(0xc01dee6780, 0x1f4)
net/http.Error(0x7f0a860eda60, 0xc01deba0e0, 0xc01919d340, 0x160, 0x1f4)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/healthz.handleRootHealthz.func1(0x7f0a860eda60, 0xc01deba0e0, 0xc01dfe2000)
net/http.HandlerFunc.ServeHTTP(0xc01dd48b60, 0x7f0a860eda60, 0xc01deba0e0, 0xc01dfe2000)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/mux.(*pathHandler).ServeHTTP(0xc01dd50800, 0x7f0a860eda60, 0xc01deba0e0, 0xc01dfe2000)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/mux.(*PathRecorderMux).ServeHTTP(0xc013d3e380, 0x7f0a860eda60, 0xc01deba0e0, 0xc01dfe2000)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server.director.ServeHTTP(0x41bac8d, 0xe, 0xc01ab93320, 0xc013d3e380, 0x7f0a860eda60, 0xc01deba0e0, 0xc01dfe2000)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/filters.WithAuthorization.func1(0x7f0a860eda60, 0xc01deba0e0, 0xc01dfe2000)
net/http.HandlerFunc.ServeHTTP(0xc01abb9140, 0x7f0a860eda60, 0xc01deba0e0, 0xc01dfe2000)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.WithMaxInFlightLimit.func1(0x7f0a860eda60, 0xc01deba0e0, 0xc01dfe2000)
net/http.HandlerFunc.ServeHTTP(0xc013d3ca20, 0x7f0a860eda60, 0xc01deba0e0, 0xc01dfe2000)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/filters.WithImpersonation.func1(0x7f0a860eda60, 0xc01deba0e0, 0xc01dfe2000)
net/http.HandlerFunc.ServeHTTP(0xc01abb9180, 0x7f0a860eda60, 0xc01deba0e0, 0xc01dfe2000)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/filters.WithAuthentication.func1(0x7f0a860eda60, 0xc01deba0e0, 0xc01cf9bf00)
net/http.HandlerFunc.ServeHTTP(0xc01abc7360, 0x7f0a860eda60, 0xc01deba0e0, 0xc01cf9bf00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.(*timeoutHandler).ServeHTTP.func1(0xc01cecff80, 0xc01ac164e0, 0x60d7720, 0xc01deba0e0, 0xc01cf9bf00)
created by k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.(*timeoutHandler).ServeHTTP

logging error output: "[+]ping ok\n[+]log ok\n[+]etcd ok\n[+]poststarthook/generic-apiserver-start-informers ok\n[+]poststarthook/bootstrap-controller ok\n[-]poststarthook/rbac/bootstrap-roles failed: reason withheld\n[-]poststarthook/scheduling/bootstrap-system-priority-classes failed: reason withheld\n[-]poststarthook/ca-registration failed: reason withheld\nhealthz check failed\n"
 [Go-http-client/1.1 127.0.0.1:34396]
I0212 07:24:28.317479  124047 wrap.go:47] GET /apis/scheduling.k8s.io/v1beta1/priorityclasses/system-node-critical: (1.341589ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:34398]
I0212 07:24:28.317750  124047 healthz.go:170] healthz check poststarthook/rbac/bootstrap-roles failed: not finished
I0212 07:24:28.317768  124047 healthz.go:170] healthz check poststarthook/scheduling/bootstrap-system-priority-classes failed: not finished
I0212 07:24:28.317775  124047 healthz.go:170] healthz check poststarthook/ca-registration failed: not finished
I0212 07:24:28.317947  124047 wrap.go:47] GET /healthz: (915.946µs) 500
goroutine 28367 [running]:
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/httplog.(*respLogger).recordStatus(0xc01d2a1c00, 0x1f4)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/httplog.(*respLogger).WriteHeader(0xc01d2a1c00, 0x1f4)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.(*baseTimeoutWriter).WriteHeader(0xc01e00e140, 0x1f4)
net/http.Error(0x7f0a860eda60, 0xc0155ddd50, 0xc003a91e40, 0x160, 0x1f4)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/healthz.handleRootHealthz.func1(0x7f0a860eda60, 0xc0155ddd50, 0xc01df65000)
net/http.HandlerFunc.ServeHTTP(0xc01dd48b60, 0x7f0a860eda60, 0xc0155ddd50, 0xc01df65000)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/mux.(*pathHandler).ServeHTTP(0xc01dd50800, 0x7f0a860eda60, 0xc0155ddd50, 0xc01df65000)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/mux.(*PathRecorderMux).ServeHTTP(0xc013d3e380, 0x7f0a860eda60, 0xc0155ddd50, 0xc01df65000)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server.director.ServeHTTP(0x41bac8d, 0xe, 0xc01ab93320, 0xc013d3e380, 0x7f0a860eda60, 0xc0155ddd50, 0xc01df65000)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/filters.WithAuthorization.func1(0x7f0a860eda60, 0xc0155ddd50, 0xc01df65000)
net/http.HandlerFunc.ServeHTTP(0xc01abb9140, 0x7f0a860eda60, 0xc0155ddd50, 0xc01df65000)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.WithMaxInFlightLimit.func1(0x7f0a860eda60, 0xc0155ddd50, 0xc01df65000)
net/http.HandlerFunc.ServeHTTP(0xc013d3ca20, 0x7f0a860eda60, 0xc0155ddd50, 0xc01df65000)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/filters.WithImpersonation.func1(0x7f0a860eda60, 0xc0155ddd50, 0xc01df65000)
net/http.HandlerFunc.ServeHTTP(0xc01abb9180, 0x7f0a860eda60, 0xc0155ddd50, 0xc01df65000)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/filters.WithAuthentication.func1(0x7f0a860eda60, 0xc0155ddd50, 0xc01df64f00)
net/http.HandlerFunc.ServeHTTP(0xc01abc7360, 0x7f0a860eda60, 0xc0155ddd50, 0xc01df64f00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.(*timeoutHandler).ServeHTTP.func1(0xc01df62900, 0xc01ac164e0, 0x60d7720, 0xc0155ddd50, 0xc01df64f00)
created by k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.(*timeoutHandler).ServeHTTP

logging error output: "[+]ping ok\n[+]log ok\n[+]etcd ok\n[+]poststarthook/generic-apiserver-start-informers ok\n[+]poststarthook/bootstrap-controller ok\n[-]poststarthook/rbac/bootstrap-roles failed: reason withheld\n[-]poststarthook/scheduling/bootstrap-system-priority-classes failed: reason withheld\n[-]poststarthook/ca-registration failed: reason withheld\nhealthz check failed\n"
 [Go-http-client/1.1 127.0.0.1:34654]
I0212 07:24:28.317950  124047 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/clusterroles: (1.899956ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:34396]
I0212 07:24:28.318021  124047 wrap.go:47] GET /api/v1/namespaces/kube-system: (1.217417ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:34650]
I0212 07:24:28.319750  124047 wrap.go:47] POST /apis/scheduling.k8s.io/v1beta1/priorityclasses: (1.747308ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:34398]
I0212 07:24:28.319772  124047 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/clusterrolebindings: (1.226285ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:34396]
I0212 07:24:28.319858  124047 wrap.go:47] GET /api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication: (1.359993ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:34654]
I0212 07:24:28.320013  124047 storage_scheduling.go:91] created PriorityClass system-node-critical with value 2000001000
I0212 07:24:28.321404  124047 wrap.go:47] GET /apis/scheduling.k8s.io/v1beta1/priorityclasses/system-cluster-critical: (1.116255ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:34396]
I0212 07:24:28.321417  124047 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/clusterroles/system:aggregate-to-edit: (1.286002ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:34398]
I0212 07:24:28.322082  124047 wrap.go:47] POST /api/v1/namespaces/kube-system/configmaps: (1.432935ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:34658]
I0212 07:24:28.322995  124047 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/clusterroles/edit: (1.184313ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:34396]
I0212 07:24:28.323258  124047 wrap.go:47] POST /apis/scheduling.k8s.io/v1beta1/priorityclasses: (1.377578ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:34398]
I0212 07:24:28.323421  124047 storage_scheduling.go:91] created PriorityClass system-cluster-critical with value 2000000000
I0212 07:24:28.323439  124047 storage_scheduling.go:100] all system priority classes are created successfully or already exist.
I0212 07:24:28.324150  124047 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/clusterroles/system:aggregate-to-view: (813.349µs) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:34396]
I0212 07:24:28.325245  124047 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/clusterroles/view: (708.027µs) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:34398]
I0212 07:24:28.326349  124047 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/clusterroles/system:aggregate-to-admin: (713.569µs) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:34398]
I0212 07:24:28.327625  124047 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/clusterroles/admin: (834.028µs) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:34398]
I0212 07:24:28.328847  124047 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/clusterroles/cluster-admin: (843.036µs) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:34398]
I0212 07:24:28.331261  124047 wrap.go:47] POST /apis/rbac.authorization.k8s.io/v1/clusterroles: (1.810703ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:34398]
I0212 07:24:28.331720  124047 storage_rbac.go:187] created clusterrole.rbac.authorization.k8s.io/cluster-admin
I0212 07:24:28.332822  124047 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/clusterroles/system:discovery: (881.298µs) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:34398]
I0212 07:24:28.336984  124047 wrap.go:47] POST /apis/rbac.authorization.k8s.io/v1/clusterroles: (3.760868ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:34398]
I0212 07:24:28.337379  124047 storage_rbac.go:187] created clusterrole.rbac.authorization.k8s.io/system:discovery
I0212 07:24:28.338422  124047 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/clusterroles/system:basic-user: (842.08µs) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:34398]
I0212 07:24:28.341266  124047 wrap.go:47] POST /apis/rbac.authorization.k8s.io/v1/clusterroles: (2.431325ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:34398]
I0212 07:24:28.341914  124047 storage_rbac.go:187] created clusterrole.rbac.authorization.k8s.io/system:basic-user
I0212 07:24:28.343021  124047 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/clusterroles/admin: (815.172µs) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:34398]
I0212 07:24:28.347372  124047 wrap.go:47] POST /apis/rbac.authorization.k8s.io/v1/clusterroles: (3.824965ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:34398]
I0212 07:24:28.347631  124047 storage_rbac.go:187] created clusterrole.rbac.authorization.k8s.io/admin
I0212 07:24:28.348854  124047 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/clusterroles/edit: (970.047µs) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:34398]
I0212 07:24:28.351075  124047 wrap.go:47] POST /apis/rbac.authorization.k8s.io/v1/clusterroles: (1.75665ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:34398]
I0212 07:24:28.351278  124047 storage_rbac.go:187] created clusterrole.rbac.authorization.k8s.io/edit
I0212 07:24:28.352286  124047 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/clusterroles/view: (758.224µs) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:34398]
I0212 07:24:28.354658  124047 wrap.go:47] POST /apis/rbac.authorization.k8s.io/v1/clusterroles: (1.948043ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:34398]
I0212 07:24:28.354831  124047 storage_rbac.go:187] created clusterrole.rbac.authorization.k8s.io/view
I0212 07:24:28.355822  124047 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/clusterroles/system:aggregate-to-admin: (756.828µs) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:34398]
I0212 07:24:28.358217  124047 wrap.go:47] POST /apis/rbac.authorization.k8s.io/v1/clusterroles: (1.979686ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:34398]
I0212 07:24:28.358460  124047 storage_rbac.go:187] created clusterrole.rbac.authorization.k8s.io/system:aggregate-to-admin
I0212 07:24:28.359538  124047 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/clusterroles/system:aggregate-to-edit: (855.915µs) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:34398]
I0212 07:24:28.362287  124047 wrap.go:47] POST /apis/rbac.authorization.k8s.io/v1/clusterroles: (2.285752ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:34398]
I0212 07:24:28.362707  124047 storage_rbac.go:187] created clusterrole.rbac.authorization.k8s.io/system:aggregate-to-edit
I0212 07:24:28.363903  124047 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/clusterroles/system:aggregate-to-view: (999.033µs) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:34398]
I0212 07:24:28.366761  124047 wrap.go:47] POST /apis/rbac.authorization.k8s.io/v1/clusterroles: (2.410767ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:34398]
I0212 07:24:28.367094  124047 storage_rbac.go:187] created clusterrole.rbac.authorization.k8s.io/system:aggregate-to-view
I0212 07:24:28.368169  124047 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/clusterroles/system:heapster: (821.032µs) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:34398]
I0212 07:24:28.370231  124047 wrap.go:47] POST /apis/rbac.authorization.k8s.io/v1/clusterroles: (1.617878ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:34398]
I0212 07:24:28.370451  124047 storage_rbac.go:187] created clusterrole.rbac.authorization.k8s.io/system:heapster
I0212 07:24:28.371728  124047 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/clusterroles/system:node: (1.070342ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:34398]
I0212 07:24:28.374449  124047 wrap.go:47] POST /apis/rbac.authorization.k8s.io/v1/clusterroles: (2.249989ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:34398]
I0212 07:24:28.374838  124047 storage_rbac.go:187] created clusterrole.rbac.authorization.k8s.io/system:node
I0212 07:24:28.376248  124047 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/clusterroles/system:node-problem-detector: (1.168112ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:34398]
I0212 07:24:28.378105  124047 wrap.go:47] POST /apis/rbac.authorization.k8s.io/v1/clusterroles: (1.468638ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:34398]
I0212 07:24:28.378317  124047 storage_rbac.go:187] created clusterrole.rbac.authorization.k8s.io/system:node-problem-detector
I0212 07:24:28.379441  124047 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/clusterroles/system:node-proxier: (889.876µs) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:34398]
I0212 07:24:28.381654  124047 wrap.go:47] POST /apis/rbac.authorization.k8s.io/v1/clusterroles: (1.752193ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:34398]
I0212 07:24:28.381864  124047 storage_rbac.go:187] created clusterrole.rbac.authorization.k8s.io/system:node-proxier
I0212 07:24:28.382912  124047 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/clusterroles/system:kubelet-api-admin: (865.035µs) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:34398]
I0212 07:24:28.385198  124047 wrap.go:47] POST /apis/rbac.authorization.k8s.io/v1/clusterroles: (1.839747ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:34398]
I0212 07:24:28.385450  124047 storage_rbac.go:187] created clusterrole.rbac.authorization.k8s.io/system:kubelet-api-admin
I0212 07:24:28.386544  124047 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/clusterroles/system:node-bootstrapper: (873.621µs) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:34398]
I0212 07:24:28.388353  124047 wrap.go:47] POST /apis/rbac.authorization.k8s.io/v1/clusterroles: (1.360245ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:34398]
I0212 07:24:28.388567  124047 storage_rbac.go:187] created clusterrole.rbac.authorization.k8s.io/system:node-bootstrapper
I0212 07:24:28.389780  124047 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/clusterroles/system:auth-delegator: (934.118µs) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:34398]
I0212 07:24:28.391801  124047 wrap.go:47] POST /apis/rbac.authorization.k8s.io/v1/clusterroles: (1.610971ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:34398]
I0212 07:24:28.392105  124047 storage_rbac.go:187] created clusterrole.rbac.authorization.k8s.io/system:auth-delegator
I0212 07:24:28.393287  124047 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/clusterroles/system:kube-aggregator: (918.667µs) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:34398]
I0212 07:24:28.396361  124047 wrap.go:47] POST /apis/rbac.authorization.k8s.io/v1/clusterroles: (2.360284ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:34398]
I0212 07:24:28.396992  124047 storage_rbac.go:187] created clusterrole.rbac.authorization.k8s.io/system:kube-aggregator
I0212 07:24:28.398466  124047 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/clusterroles/system:kube-controller-manager: (1.244644ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:34398]
I0212 07:24:28.402725  124047 wrap.go:47] POST /apis/rbac.authorization.k8s.io/v1/clusterroles: (3.726022ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:34398]
I0212 07:24:28.403104  124047 storage_rbac.go:187] created clusterrole.rbac.authorization.k8s.io/system:kube-controller-manager
I0212 07:24:28.404459  124047 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/clusterroles/system:kube-scheduler: (1.118709ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:34398]
I0212 07:24:28.408273  124047 wrap.go:47] POST /apis/rbac.authorization.k8s.io/v1/clusterroles: (3.195246ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:34398]
I0212 07:24:28.408663  124047 storage_rbac.go:187] created clusterrole.rbac.authorization.k8s.io/system:kube-scheduler
I0212 07:24:28.409722  124047 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/clusterroles/system:kube-dns: (882.359µs) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:34398]
I0212 07:24:28.411961  124047 wrap.go:47] POST /apis/rbac.authorization.k8s.io/v1/clusterroles: (1.746943ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:34398]
I0212 07:24:28.412202  124047 storage_rbac.go:187] created clusterrole.rbac.authorization.k8s.io/system:kube-dns
I0212 07:24:28.413458  124047 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/clusterroles/system:persistent-volume-provisioner: (1.054ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:34398]
I0212 07:24:28.417469  124047 wrap.go:47] POST /apis/rbac.authorization.k8s.io/v1/clusterroles: (3.306704ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:34398]
I0212 07:24:28.417805  124047 storage_rbac.go:187] created clusterrole.rbac.authorization.k8s.io/system:persistent-volume-provisioner
I0212 07:24:28.418427  124047 healthz.go:170] healthz check poststarthook/rbac/bootstrap-roles failed: not finished
I0212 07:24:28.418704  124047 wrap.go:47] GET /healthz: (1.015338ms) 500
goroutine 28516 [running]:
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/httplog.(*respLogger).recordStatus(0xc01e374380, 0x1f4)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/httplog.(*respLogger).WriteHeader(0xc01e374380, 0x1f4)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.(*baseTimeoutWriter).WriteHeader(0xc01e388100, 0x1f4)
net/http.Error(0x7f0a860eda60, 0xc01e1e8590, 0xc019c1cdc0, 0x136, 0x1f4)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/healthz.handleRootHealthz.func1(0x7f0a860eda60, 0xc01e1e8590, 0xc01e37c900)
net/http.HandlerFunc.ServeHTTP(0xc01dd48b60, 0x7f0a860eda60, 0xc01e1e8590, 0xc01e37c900)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/mux.(*pathHandler).ServeHTTP(0xc01dd50800, 0x7f0a860eda60, 0xc01e1e8590, 0xc01e37c900)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/mux.(*PathRecorderMux).ServeHTTP(0xc013d3e380, 0x7f0a860eda60, 0xc01e1e8590, 0xc01e37c900)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server.director.ServeHTTP(0x41bac8d, 0xe, 0xc01ab93320, 0xc013d3e380, 0x7f0a860eda60, 0xc01e1e8590, 0xc01e37c900)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/filters.WithAuthorization.func1(0x7f0a860eda60, 0xc01e1e8590, 0xc01e37c900)
net/http.HandlerFunc.ServeHTTP(0xc01abb9140, 0x7f0a860eda60, 0xc01e1e8590, 0xc01e37c900)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.WithMaxInFlightLimit.func1(0x7f0a860eda60, 0xc01e1e8590, 0xc01e37c900)
net/http.HandlerFunc.ServeHTTP(0xc013d3ca20, 0x7f0a860eda60, 0xc01e1e8590, 0xc01e37c900)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/filters.WithImpersonation.func1(0x7f0a860eda60, 0xc01e1e8590, 0xc01e37c900)
net/http.HandlerFunc.ServeHTTP(0xc01abb9180, 0x7f0a860eda60, 0xc01e1e8590, 0xc01e37c900)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/filters.WithAuthentication.func1(0x7f0a860eda60, 0xc01e1e8590, 0xc01e37c800)
net/http.HandlerFunc.ServeHTTP(0xc01abc7360, 0x7f0a860eda60, 0xc01e1e8590, 0xc01e37c800)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.(*timeoutHandler).ServeHTTP.func1(0xc01e31f1a0, 0xc01ac164e0, 0x60d7720, 0xc01e1e8590, 0xc01e37c800)
created by k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.(*timeoutHandler).ServeHTTP

logging error output: "[+]ping ok\n[+]log ok\n[+]etcd ok\n[+]poststarthook/generic-apiserver-start-informers ok\n[+]poststarthook/bootstrap-controller ok\n[-]poststarthook/rbac/bootstrap-roles failed: reason withheld\n[+]poststarthook/scheduling/bootstrap-system-priority-classes ok\n[+]poststarthook/ca-registration ok\nhealthz check failed\n"
 [Go-http-client/1.1 127.0.0.1:34658]
I0212 07:24:28.419217  124047 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/clusterroles/system:csi-external-attacher: (1.103767ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:34398]
I0212 07:24:28.421252  124047 wrap.go:47] POST /apis/rbac.authorization.k8s.io/v1/clusterroles: (1.561223ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:34398]
I0212 07:24:28.421522  124047 storage_rbac.go:187] created clusterrole.rbac.authorization.k8s.io/system:csi-external-attacher
I0212 07:24:28.422698  124047 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/clusterroles/system:aws-cloud-provider: (942.129µs) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:34398]
I0212 07:24:28.428862  124047 wrap.go:47] POST /apis/rbac.authorization.k8s.io/v1/clusterroles: (4.729274ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:34398]
I0212 07:24:28.429475  124047 storage_rbac.go:187] created clusterrole.rbac.authorization.k8s.io/system:aws-cloud-provider
I0212 07:24:28.431464  124047 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/clusterroles/system:certificates.k8s.io:certificatesigningrequests:nodeclient: (1.300961ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:34398]
I0212 07:24:28.433672  124047 wrap.go:47] POST /apis/rbac.authorization.k8s.io/v1/clusterroles: (1.67518ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:34398]
I0212 07:24:28.433884  124047 storage_rbac.go:187] created clusterrole.rbac.authorization.k8s.io/system:certificates.k8s.io:certificatesigningrequests:nodeclient
I0212 07:24:28.436058  124047 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/clusterroles/system:certificates.k8s.io:certificatesigningrequests:selfnodeclient: (1.95331ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:34398]
I0212 07:24:28.438222  124047 wrap.go:47] POST /apis/rbac.authorization.k8s.io/v1/clusterroles: (1.496868ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:34398]
I0212 07:24:28.438551  124047 storage_rbac.go:187] created clusterrole.rbac.authorization.k8s.io/system:certificates.k8s.io:certificatesigningrequests:selfnodeclient
I0212 07:24:28.439575  124047 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/clusterroles/system:volume-scheduler: (766.982µs) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:34398]
I0212 07:24:28.441442  124047 wrap.go:47] POST /apis/rbac.authorization.k8s.io/v1/clusterroles: (1.433696ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:34398]
I0212 07:24:28.441716  124047 storage_rbac.go:187] created clusterrole.rbac.authorization.k8s.io/system:volume-scheduler
I0212 07:24:28.442731  124047 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/clusterroles/system:csi-external-provisioner: (772.191µs) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:34398]
I0212 07:24:28.444908  124047 wrap.go:47] POST /apis/rbac.authorization.k8s.io/v1/clusterroles: (1.63951ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:34398]
I0212 07:24:28.445116  124047 storage_rbac.go:187] created clusterrole.rbac.authorization.k8s.io/system:csi-external-provisioner
I0212 07:24:28.446306  124047 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/clusterroles/system:controller:attachdetach-controller: (917.242µs) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:34398]
I0212 07:24:28.448295  124047 wrap.go:47] POST /apis/rbac.authorization.k8s.io/v1/clusterroles: (1.551903ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:34398]
I0212 07:24:28.448545  124047 storage_rbac.go:187] created clusterrole.rbac.authorization.k8s.io/system:controller:attachdetach-controller
I0212 07:24:28.449529  124047 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/clusterroles/system:controller:clusterrole-aggregation-controller: (794.83µs) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:34398]
I0212 07:24:28.451554  124047 wrap.go:47] POST /apis/rbac.authorization.k8s.io/v1/clusterroles: (1.621943ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:34398]
I0212 07:24:28.451751  124047 storage_rbac.go:187] created clusterrole.rbac.authorization.k8s.io/system:controller:clusterrole-aggregation-controller
I0212 07:24:28.452908  124047 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/clusterroles/system:controller:cronjob-controller: (867.129µs) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:34398]
I0212 07:24:28.455110  124047 wrap.go:47] POST /apis/rbac.authorization.k8s.io/v1/clusterroles: (1.784585ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:34398]
I0212 07:24:28.455353  124047 storage_rbac.go:187] created clusterrole.rbac.authorization.k8s.io/system:controller:cronjob-controller
I0212 07:24:28.456369  124047 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/clusterroles/system:controller:daemon-set-controller: (766.882µs) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:34398]
I0212 07:24:28.458723  124047 wrap.go:47] POST /apis/rbac.authorization.k8s.io/v1/clusterroles: (1.854622ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:34398]
I0212 07:24:28.458963  124047 storage_rbac.go:187] created clusterrole.rbac.authorization.k8s.io/system:controller:daemon-set-controller
I0212 07:24:28.460163  124047 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/clusterroles/system:controller:deployment-controller: (948.829µs) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:34398]
I0212 07:24:28.462220  124047 wrap.go:47] POST /apis/rbac.authorization.k8s.io/v1/clusterroles: (1.658246ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:34398]
I0212 07:24:28.462469  124047 storage_rbac.go:187] created clusterrole.rbac.authorization.k8s.io/system:controller:deployment-controller
I0212 07:24:28.463663  124047 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/clusterroles/system:controller:disruption-controller: (985.596µs) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:34398]
I0212 07:24:28.466130  124047 wrap.go:47] POST /apis/rbac.authorization.k8s.io/v1/clusterroles: (1.778379ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:34398]
I0212 07:24:28.468920  124047 storage_rbac.go:187] created clusterrole.rbac.authorization.k8s.io/system:controller:disruption-controller
I0212 07:24:28.470561  124047 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/clusterroles/system:controller:endpoint-controller: (1.343343ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:34398]
I0212 07:24:28.472732  124047 wrap.go:47] POST /apis/rbac.authorization.k8s.io/v1/clusterroles: (1.706189ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:34398]
I0212 07:24:28.473051  124047 storage_rbac.go:187] created clusterrole.rbac.authorization.k8s.io/system:controller:endpoint-controller
I0212 07:24:28.474240  124047 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/clusterroles/system:controller:expand-controller: (966.83µs) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:34398]
I0212 07:24:28.476709  124047 wrap.go:47] POST /apis/rbac.authorization.k8s.io/v1/clusterroles: (1.994049ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:34398]
I0212 07:24:28.476944  124047 storage_rbac.go:187] created clusterrole.rbac.authorization.k8s.io/system:controller:expand-controller
I0212 07:24:28.478073  124047 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/clusterroles/system:controller:generic-garbage-collector: (907.59µs) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:34398]
I0212 07:24:28.479832  124047 wrap.go:47] POST /apis/rbac.authorization.k8s.io/v1/clusterroles: (1.348758ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:34398]
I0212 07:24:28.480146  124047 storage_rbac.go:187] created clusterrole.rbac.authorization.k8s.io/system:controller:generic-garbage-collector
I0212 07:24:28.481348  124047 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/clusterroles/system:controller:horizontal-pod-autoscaler: (1.024644ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:34398]
I0212 07:24:28.483426  124047 wrap.go:47] POST /apis/rbac.authorization.k8s.io/v1/clusterroles: (1.632972ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:34398]
I0212 07:24:28.483979  124047 storage_rbac.go:187] created clusterrole.rbac.authorization.k8s.io/system:controller:horizontal-pod-autoscaler
I0212 07:24:28.485297  124047 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/clusterroles/system:controller:job-controller: (1.079931ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:34398]
I0212 07:24:28.487343  124047 wrap.go:47] POST /apis/rbac.authorization.k8s.io/v1/clusterroles: (1.621801ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:34398]
I0212 07:24:28.487612  124047 storage_rbac.go:187] created clusterrole.rbac.authorization.k8s.io/system:controller:job-controller
I0212 07:24:28.488700  124047 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/clusterroles/system:controller:namespace-controller: (897.238µs) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:34398]
I0212 07:24:28.490810  124047 wrap.go:47] POST /apis/rbac.authorization.k8s.io/v1/clusterroles: (1.576727ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:34398]
I0212 07:24:28.491056  124047 storage_rbac.go:187] created clusterrole.rbac.authorization.k8s.io/system:controller:namespace-controller
I0212 07:24:28.492137  124047 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/clusterroles/system:controller:node-controller: (861.717µs) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:34398]
I0212 07:24:28.494113  124047 wrap.go:47] POST /apis/rbac.authorization.k8s.io/v1/clusterroles: (1.528163ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:34398]
I0212 07:24:28.494433  124047 storage_rbac.go:187] created clusterrole.rbac.authorization.k8s.io/system:controller:node-controller
I0212 07:24:28.495506  124047 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/clusterroles/system:controller:persistent-volume-binder: (807.118µs) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:34398]
I0212 07:24:28.497778  124047 wrap.go:47] POST /apis/rbac.authorization.k8s.io/v1/clusterroles: (1.848407ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:34398]
I0212 07:24:28.498368  124047 storage_rbac.go:187] created clusterrole.rbac.authorization.k8s.io/system:controller:persistent-volume-binder
I0212 07:24:28.499518  124047 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/clusterroles/system:controller:pod-garbage-collector: (909.989µs) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:34398]
I0212 07:24:28.501737  124047 wrap.go:47] POST /apis/rbac.authorization.k8s.io/v1/clusterroles: (1.826038ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:34398]
I0212 07:24:28.501960  124047 storage_rbac.go:187] created clusterrole.rbac.authorization.k8s.io/system:controller:pod-garbage-collector
I0212 07:24:28.503090  124047 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/clusterroles/system:controller:replicaset-controller: (894.602µs) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:34398]
I0212 07:24:28.505273  124047 wrap.go:47] POST /apis/rbac.authorization.k8s.io/v1/clusterroles: (1.616314ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:34398]
I0212 07:24:28.505569  124047 storage_rbac.go:187] created clusterrole.rbac.authorization.k8s.io/system:controller:replicaset-controller
I0212 07:24:28.506689  124047 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/clusterroles/system:controller:replication-controller: (886.908µs) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:34398]
I0212 07:24:28.508974  124047 wrap.go:47] POST /apis/rbac.authorization.k8s.io/v1/clusterroles: (1.841858ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:34398]
I0212 07:24:28.509275  124047 storage_rbac.go:187] created clusterrole.rbac.authorization.k8s.io/system:controller:replication-controller
I0212 07:24:28.510425  124047 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/clusterroles/system:controller:resourcequota-controller: (850.65µs) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:34398]
I0212 07:24:28.512606  124047 wrap.go:47] POST /apis/rbac.authorization.k8s.io/v1/clusterroles: (1.766778ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:34398]
I0212 07:24:28.512887  124047 storage_rbac.go:187] created clusterrole.rbac.authorization.k8s.io/system:controller:resourcequota-controller
I0212 07:24:28.514009  124047 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/clusterroles/system:controller:route-controller: (916.566µs) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:34398]
I0212 07:24:28.516434  124047 wrap.go:47] POST /apis/rbac.authorization.k8s.io/v1/clusterroles: (1.544718ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:34398]
I0212 07:24:28.516698  124047 storage_rbac.go:187] created clusterrole.rbac.authorization.k8s.io/system:controller:route-controller
I0212 07:24:28.518432  124047 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/clusterroles/system:controller:service-account-controller: (1.029415ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:34398]
I0212 07:24:28.521199  124047 wrap.go:47] POST /apis/rbac.authorization.k8s.io/v1/clusterroles: (1.903677ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:34398]
I0212 07:24:28.521431  124047 storage_rbac.go:187] created clusterrole.rbac.authorization.k8s.io/system:controller:service-account-controller
I0212 07:24:28.521835  124047 healthz.go:170] healthz check poststarthook/rbac/bootstrap-roles failed: not finished
I0212 07:24:28.522016  124047 wrap.go:47] GET /healthz: (1.013196ms) 500
goroutine 28551 [running]:
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/httplog.(*respLogger).recordStatus(0xc01e706620, 0x1f4)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/httplog.(*respLogger).WriteHeader(0xc01e706620, 0x1f4)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.(*baseTimeoutWriter).WriteHeader(0xc01e70ab80, 0x1f4)
net/http.Error(0x7f0a860eda60, 0xc01e1e88e0, 0xc0049b3cc0, 0x136, 0x1f4)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/healthz.handleRootHealthz.func1(0x7f0a860eda60, 0xc01e1e88e0, 0xc01e443f00)
net/http.HandlerFunc.ServeHTTP(0xc01dd48b60, 0x7f0a860eda60, 0xc01e1e88e0, 0xc01e443f00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/mux.(*pathHandler).ServeHTTP(0xc01dd50800, 0x7f0a860eda60, 0xc01e1e88e0, 0xc01e443f00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/mux.(*PathRecorderMux).ServeHTTP(0xc013d3e380, 0x7f0a860eda60, 0xc01e1e88e0, 0xc01e443f00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server.director.ServeHTTP(0x41bac8d, 0xe, 0xc01ab93320, 0xc013d3e380, 0x7f0a860eda60, 0xc01e1e88e0, 0xc01e443f00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/filters.WithAuthorization.func1(0x7f0a860eda60, 0xc01e1e88e0, 0xc01e443f00)
net/http.HandlerFunc.ServeHTTP(0xc01abb9140, 0x7f0a860eda60, 0xc01e1e88e0, 0xc01e443f00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.WithMaxInFlightLimit.func1(0x7f0a860eda60, 0xc01e1e88e0, 0xc01e443f00)
net/http.HandlerFunc.ServeHTTP(0xc013d3ca20, 0x7f0a860eda60, 0xc01e1e88e0, 0xc01e443f00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/filters.WithImpersonation.func1(0x7f0a860eda60, 0xc01e1e88e0, 0xc01e443f00)
net/http.HandlerFunc.ServeHTTP(0xc01abb9180, 0x7f0a860eda60, 0xc01e1e88e0, 0xc01e443f00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/filters.WithAuthentication.func1(0x7f0a860eda60, 0xc01e1e88e0, 0xc01e443e00)
net/http.HandlerFunc.ServeHTTP(0xc01abc7360, 0x7f0a860eda60, 0xc01e1e88e0, 0xc01e443e00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.(*timeoutHandler).ServeHTTP.func1(0xc01e45f740, 0xc01ac164e0, 0x60d7720, 0xc01e1e88e0, 0xc01e443e00)
created by k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.(*timeoutHandler).ServeHTTP

logging error output: "[+]ping ok\n[+]log ok\n[+]etcd ok\n[+]poststarthook/generic-apiserver-start-informers ok\n[+]poststarthook/bootstrap-controller ok\n[-]poststarthook/rbac/bootstrap-roles failed: reason withheld\n[+]poststarthook/scheduling/bootstrap-system-priority-classes ok\n[+]poststarthook/ca-registration ok\nhealthz check failed\n"
 [Go-http-client/1.1 127.0.0.1:34658]
I0212 07:24:28.522552  124047 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/clusterroles/system:controller:service-controller: (907.354µs) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:34398]
I0212 07:24:28.524261  124047 wrap.go:47] POST /apis/rbac.authorization.k8s.io/v1/clusterroles: (1.358906ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:34398]
I0212 07:24:28.524528  124047 storage_rbac.go:187] created clusterrole.rbac.authorization.k8s.io/system:controller:service-controller
I0212 07:24:28.525774  124047 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/clusterroles/system:controller:statefulset-controller: (1.017465ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:34398]
I0212 07:24:28.528009  124047 wrap.go:47] POST /apis/rbac.authorization.k8s.io/v1/clusterroles: (1.747421ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:34398]
I0212 07:24:28.528259  124047 storage_rbac.go:187] created clusterrole.rbac.authorization.k8s.io/system:controller:statefulset-controller
I0212 07:24:28.529369  124047 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/clusterroles/system:controller:ttl-controller: (900.557µs) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:34398]
I0212 07:24:28.531209  124047 wrap.go:47] POST /apis/rbac.authorization.k8s.io/v1/clusterroles: (1.318766ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:34398]
I0212 07:24:28.531481  124047 storage_rbac.go:187] created clusterrole.rbac.authorization.k8s.io/system:controller:ttl-controller
I0212 07:24:28.532486  124047 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/clusterroles/system:controller:certificate-controller: (767.702µs) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:34398]
I0212 07:24:28.535112  124047 wrap.go:47] POST /apis/rbac.authorization.k8s.io/v1/clusterroles: (2.165818ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:34398]
I0212 07:24:28.535424  124047 storage_rbac.go:187] created clusterrole.rbac.authorization.k8s.io/system:controller:certificate-controller
I0212 07:24:28.537580  124047 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/clusterroles/system:controller:pvc-protection-controller: (1.30668ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:34398]
I0212 07:24:28.560695  124047 wrap.go:47] POST /apis/rbac.authorization.k8s.io/v1/clusterroles: (2.617522ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:34398]
I0212 07:24:28.561157  124047 storage_rbac.go:187] created clusterrole.rbac.authorization.k8s.io/system:controller:pvc-protection-controller
I0212 07:24:28.577785  124047 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/clusterroles/system:controller:pv-protection-controller: (1.541443ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:34398]
I0212 07:24:28.599173  124047 wrap.go:47] POST /apis/rbac.authorization.k8s.io/v1/clusterroles: (1.918319ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:34398]
I0212 07:24:28.599532  124047 storage_rbac.go:187] created clusterrole.rbac.authorization.k8s.io/system:controller:pv-protection-controller
I0212 07:24:28.617398  124047 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/clusterrolebindings/cluster-admin: (1.112163ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:34398]
I0212 07:24:28.617630  124047 healthz.go:170] healthz check poststarthook/rbac/bootstrap-roles failed: not finished
I0212 07:24:28.617822  124047 wrap.go:47] GET /healthz: (838.112µs) 500
goroutine 28624 [running]:
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/httplog.(*respLogger).recordStatus(0xc01e695110, 0x1f4)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/httplog.(*respLogger).WriteHeader(0xc01e695110, 0x1f4)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.(*baseTimeoutWriter).WriteHeader(0xc01e703ba0, 0x1f4)
net/http.Error(0x7f0a860eda60, 0xc01e5944a8, 0xc01e83a140, 0x136, 0x1f4)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/healthz.handleRootHealthz.func1(0x7f0a860eda60, 0xc01e5944a8, 0xc01e7b4c00)
net/http.HandlerFunc.ServeHTTP(0xc01dd48b60, 0x7f0a860eda60, 0xc01e5944a8, 0xc01e7b4c00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/mux.(*pathHandler).ServeHTTP(0xc01dd50800, 0x7f0a860eda60, 0xc01e5944a8, 0xc01e7b4c00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/mux.(*PathRecorderMux).ServeHTTP(0xc013d3e380, 0x7f0a860eda60, 0xc01e5944a8, 0xc01e7b4c00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server.director.ServeHTTP(0x41bac8d, 0xe, 0xc01ab93320, 0xc013d3e380, 0x7f0a860eda60, 0xc01e5944a8, 0xc01e7b4c00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/filters.WithAuthorization.func1(0x7f0a860eda60, 0xc01e5944a8, 0xc01e7b4c00)
net/http.HandlerFunc.ServeHTTP(0xc01abb9140, 0x7f0a860eda60, 0xc01e5944a8, 0xc01e7b4c00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.WithMaxInFlightLimit.func1(0x7f0a860eda60, 0xc01e5944a8, 0xc01e7b4c00)
net/http.HandlerFunc.ServeHTTP(0xc013d3ca20, 0x7f0a860eda60, 0xc01e5944a8, 0xc01e7b4c00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/filters.WithImpersonation.func1(0x7f0a860eda60, 0xc01e5944a8, 0xc01e7b4c00)
net/http.HandlerFunc.ServeHTTP(0xc01abb9180, 0x7f0a860eda60, 0xc01e5944a8, 0xc01e7b4c00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/filters.WithAuthentication.func1(0x7f0a860eda60, 0xc01e5944a8, 0xc01e7b4b00)
net/http.HandlerFunc.ServeHTTP(0xc01abc7360, 0x7f0a860eda60, 0xc01e5944a8, 0xc01e7b4b00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.(*timeoutHandler).ServeHTTP.func1(0xc01e7f0840, 0xc01ac164e0, 0x60d7720, 0xc01e5944a8, 0xc01e7b4b00)
created by k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.(*timeoutHandler).ServeHTTP

logging error output: "[+]ping ok\n[+]log ok\n[+]etcd ok\n[+]poststarthook/generic-apiserver-start-informers ok\n[+]poststarthook/bootstrap-controller ok\n[-]poststarthook/rbac/bootstrap-roles failed: reason withheld\n[+]poststarthook/scheduling/bootstrap-system-priority-classes ok\n[+]poststarthook/ca-registration ok\nhealthz check failed\n"
 [Go-http-client/1.1 127.0.0.1:34658]
I0212 07:24:28.638392  124047 wrap.go:47] POST /apis/rbac.authorization.k8s.io/v1/clusterrolebindings: (2.11241ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:34658]
I0212 07:24:28.638693  124047 storage_rbac.go:215] created clusterrolebinding.rbac.authorization.k8s.io/cluster-admin
I0212 07:24:28.657458  124047 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system:discovery: (1.156265ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:34658]
I0212 07:24:28.678351  124047 wrap.go:47] POST /apis/rbac.authorization.k8s.io/v1/clusterrolebindings: (2.053062ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:34658]
I0212 07:24:28.678588  124047 storage_rbac.go:215] created clusterrolebinding.rbac.authorization.k8s.io/system:discovery
I0212 07:24:28.697629  124047 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system:basic-user: (1.299633ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:34658]
I0212 07:24:28.718254  124047 healthz.go:170] healthz check poststarthook/rbac/bootstrap-roles failed: not finished
I0212 07:24:28.718481  124047 wrap.go:47] GET /healthz: (1.51898ms) 500
goroutine 28561 [running]:
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/httplog.(*respLogger).recordStatus(0xc01e8a4000, 0x1f4)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/httplog.(*respLogger).WriteHeader(0xc01e8a4000, 0x1f4)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.(*baseTimeoutWriter).WriteHeader(0xc01e7f9d00, 0x1f4)
net/http.Error(0x7f0a860eda60, 0xc01e1e8c50, 0xc014ed9e00, 0x136, 0x1f4)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/healthz.handleRootHealthz.func1(0x7f0a860eda60, 0xc01e1e8c50, 0xc01e86eb00)
net/http.HandlerFunc.ServeHTTP(0xc01dd48b60, 0x7f0a860eda60, 0xc01e1e8c50, 0xc01e86eb00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/mux.(*pathHandler).ServeHTTP(0xc01dd50800, 0x7f0a860eda60, 0xc01e1e8c50, 0xc01e86eb00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/mux.(*PathRecorderMux).ServeHTTP(0xc013d3e380, 0x7f0a860eda60, 0xc01e1e8c50, 0xc01e86eb00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server.director.ServeHTTP(0x41bac8d, 0xe, 0xc01ab93320, 0xc013d3e380, 0x7f0a860eda60, 0xc01e1e8c50, 0xc01e86eb00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/filters.WithAuthorization.func1(0x7f0a860eda60, 0xc01e1e8c50, 0xc01e86eb00)
net/http.HandlerFunc.ServeHTTP(0xc01abb9140, 0x7f0a860eda60, 0xc01e1e8c50, 0xc01e86eb00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.WithMaxInFlightLimit.func1(0x7f0a860eda60, 0xc01e1e8c50, 0xc01e86eb00)
net/http.HandlerFunc.ServeHTTP(0xc013d3ca20, 0x7f0a860eda60, 0xc01e1e8c50, 0xc01e86eb00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/filters.WithImpersonation.func1(0x7f0a860eda60, 0xc01e1e8c50, 0xc01e86eb00)
net/http.HandlerFunc.ServeHTTP(0xc01abb9180, 0x7f0a860eda60, 0xc01e1e8c50, 0xc01e86eb00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/filters.WithAuthentication.func1(0x7f0a860eda60, 0xc01e1e8c50, 0xc01e86ea00)
net/http.HandlerFunc.ServeHTTP(0xc01abc7360, 0x7f0a860eda60, 0xc01e1e8c50, 0xc01e86ea00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.(*timeoutHandler).ServeHTTP.func1(0xc01e872300, 0xc01ac164e0, 0x60d7720, 0xc01e1e8c50, 0xc01e86ea00)
created by k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.(*timeoutHandler).ServeHTTP

logging error output: "[+]ping ok\n[+]log ok\n[+]etcd ok\n[+]poststarthook/generic-apiserver-start-informers ok\n[+]poststarthook/bootstrap-controller ok\n[-]poststarthook/rbac/bootstrap-roles failed: reason withheld\n[+]poststarthook/scheduling/bootstrap-system-priority-classes ok\n[+]poststarthook/ca-registration ok\nhealthz check failed\n"
 [Go-http-client/1.1 127.0.0.1:34398]
I0212 07:24:28.718658  124047 wrap.go:47] POST /apis/rbac.authorization.k8s.io/v1/clusterrolebindings: (2.417061ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:34658]
I0212 07:24:28.718918  124047 storage_rbac.go:215] created clusterrolebinding.rbac.authorization.k8s.io/system:basic-user
I0212 07:24:28.737636  124047 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system:node-proxier: (1.297157ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:34658]
I0212 07:24:28.757979  124047 wrap.go:47] POST /apis/rbac.authorization.k8s.io/v1/clusterrolebindings: (1.769148ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:34658]
I0212 07:24:28.758204  124047 storage_rbac.go:215] created clusterrolebinding.rbac.authorization.k8s.io/system:node-proxier
I0212 07:24:28.777649  124047 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system:kube-controller-manager: (1.274577ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:34658]
I0212 07:24:28.798162  124047 wrap.go:47] POST /apis/rbac.authorization.k8s.io/v1/clusterrolebindings: (1.936344ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:34658]
I0212 07:24:28.798556  124047 storage_rbac.go:215] created clusterrolebinding.rbac.authorization.k8s.io/system:kube-controller-manager
I0212 07:24:28.817398  124047 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system:kube-dns: (1.21027ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:34658]
I0212 07:24:28.817619  124047 healthz.go:170] healthz check poststarthook/rbac/bootstrap-roles failed: not finished
I0212 07:24:28.817762  124047 wrap.go:47] GET /healthz: (828.984µs) 500
goroutine 28685 [running]:
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/httplog.(*respLogger).recordStatus(0xc01e7b1ce0, 0x1f4)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/httplog.(*respLogger).WriteHeader(0xc01e7b1ce0, 0x1f4)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.(*baseTimeoutWriter).WriteHeader(0xc01e8f46c0, 0x1f4)
net/http.Error(0x7f0a860eda60, 0xc01aa87f68, 0xc01d2aa3c0, 0x136, 0x1f4)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/healthz.handleRootHealthz.func1(0x7f0a860eda60, 0xc01aa87f68, 0xc01e88cf00)
net/http.HandlerFunc.ServeHTTP(0xc01dd48b60, 0x7f0a860eda60, 0xc01aa87f68, 0xc01e88cf00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/mux.(*pathHandler).ServeHTTP(0xc01dd50800, 0x7f0a860eda60, 0xc01aa87f68, 0xc01e88cf00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/mux.(*PathRecorderMux).ServeHTTP(0xc013d3e380, 0x7f0a860eda60, 0xc01aa87f68, 0xc01e88cf00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server.director.ServeHTTP(0x41bac8d, 0xe, 0xc01ab93320, 0xc013d3e380, 0x7f0a860eda60, 0xc01aa87f68, 0xc01e88cf00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/filters.WithAuthorization.func1(0x7f0a860eda60, 0xc01aa87f68, 0xc01e88cf00)
net/http.HandlerFunc.ServeHTTP(0xc01abb9140, 0x7f0a860eda60, 0xc01aa87f68, 0xc01e88cf00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.WithMaxInFlightLimit.func1(0x7f0a860eda60, 0xc01aa87f68, 0xc01e88cf00)
net/http.HandlerFunc.ServeHTTP(0xc013d3ca20, 0x7f0a860eda60, 0xc01aa87f68, 0xc01e88cf00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/filters.WithImpersonation.func1(0x7f0a860eda60, 0xc01aa87f68, 0xc01e88cf00)
net/http.HandlerFunc.ServeHTTP(0xc01abb9180, 0x7f0a860eda60, 0xc01aa87f68, 0xc01e88cf00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/filters.WithAuthentication.func1(0x7f0a860eda60, 0xc01aa87f68, 0xc01e88ce00)
net/http.HandlerFunc.ServeHTTP(0xc01abc7360, 0x7f0a860eda60, 0xc01aa87f68, 0xc01e88ce00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.(*timeoutHandler).ServeHTTP.func1(0xc01e892ba0, 0xc01ac164e0, 0x60d7720, 0xc01aa87f68, 0xc01e88ce00)
created by k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.(*timeoutHandler).ServeHTTP

logging error output: "[+]ping ok\n[+]log ok\n[+]etcd ok\n[+]poststarthook/generic-apiserver-start-informers ok\n[+]poststarthook/bootstrap-controller ok\n[-]poststarthook/rbac/bootstrap-roles failed: reason withheld\n[+]poststarthook/scheduling/bootstrap-system-priority-classes ok\n[+]poststarthook/ca-registration ok\nhealthz check failed\n"
 [Go-http-client/1.1 127.0.0.1:34398]
I0212 07:24:28.838440  124047 wrap.go:47] POST /apis/rbac.authorization.k8s.io/v1/clusterrolebindings: (2.032774ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:34398]
I0212 07:24:28.838723  124047 storage_rbac.go:215] created clusterrolebinding.rbac.authorization.k8s.io/system:kube-dns
I0212 07:24:28.857621  124047 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system:kube-scheduler: (1.294191ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:34398]
I0212 07:24:28.878281  124047 wrap.go:47] POST /apis/rbac.authorization.k8s.io/v1/clusterrolebindings: (2.068859ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:34398]
I0212 07:24:28.878568  124047 storage_rbac.go:215] created clusterrolebinding.rbac.authorization.k8s.io/system:kube-scheduler
I0212 07:24:28.897750  124047 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system:aws-cloud-provider: (1.336417ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:34398]
I0212 07:24:28.918064  124047 healthz.go:170] healthz check poststarthook/rbac/bootstrap-roles failed: not finished
I0212 07:24:28.918268  124047 wrap.go:47] GET /healthz: (1.296006ms) 500
goroutine 28724 [running]:
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/httplog.(*respLogger).recordStatus(0xc01e8d3030, 0x1f4)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/httplog.(*respLogger).WriteHeader(0xc01e8d3030, 0x1f4)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.(*baseTimeoutWriter).WriteHeader(0xc01e97c000, 0x1f4)
net/http.Error(0x7f0a860eda60, 0xc015959cd0, 0xc01d2aa780, 0x136, 0x1f4)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/healthz.handleRootHealthz.func1(0x7f0a860eda60, 0xc015959cd0, 0xc01e90ae00)
net/http.HandlerFunc.ServeHTTP(0xc01dd48b60, 0x7f0a860eda60, 0xc015959cd0, 0xc01e90ae00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/mux.(*pathHandler).ServeHTTP(0xc01dd50800, 0x7f0a860eda60, 0xc015959cd0, 0xc01e90ae00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/mux.(*PathRecorderMux).ServeHTTP(0xc013d3e380, 0x7f0a860eda60, 0xc015959cd0, 0xc01e90ae00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server.director.ServeHTTP(0x41bac8d, 0xe, 0xc01ab93320, 0xc013d3e380, 0x7f0a860eda60, 0xc015959cd0, 0xc01e90ae00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/filters.WithAuthorization.func1(0x7f0a860eda60, 0xc015959cd0, 0xc01e90ae00)
net/http.HandlerFunc.ServeHTTP(0xc01abb9140, 0x7f0a860eda60, 0xc015959cd0, 0xc01e90ae00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.WithMaxInFlightLimit.func1(0x7f0a860eda60, 0xc015959cd0, 0xc01e90ae00)
net/http.HandlerFunc.ServeHTTP(0xc013d3ca20, 0x7f0a860eda60, 0xc015959cd0, 0xc01e90ae00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/filters.WithImpersonation.func1(0x7f0a860eda60, 0xc015959cd0, 0xc01e90ae00)
net/http.HandlerFunc.ServeHTTP(0xc01abb9180, 0x7f0a860eda60, 0xc015959cd0, 0xc01e90ae00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/filters.WithAuthentication.func1(0x7f0a860eda60, 0xc015959cd0, 0xc01e90ad00)
net/http.HandlerFunc.ServeHTTP(0xc01abc7360, 0x7f0a860eda60, 0xc015959cd0, 0xc01e90ad00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.(*timeoutHandler).ServeHTTP.func1(0xc01e8990e0, 0xc01ac164e0, 0x60d7720, 0xc015959cd0, 0xc01e90ad00)
created by k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.(*timeoutHandler).ServeHTTP

logging error output: "[+]ping ok\n[+]log ok\n[+]etcd ok\n[+]poststarthook/generic-apiserver-start-informers ok\n[+]poststarthook/bootstrap-controller ok\n[-]poststarthook/rbac/bootstrap-roles failed: reason withheld\n[+]poststarthook/scheduling/bootstrap-system-priority-classes ok\n[+]poststarthook/ca-registration ok\nhealthz check failed\n"
 [Go-http-client/1.1 127.0.0.1:34658]
I0212 07:24:28.918485  124047 wrap.go:47] POST /apis/rbac.authorization.k8s.io/v1/clusterrolebindings: (2.23894ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:34398]
I0212 07:24:28.918775  124047 storage_rbac.go:215] created clusterrolebinding.rbac.authorization.k8s.io/system:aws-cloud-provider
I0212 07:24:28.937401  124047 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system:volume-scheduler: (1.081043ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:34398]
I0212 07:24:28.958178  124047 wrap.go:47] POST /apis/rbac.authorization.k8s.io/v1/clusterrolebindings: (1.943402ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:34398]
I0212 07:24:28.958466  124047 storage_rbac.go:215] created clusterrolebinding.rbac.authorization.k8s.io/system:volume-scheduler
I0212 07:24:28.977394  124047 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system:node: (1.14558ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:34398]
I0212 07:24:28.998431  124047 wrap.go:47] POST /apis/rbac.authorization.k8s.io/v1/clusterrolebindings: (2.202275ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:34398]
I0212 07:24:28.998673  124047 storage_rbac.go:215] created clusterrolebinding.rbac.authorization.k8s.io/system:node
I0212 07:24:29.017369  124047 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system:controller:attachdetach-controller: (1.095507ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:34398]
I0212 07:24:29.017901  124047 healthz.go:170] healthz check poststarthook/rbac/bootstrap-roles failed: not finished
I0212 07:24:29.018076  124047 wrap.go:47] GET /healthz: (887.974µs) 500
goroutine 28697 [running]:
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/httplog.(*respLogger).recordStatus(0xc01e8f0620, 0x1f4)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/httplog.(*respLogger).WriteHeader(0xc01e8f0620, 0x1f4)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.(*baseTimeoutWriter).WriteHeader(0xc01e9c24e0, 0x1f4)
net/http.Error(0x7f0a860eda60, 0xc01e594670, 0xc010fdf2c0, 0x136, 0x1f4)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/healthz.handleRootHealthz.func1(0x7f0a860eda60, 0xc01e594670, 0xc01e9e4700)
net/http.HandlerFunc.ServeHTTP(0xc01dd48b60, 0x7f0a860eda60, 0xc01e594670, 0xc01e9e4700)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/mux.(*pathHandler).ServeHTTP(0xc01dd50800, 0x7f0a860eda60, 0xc01e594670, 0xc01e9e4700)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/mux.(*PathRecorderMux).ServeHTTP(0xc013d3e380, 0x7f0a860eda60, 0xc01e594670, 0xc01e9e4700)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server.director.ServeHTTP(0x41bac8d, 0xe, 0xc01ab93320, 0xc013d3e380, 0x7f0a860eda60, 0xc01e594670, 0xc01e9e4700)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/filters.WithAuthorization.func1(0x7f0a860eda60, 0xc01e594670, 0xc01e9e4700)
net/http.HandlerFunc.ServeHTTP(0xc01abb9140, 0x7f0a860eda60, 0xc01e594670, 0xc01e9e4700)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.WithMaxInFlightLimit.func1(0x7f0a860eda60, 0xc01e594670, 0xc01e9e4700)
net/http.HandlerFunc.ServeHTTP(0xc013d3ca20, 0x7f0a860eda60, 0xc01e594670, 0xc01e9e4700)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/filters.WithImpersonation.func1(0x7f0a860eda60, 0xc01e594670, 0xc01e9e4700)
net/http.HandlerFunc.ServeHTTP(0xc01abb9180, 0x7f0a860eda60, 0xc01e594670, 0xc01e9e4700)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/filters.WithAuthentication.func1(0x7f0a860eda60, 0xc01e594670, 0xc01e9e4600)
net/http.HandlerFunc.ServeHTTP(0xc01abc7360, 0x7f0a860eda60, 0xc01e594670, 0xc01e9e4600)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.(*timeoutHandler).ServeHTTP.func1(0xc01e7f18c0, 0xc01ac164e0, 0x60d7720, 0xc01e594670, 0xc01e9e4600)
created by k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.(*timeoutHandler).ServeHTTP

logging error output: "[+]ping ok\n[+]log ok\n[+]etcd ok\n[+]poststarthook/generic-apiserver-start-informers ok\n[+]poststarthook/bootstrap-controller ok\n[-]poststarthook/rbac/bootstrap-roles failed: reason withheld\n[+]poststarthook/scheduling/bootstrap-system-priority-classes ok\n[+]poststarthook/ca-registration ok\nhealthz check failed\n"
 [Go-http-client/1.1 127.0.0.1:34658]
I0212 07:24:29.038119  124047 wrap.go:47] POST /apis/rbac.authorization.k8s.io/v1/clusterrolebindings: (1.841736ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:34658]
I0212 07:24:29.038432  124047 storage_rbac.go:215] created clusterrolebinding.rbac.authorization.k8s.io/system:controller:attachdetach-controller
I0212 07:24:29.057308  124047 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system:controller:clusterrole-aggregation-controller: (1.081383ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:34658]
I0212 07:24:29.078420  124047 wrap.go:47] POST /apis/rbac.authorization.k8s.io/v1/clusterrolebindings: (2.144449ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:34658]
I0212 07:24:29.078687  124047 storage_rbac.go:215] created clusterrolebinding.rbac.authorization.k8s.io/system:controller:clusterrole-aggregation-controller
I0212 07:24:29.097562  124047 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system:controller:cronjob-controller: (1.352591ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:34658]
I0212 07:24:29.118203  124047 healthz.go:170] healthz check poststarthook/rbac/bootstrap-roles failed: not finished
I0212 07:24:29.118386  124047 wrap.go:47] GET /healthz: (1.477001ms) 500
goroutine 28704 [running]:
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/httplog.(*respLogger).recordStatus(0xc01e8f11f0, 0x1f4)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/httplog.(*respLogger).WriteHeader(0xc01e8f11f0, 0x1f4)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.(*baseTimeoutWriter).WriteHeader(0xc01e9c3d60, 0x1f4)
net/http.Error(0x7f0a860eda60, 0xc01e5947d0, 0xc01ea46140, 0x136, 0x1f4)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/healthz.handleRootHealthz.func1(0x7f0a860eda60, 0xc01e5947d0, 0xc01e9e5d00)
net/http.HandlerFunc.ServeHTTP(0xc01dd48b60, 0x7f0a860eda60, 0xc01e5947d0, 0xc01e9e5d00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/mux.(*pathHandler).ServeHTTP(0xc01dd50800, 0x7f0a860eda60, 0xc01e5947d0, 0xc01e9e5d00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/mux.(*PathRecorderMux).ServeHTTP(0xc013d3e380, 0x7f0a860eda60, 0xc01e5947d0, 0xc01e9e5d00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server.director.ServeHTTP(0x41bac8d, 0xe, 0xc01ab93320, 0xc013d3e380, 0x7f0a860eda60, 0xc01e5947d0, 0xc01e9e5d00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/filters.WithAuthorization.func1(0x7f0a860eda60, 0xc01e5947d0, 0xc01e9e5d00)
net/http.HandlerFunc.ServeHTTP(0xc01abb9140, 0x7f0a860eda60, 0xc01e5947d0, 0xc01e9e5d00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.WithMaxInFlightLimit.func1(0x7f0a860eda60, 0xc01e5947d0, 0xc01e9e5d00)
net/http.HandlerFunc.ServeHTTP(0xc013d3ca20, 0x7f0a860eda60, 0xc01e5947d0, 0xc01e9e5d00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/filters.WithImpersonation.func1(0x7f0a860eda60, 0xc01e5947d0, 0xc01e9e5d00)
net/http.HandlerFunc.ServeHTTP(0xc01abb9180, 0x7f0a860eda60, 0xc01e5947d0, 0xc01e9e5d00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/filters.WithAuthentication.func1(0x7f0a860eda60, 0xc01e5947d0, 0xc01e9e5c00)
net/http.HandlerFunc.ServeHTTP(0xc01abc7360, 0x7f0a860eda60, 0xc01e5947d0, 0xc01e9e5c00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.(*timeoutHandler).ServeHTTP.func1(0xc01ea1c240, 0xc01ac164e0, 0x60d7720, 0xc01e5947d0, 0xc01e9e5c00)
created by k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.(*timeoutHandler).ServeHTTP

logging error output: "[+]ping ok\n[+]log ok\n[+]etcd ok\n[+]poststarthook/generic-apiserver-start-informers ok\n[+]poststarthook/bootstrap-controller ok\n[-]poststarthook/rbac/bootstrap-roles failed: reason withheld\n[+]poststarthook/scheduling/bootstrap-system-priority-classes ok\n[+]poststarthook/ca-registration ok\nhealthz check failed\n"
 [Go-http-client/1.1 127.0.0.1:34398]
I0212 07:24:29.118441  124047 wrap.go:47] POST /apis/rbac.authorization.k8s.io/v1/clusterrolebindings: (2.120535ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:34658]
I0212 07:24:29.118677  124047 storage_rbac.go:215] created clusterrolebinding.rbac.authorization.k8s.io/system:controller:cronjob-controller
I0212 07:24:29.137853  124047 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system:controller:daemon-set-controller: (1.599636ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:34398]
I0212 07:24:29.158889  124047 wrap.go:47] POST /apis/rbac.authorization.k8s.io/v1/clusterrolebindings: (2.58309ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:34398]
I0212 07:24:29.159207  124047 storage_rbac.go:215] created clusterrolebinding.rbac.authorization.k8s.io/system:controller:daemon-set-controller
I0212 07:24:29.178020  124047 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system:controller:deployment-controller: (1.401928ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:34398]
I0212 07:24:29.198361  124047 wrap.go:47] POST /apis/rbac.authorization.k8s.io/v1/clusterrolebindings: (2.10989ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:34398]
I0212 07:24:29.198669  124047 storage_rbac.go:215] created clusterrolebinding.rbac.authorization.k8s.io/system:controller:deployment-controller
I0212 07:24:29.217529  124047 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system:controller:disruption-controller: (1.244578ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:34398]
I0212 07:24:29.217935  124047 healthz.go:170] healthz check poststarthook/rbac/bootstrap-roles failed: not finished
I0212 07:24:29.218149  124047 wrap.go:47] GET /healthz: (1.257752ms) 500
goroutine 28787 [running]:
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/httplog.(*respLogger).recordStatus(0xc01e6adb90, 0x1f4)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/httplog.(*respLogger).WriteHeader(0xc01e6adb90, 0x1f4)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.(*baseTimeoutWriter).WriteHeader(0xc01ea906e0, 0x1f4)
net/http.Error(0x7f0a860eda60, 0xc01e56e540, 0xc01e83a500, 0x136, 0x1f4)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/healthz.handleRootHealthz.func1(0x7f0a860eda60, 0xc01e56e540, 0xc01e979900)
net/http.HandlerFunc.ServeHTTP(0xc01dd48b60, 0x7f0a860eda60, 0xc01e56e540, 0xc01e979900)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/mux.(*pathHandler).ServeHTTP(0xc01dd50800, 0x7f0a860eda60, 0xc01e56e540, 0xc01e979900)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/mux.(*PathRecorderMux).ServeHTTP(0xc013d3e380, 0x7f0a860eda60, 0xc01e56e540, 0xc01e979900)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server.director.ServeHTTP(0x41bac8d, 0xe, 0xc01ab93320, 0xc013d3e380, 0x7f0a860eda60, 0xc01e56e540, 0xc01e979900)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/filters.WithAuthorization.func1(0x7f0a860eda60, 0xc01e56e540, 0xc01e979900)
net/http.HandlerFunc.ServeHTTP(0xc01abb9140, 0x7f0a860eda60, 0xc01e56e540, 0xc01e979900)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.WithMaxInFlightLimit.func1(0x7f0a860eda60, 0xc01e56e540, 0xc01e979900)
net/http.HandlerFunc.ServeHTTP(0xc013d3ca20, 0x7f0a860eda60, 0xc01e56e540, 0xc01e979900)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/filters.WithImpersonation.func1(0x7f0a860eda60, 0xc01e56e540, 0xc01e979900)
net/http.HandlerFunc.ServeHTTP(0xc01abb9180, 0x7f0a860eda60, 0xc01e56e540, 0xc01e979900)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/filters.WithAuthentication.func1(0x7f0a860eda60, 0xc01e56e540, 0xc01e979800)
net/http.HandlerFunc.ServeHTTP(0xc01abc7360, 0x7f0a860eda60, 0xc01e56e540, 0xc01e979800)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.(*timeoutHandler).ServeHTTP.func1(0xc01e5fbbc0, 0xc01ac164e0, 0x60d7720, 0xc01e56e540, 0xc01e979800)
created by k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.(*timeoutHandler).ServeHTTP

logging error output: "[+]ping ok\n[+]log ok\n[+]etcd ok\n[+]poststarthook/generic-apiserver-start-informers ok\n[+]poststarthook/bootstrap-controller ok\n[-]poststarthook/rbac/bootstrap-roles failed: reason withheld\n[+]poststarthook/scheduling/bootstrap-system-priority-classes ok\n[+]poststarthook/ca-registration ok\nhealthz check failed\n"
 [Go-http-client/1.1 127.0.0.1:34658]
I0212 07:24:29.238347  124047 wrap.go:47] POST /apis/rbac.authorization.k8s.io/v1/clusterrolebindings: (2.0347ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:34658]
I0212 07:24:29.238635  124047 storage_rbac.go:215] created clusterrolebinding.rbac.authorization.k8s.io/system:controller:disruption-controller
I0212 07:24:29.257530  124047 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system:controller:endpoint-controller: (1.341994ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:34658]
I0212 07:24:29.278727  124047 wrap.go:47] POST /apis/rbac.authorization.k8s.io/v1/clusterrolebindings: (2.439489ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:34658]
I0212 07:24:29.278956  124047 storage_rbac.go:215] created clusterrolebinding.rbac.authorization.k8s.io/system:controller:endpoint-controller
I0212 07:24:29.297620  124047 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system:controller:expand-controller: (1.355864ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:34658]
I0212 07:24:29.318618  124047 wrap.go:47] POST /apis/rbac.authorization.k8s.io/v1/clusterrolebindings: (2.429161ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:34658]
I0212 07:24:29.318928  124047 storage_rbac.go:215] created clusterrolebinding.rbac.authorization.k8s.io/system:controller:expand-controller
I0212 07:24:29.319089  124047 healthz.go:170] healthz check poststarthook/rbac/bootstrap-roles failed: not finished
I0212 07:24:29.319227  124047 wrap.go:47] GET /healthz: (1.637936ms) 500
goroutine 28778 [running]:
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/httplog.(*respLogger).recordStatus(0xc01eaea3f0, 0x1f4)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/httplog.(*respLogger).WriteHeader(0xc01eaea3f0, 0x1f4)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.(*baseTimeoutWriter).WriteHeader(0xc01ead0cc0, 0x1f4)
net/http.Error(0x7f0a860eda60, 0xc01dd353f0, 0xc01e938280, 0x136, 0x1f4)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/healthz.handleRootHealthz.func1(0x7f0a860eda60, 0xc01dd353f0, 0xc01eac5200)
net/http.HandlerFunc.ServeHTTP(0xc01dd48b60, 0x7f0a860eda60, 0xc01dd353f0, 0xc01eac5200)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/mux.(*pathHandler).ServeHTTP(0xc01dd50800, 0x7f0a860eda60, 0xc01dd353f0, 0xc01eac5200)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/mux.(*PathRecorderMux).ServeHTTP(0xc013d3e380, 0x7f0a860eda60, 0xc01dd353f0, 0xc01eac5200)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server.director.ServeHTTP(0x41bac8d, 0xe, 0xc01ab93320, 0xc013d3e380, 0x7f0a860eda60, 0xc01dd353f0, 0xc01eac5200)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/filters.WithAuthorization.func1(0x7f0a860eda60, 0xc01dd353f0, 0xc01eac5200)
net/http.HandlerFunc.ServeHTTP(0xc01abb9140, 0x7f0a860eda60, 0xc01dd353f0, 0xc01eac5200)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.WithMaxInFlightLimit.func1(0x7f0a860eda60, 0xc01dd353f0, 0xc01eac5200)
net/http.HandlerFunc.ServeHTTP(0xc013d3ca20, 0x7f0a860eda60, 0xc01dd353f0, 0xc01eac5200)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/filters.WithImpersonation.func1(0x7f0a860eda60, 0xc01dd353f0, 0xc01eac5200)
net/http.HandlerFunc.ServeHTTP(0xc01abb9180, 0x7f0a860eda60, 0xc01dd353f0, 0xc01eac5200)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/filters.WithAuthentication.func1(0x7f0a860eda60, 0xc01dd353f0, 0xc01eac5100)
net/http.HandlerFunc.ServeHTTP(0xc01abc7360, 0x7f0a860eda60, 0xc01dd353f0, 0xc01eac5100)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.(*timeoutHandler).ServeHTTP.func1(0xc01eb00000, 0xc01ac164e0, 0x60d7720, 0xc01dd353f0, 0xc01eac5100)
created by k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.(*timeoutHandler).ServeHTTP

logging error output: "[+]ping ok\n[+]log ok\n[+]etcd ok\n[+]poststarthook/generic-apiserver-start-informers ok\n[+]poststarthook/bootstrap-controller ok\n[-]poststarthook/rbac/bootstrap-roles failed: reason withheld\n[+]poststarthook/scheduling/bootstrap-system-priority-classes ok\n[+]poststarthook/ca-registration ok\nhealthz check failed\n"
 [Go-http-client/1.1 127.0.0.1:34398]
I0212 07:24:29.337962  124047 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system:controller:generic-garbage-collector: (1.187843ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:34398]
I0212 07:24:29.358614  124047 wrap.go:47] POST /apis/rbac.authorization.k8s.io/v1/clusterrolebindings: (2.333695ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:34398]
I0212 07:24:29.358908  124047 storage_rbac.go:215] created clusterrolebinding.rbac.authorization.k8s.io/system:controller:generic-garbage-collector
I0212 07:24:29.377958  124047 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system:controller:horizontal-pod-autoscaler: (1.418612ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:34398]
I0212 07:24:29.398308  124047 wrap.go:47] POST /apis/rbac.authorization.k8s.io/v1/clusterrolebindings: (2.074406ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:34398]
I0212 07:24:29.398560  124047 storage_rbac.go:215] created clusterrolebinding.rbac.authorization.k8s.io/system:controller:horizontal-pod-autoscaler
I0212 07:24:29.418888  124047 healthz.go:170] healthz check poststarthook/rbac/bootstrap-roles failed: not finished
I0212 07:24:29.419118  124047 wrap.go:47] GET /healthz: (2.029451ms) 500
goroutine 28760 [running]:
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/httplog.(*respLogger).recordStatus(0xc01e947810, 0x1f4)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/httplog.(*respLogger).WriteHeader(0xc01e947810, 0x1f4)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.(*baseTimeoutWriter).WriteHeader(0xc01ea93da0, 0x1f4)
net/http.Error(0x7f0a860eda60, 0xc01e9662f8, 0xc01ea46640, 0x136, 0x1f4)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/healthz.handleRootHealthz.func1(0x7f0a860eda60, 0xc01e9662f8, 0xc01eb5e500)
net/http.HandlerFunc.ServeHTTP(0xc01dd48b60, 0x7f0a860eda60, 0xc01e9662f8, 0xc01eb5e500)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/mux.(*pathHandler).ServeHTTP(0xc01dd50800, 0x7f0a860eda60, 0xc01e9662f8, 0xc01eb5e500)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/mux.(*PathRecorderMux).ServeHTTP(0xc013d3e380, 0x7f0a860eda60, 0xc01e9662f8, 0xc01eb5e500)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server.director.ServeHTTP(0x41bac8d, 0xe, 0xc01ab93320, 0xc013d3e380, 0x7f0a860eda60, 0xc01e9662f8, 0xc01eb5e500)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/filters.WithAuthorization.func1(0x7f0a860eda60, 0xc01e9662f8, 0xc01eb5e500)
net/http.HandlerFunc.ServeHTTP(0xc01abb9140, 0x7f0a860eda60, 0xc01e9662f8, 0xc01eb5e500)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.WithMaxInFlightLimit.func1(0x7f0a860eda60, 0xc01e9662f8, 0xc01eb5e500)
net/http.HandlerFunc.ServeHTTP(0xc013d3ca20, 0x7f0a860eda60, 0xc01e9662f8, 0xc01eb5e500)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/filters.WithImpersonation.func1(0x7f0a860eda60, 0xc01e9662f8, 0xc01eb5e500)
net/http.HandlerFunc.ServeHTTP(0xc01abb9180, 0x7f0a860eda60, 0xc01e9662f8, 0xc01eb5e500)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/filters.WithAuthentication.func1(0x7f0a860eda60, 0xc01e9662f8, 0xc01eb5e400)
net/http.HandlerFunc.ServeHTTP(0xc01abc7360, 0x7f0a860eda60, 0xc01e9662f8, 0xc01eb5e400)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.(*timeoutHandler).ServeHTTP.func1(0xc01e6274a0, 0xc01ac164e0, 0x60d7720, 0xc01e9662f8, 0xc01eb5e400)
created by k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.(*timeoutHandler).ServeHTTP

logging error output: "[+]ping ok\n[+]log ok\n[+]etcd ok\n[+]poststarthook/generic-apiserver-start-informers ok\n[+]poststarthook/bootstrap-controller ok\n[-]poststarthook/rbac/bootstrap-roles failed: reason withheld\n[+]poststarthook/scheduling/bootstrap-system-priority-classes ok\n[+]poststarthook/ca-registration ok\nhealthz check failed\n"
 [Go-http-client/1.1 127.0.0.1:34398]
I0212 07:24:29.419790  124047 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system:controller:job-controller: (2.401795ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:34658]
I0212 07:24:29.438805  124047 wrap.go:47] POST /apis/rbac.authorization.k8s.io/v1/clusterrolebindings: (2.44341ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:34658]
I0212 07:24:29.439093  124047 storage_rbac.go:215] created clusterrolebinding.rbac.authorization.k8s.io/system:controller:job-controller
I0212 07:24:29.458992  124047 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system:controller:namespace-controller: (2.76759ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:34658]
I0212 07:24:29.478781  124047 wrap.go:47] POST /apis/rbac.authorization.k8s.io/v1/clusterrolebindings: (2.508308ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:34658]
I0212 07:24:29.479163  124047 storage_rbac.go:215] created clusterrolebinding.rbac.authorization.k8s.io/system:controller:namespace-controller
I0212 07:24:29.497703  124047 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system:controller:node-controller: (1.506148ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:34658]
I0212 07:24:29.518096  124047 healthz.go:170] healthz check poststarthook/rbac/bootstrap-roles failed: not finished
I0212 07:24:29.518265  124047 wrap.go:47] GET /healthz: (1.009633ms) 500
goroutine 28791 [running]:
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/httplog.(*respLogger).recordStatus(0xc01eabe700, 0x1f4)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/httplog.(*respLogger).WriteHeader(0xc01eabe700, 0x1f4)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.(*baseTimeoutWriter).WriteHeader(0xc01ea91820, 0x1f4)
net/http.Error(0x7f0a860eda60, 0xc01e56e670, 0xc019c1d2c0, 0x136, 0x1f4)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/healthz.handleRootHealthz.func1(0x7f0a860eda60, 0xc01e56e670, 0xc01eb88800)
net/http.HandlerFunc.ServeHTTP(0xc01dd48b60, 0x7f0a860eda60, 0xc01e56e670, 0xc01eb88800)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/mux.(*pathHandler).ServeHTTP(0xc01dd50800, 0x7f0a860eda60, 0xc01e56e670, 0xc01eb88800)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/mux.(*PathRecorderMux).ServeHTTP(0xc013d3e380, 0x7f0a860eda60, 0xc01e56e670, 0xc01eb88800)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server.director.ServeHTTP(0x41bac8d, 0xe, 0xc01ab93320, 0xc013d3e380, 0x7f0a860eda60, 0xc01e56e670, 0xc01eb88800)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/filters.WithAuthorization.func1(0x7f0a860eda60, 0xc01e56e670, 0xc01eb88800)
net/http.HandlerFunc.ServeHTTP(0xc01abb9140, 0x7f0a860eda60, 0xc01e56e670, 0xc01eb88800)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.WithMaxInFlightLimit.func1(0x7f0a860eda60, 0xc01e56e670, 0xc01eb88800)
net/http.HandlerFunc.ServeHTTP(0xc013d3ca20, 0x7f0a860eda60, 0xc01e56e670, 0xc01eb88800)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/filters.WithImpersonation.func1(0x7f0a860eda60, 0xc01e56e670, 0xc01eb88800)
net/http.HandlerFunc.ServeHTTP(0xc01abb9180, 0x7f0a860eda60, 0xc01e56e670, 0xc01eb88800)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/filters.WithAuthentication.func1(0x7f0a860eda60, 0xc01e56e670, 0xc01eb88700)
net/http.HandlerFunc.ServeHTTP(0xc01abc7360, 0x7f0a860eda60, 0xc01e56e670, 0xc01eb88700)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.(*timeoutHandler).ServeHTTP.func1(0xc01eb86840, 0xc01ac164e0, 0x60d7720, 0xc01e56e670, 0xc01eb88700)
created by k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.(*timeoutHandler).ServeHTTP

logging error output: "[+]ping ok\n[+]log ok\n[+]etcd ok\n[+]poststarthook/generic-apiserver-start-informers ok\n[+]poststarthook/bootstrap-controller ok\n[-]poststarthook/rbac/bootstrap-roles failed: reason withheld\n[+]poststarthook/scheduling/bootstrap-system-priority-classes ok\n[+]poststarthook/ca-registration ok\nhealthz check failed\n"
 [Go-http-client/1.1 127.0.0.1:34398]
I0212 07:24:29.519237  124047 wrap.go:47] POST /apis/rbac.authorization.k8s.io/v1/clusterrolebindings: (2.64759ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:34658]
I0212 07:24:29.519525  124047 storage_rbac.go:215] created clusterrolebinding.rbac.authorization.k8s.io/system:controller:node-controller
I0212 07:24:29.539415  124047 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system:controller:persistent-volume-binder: (2.578945ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:34658]
I0212 07:24:29.558567  124047 wrap.go:47] POST /apis/rbac.authorization.k8s.io/v1/clusterrolebindings: (1.953917ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:34658]
I0212 07:24:29.558910  124047 storage_rbac.go:215] created clusterrolebinding.rbac.authorization.k8s.io/system:controller:persistent-volume-binder
I0212 07:24:29.577305  124047 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system:controller:pod-garbage-collector: (1.017895ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:34658]
I0212 07:24:29.598480  124047 wrap.go:47] POST /apis/rbac.authorization.k8s.io/v1/clusterrolebindings: (1.798141ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:34658]
I0212 07:24:29.598782  124047 storage_rbac.go:215] created clusterrolebinding.rbac.authorization.k8s.io/system:controller:pod-garbage-collector
I0212 07:24:29.617833  124047 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system:controller:replicaset-controller: (1.540388ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:34658]
I0212 07:24:29.618099  124047 healthz.go:170] healthz check poststarthook/rbac/bootstrap-roles failed: not finished
I0212 07:24:29.618324  124047 wrap.go:47] GET /healthz: (1.277678ms) 500
goroutine 28823 [running]:
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/httplog.(*respLogger).recordStatus(0xc01eba08c0, 0x1f4)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/httplog.(*respLogger).WriteHeader(0xc01eba08c0, 0x1f4)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.(*baseTimeoutWriter).WriteHeader(0xc01ebcc5a0, 0x1f4)
net/http.Error(0x7f0a860eda60, 0xc01e966518, 0xc019c1d680, 0x136, 0x1f4)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/healthz.handleRootHealthz.func1(0x7f0a860eda60, 0xc01e966518, 0xc01ebb6c00)
net/http.HandlerFunc.ServeHTTP(0xc01dd48b60, 0x7f0a860eda60, 0xc01e966518, 0xc01ebb6c00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/mux.(*pathHandler).ServeHTTP(0xc01dd50800, 0x7f0a860eda60, 0xc01e966518, 0xc01ebb6c00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/mux.(*PathRecorderMux).ServeHTTP(0xc013d3e380, 0x7f0a860eda60, 0xc01e966518, 0xc01ebb6c00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server.director.ServeHTTP(0x41bac8d, 0xe, 0xc01ab93320, 0xc013d3e380, 0x7f0a860eda60, 0xc01e966518, 0xc01ebb6c00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/filters.WithAuthorization.func1(0x7f0a860eda60, 0xc01e966518, 0xc01ebb6c00)
net/http.HandlerFunc.ServeHTTP(0xc01abb9140, 0x7f0a860eda60, 0xc01e966518, 0xc01ebb6c00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.WithMaxInFlightLimit.func1(0x7f0a860eda60, 0xc01e966518, 0xc01ebb6c00)
net/http.HandlerFunc.ServeHTTP(0xc013d3ca20, 0x7f0a860eda60, 0xc01e966518, 0xc01ebb6c00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/filters.WithImpersonation.func1(0x7f0a860eda60, 0xc01e966518, 0xc01ebb6c00)
net/http.HandlerFunc.ServeHTTP(0xc01abb9180, 0x7f0a860eda60, 0xc01e966518, 0xc01ebb6c00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/filters.WithAuthentication.func1(0x7f0a860eda60, 0xc01e966518, 0xc01ebb6b00)
net/http.HandlerFunc.ServeHTTP(0xc01abc7360, 0x7f0a860eda60, 0xc01e966518, 0xc01ebb6b00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.(*timeoutHandler).ServeHTTP.func1(0xc01eb935c0, 0xc01ac164e0, 0x60d7720, 0xc01e966518, 0xc01ebb6b00)
created by k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.(*timeoutHandler).ServeHTTP

logging error output: "[+]ping ok\n[+]log ok\n[+]etcd ok\n[+]poststarthook/generic-apiserver-start-informers ok\n[+]poststarthook/bootstrap-controller ok\n[-]poststarthook/rbac/bootstrap-roles failed: reason withheld\n[+]poststarthook/scheduling/bootstrap-system-priority-classes ok\n[+]poststarthook/ca-registration ok\nhealthz check failed\n"
 [Go-http-client/1.1 127.0.0.1:34398]
I0212 07:24:29.639931  124047 wrap.go:47] POST /apis/rbac.authorization.k8s.io/v1/clusterrolebindings: (3.675983ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:34398]
I0212 07:24:29.640383  124047 storage_rbac.go:215] created clusterrolebinding.rbac.authorization.k8s.io/system:controller:replicaset-controller
I0212 07:24:29.657855  124047 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system:controller:replication-controller: (1.586385ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:34398]
I0212 07:24:29.679139  124047 wrap.go:47] POST /apis/rbac.authorization.k8s.io/v1/clusterrolebindings: (2.75338ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:34398]
I0212 07:24:29.679441  124047 storage_rbac.go:215] created clusterrolebinding.rbac.authorization.k8s.io/system:controller:replication-controller
I0212 07:24:29.697758  124047 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system:controller:resourcequota-controller: (1.419418ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:34398]
I0212 07:24:29.718203  124047 healthz.go:170] healthz check poststarthook/rbac/bootstrap-roles failed: not finished
I0212 07:24:29.718579  124047 wrap.go:47] GET /healthz: (1.644473ms) 500
goroutine 28795 [running]:
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/httplog.(*respLogger).recordStatus(0xc01eabee70, 0x1f4)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/httplog.(*respLogger).WriteHeader(0xc01eabee70, 0x1f4)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.(*baseTimeoutWriter).WriteHeader(0xc01ec5cb00, 0x1f4)
net/http.Error(0x7f0a860eda60, 0xc01e56e7a8, 0xc019c1da40, 0x136, 0x1f4)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/healthz.handleRootHealthz.func1(0x7f0a860eda60, 0xc01e56e7a8, 0xc01eb89600)
net/http.HandlerFunc.ServeHTTP(0xc01dd48b60, 0x7f0a860eda60, 0xc01e56e7a8, 0xc01eb89600)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/mux.(*pathHandler).ServeHTTP(0xc01dd50800, 0x7f0a860eda60, 0xc01e56e7a8, 0xc01eb89600)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/mux.(*PathRecorderMux).ServeHTTP(0xc013d3e380, 0x7f0a860eda60, 0xc01e56e7a8, 0xc01eb89600)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server.director.ServeHTTP(0x41bac8d, 0xe, 0xc01ab93320, 0xc013d3e380, 0x7f0a860eda60, 0xc01e56e7a8, 0xc01eb89600)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/filters.WithAuthorization.func1(0x7f0a860eda60, 0xc01e56e7a8, 0xc01eb89600)
net/http.HandlerFunc.ServeHTTP(0xc01abb9140, 0x7f0a860eda60, 0xc01e56e7a8, 0xc01eb89600)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.WithMaxInFlightLimit.func1(0x7f0a860eda60, 0xc01e56e7a8, 0xc01eb89600)
net/http.HandlerFunc.ServeHTTP(0xc013d3ca20, 0x7f0a860eda60, 0xc01e56e7a8, 0xc01eb89600)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/filters.WithImpersonation.func1(0x7f0a860eda60, 0xc01e56e7a8, 0xc01eb89600)
net/http.HandlerFunc.ServeHTTP(0xc01abb9180, 0x7f0a860eda60, 0xc01e56e7a8, 0xc01eb89600)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/filters.WithAuthentication.func1(0x7f0a860eda60, 0xc01e56e7a8, 0xc01eb89500)
net/http.HandlerFunc.ServeHTTP(0xc01abc7360, 0x7f0a860eda60, 0xc01e56e7a8, 0xc01eb89500)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.(*timeoutHandler).ServeHTTP.func1(0xc01eb87080, 0xc01ac164e0, 0x60d7720, 0xc01e56e7a8, 0xc01eb89500)
created by k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.(*timeoutHandler).ServeHTTP

logging error output: "[+]ping ok\n[+]log ok\n[+]etcd ok\n[+]poststarthook/generic-apiserver-start-informers ok\n[+]poststarthook/bootstrap-controller ok\n[-]poststarthook/rbac/bootstrap-roles failed: reason withheld\n[+]poststarthook/scheduling/bootstrap-system-priority-classes ok\n[+]poststarthook/ca-registration ok\nhealthz check failed\n"
 [Go-http-client/1.1 127.0.0.1:34658]
I0212 07:24:29.718617  124047 wrap.go:47] POST /apis/rbac.authorization.k8s.io/v1/clusterrolebindings: (2.355234ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:34398]
I0212 07:24:29.718821  124047 storage_rbac.go:215] created clusterrolebinding.rbac.authorization.k8s.io/system:controller:resourcequota-controller
I0212 07:24:29.737887  124047 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system:controller:route-controller: (1.636403ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:34398]
I0212 07:24:29.758785  124047 wrap.go:47] POST /apis/rbac.authorization.k8s.io/v1/clusterrolebindings: (2.488757ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:34398]
I0212 07:24:29.759076  124047 storage_rbac.go:215] created clusterrolebinding.rbac.authorization.k8s.io/system:controller:route-controller
I0212 07:24:29.778118  124047 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system:controller:service-account-controller: (1.833362ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:34398]
I0212 07:24:29.801169  124047 wrap.go:47] POST /apis/rbac.authorization.k8s.io/v1/clusterrolebindings: (4.858838ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:34398]
I0212 07:24:29.801481  124047 storage_rbac.go:215] created clusterrolebinding.rbac.authorization.k8s.io/system:controller:service-account-controller
I0212 07:24:29.817977  124047 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system:controller:service-controller: (1.573744ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:34398]
I0212 07:24:29.818279  124047 healthz.go:170] healthz check poststarthook/rbac/bootstrap-roles failed: not finished
I0212 07:24:29.818539  124047 wrap.go:47] GET /healthz: (1.425719ms) 500
goroutine 28853 [running]:
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/httplog.(*respLogger).recordStatus(0xc01ebf30a0, 0x1f4)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/httplog.(*respLogger).WriteHeader(0xc01ebf30a0, 0x1f4)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.(*baseTimeoutWriter).WriteHeader(0xc01ebed980, 0x1f4)
net/http.Error(0x7f0a860eda60, 0xc01dd35838, 0xc01e8c8640, 0x136, 0x1f4)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/healthz.handleRootHealthz.func1(0x7f0a860eda60, 0xc01dd35838, 0xc01ecd0400)
net/http.HandlerFunc.ServeHTTP(0xc01dd48b60, 0x7f0a860eda60, 0xc01dd35838, 0xc01ecd0400)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/mux.(*pathHandler).ServeHTTP(0xc01dd50800, 0x7f0a860eda60, 0xc01dd35838, 0xc01ecd0400)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/mux.(*PathRecorderMux).ServeHTTP(0xc013d3e380, 0x7f0a860eda60, 0xc01dd35838, 0xc01ecd0400)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server.director.ServeHTTP(0x41bac8d, 0xe, 0xc01ab93320, 0xc013d3e380, 0x7f0a860eda60, 0xc01dd35838, 0xc01ecd0400)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/filters.WithAuthorization.func1(0x7f0a860eda60, 0xc01dd35838, 0xc01ecd0400)
net/http.HandlerFunc.ServeHTTP(0xc01abb9140, 0x7f0a860eda60, 0xc01dd35838, 0xc01ecd0400)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.WithMaxInFlightLimit.func1(0x7f0a860eda60, 0xc01dd35838, 0xc01ecd0400)
net/http.HandlerFunc.ServeHTTP(0xc013d3ca20, 0x7f0a860eda60, 0xc01dd35838, 0xc01ecd0400)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/filters.WithImpersonation.func1(0x7f0a860eda60, 0xc01dd35838, 0xc01ecd0400)
net/http.HandlerFunc.ServeHTTP(0xc01abb9180, 0x7f0a860eda60, 0xc01dd35838, 0xc01ecd0400)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/filters.WithAuthentication.func1(0x7f0a860eda60, 0xc01dd35838, 0xc01ecd0300)
net/http.HandlerFunc.ServeHTTP(0xc01abc7360, 0x7f0a860eda60, 0xc01dd35838, 0xc01ecd0300)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.(*timeoutHandler).ServeHTTP.func1(0xc01ec204e0, 0xc01ac164e0, 0x60d7720, 0xc01dd35838, 0xc01ecd0300)
created by k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.(*timeoutHandler).ServeHTTP

logging error output: "[+]ping ok\n[+]log ok\n[+]etcd ok\n[+]poststarthook/generic-apiserver-start-informers ok\n[+]poststarthook/bootstrap-controller ok\n[-]poststarthook/rbac/bootstrap-roles failed: reason withheld\n[+]poststarthook/scheduling/bootstrap-system-priority-classes ok\n[+]poststarthook/ca-registration ok\nhealthz check failed\n"
 [Go-http-client/1.1 127.0.0.1:34658]
I0212 07:24:29.838436  124047 wrap.go:47] POST /apis/rbac.authorization.k8s.io/v1/clusterrolebindings: (2.13697ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:34658]
I0212 07:24:29.839203  124047 storage_rbac.go:215] created clusterrolebinding.rbac.authorization.k8s.io/system:controller:service-controller
I0212 07:24:29.857697  124047 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system:controller:statefulset-controller: (1.418213ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:34658]
I0212 07:24:29.878716  124047 wrap.go:47] POST /apis/rbac.authorization.k8s.io/v1/clusterrolebindings: (2.407627ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:34658]
I0212 07:24:29.879166  124047 storage_rbac.go:215] created clusterrolebinding.rbac.authorization.k8s.io/system:controller:statefulset-controller
I0212 07:24:29.897643  124047 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system:controller:ttl-controller: (1.377722ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:34658]
I0212 07:24:29.917960  124047 healthz.go:170] healthz check poststarthook/rbac/bootstrap-roles failed: not finished
I0212 07:24:29.918203  124047 wrap.go:47] GET /healthz: (1.283375ms) 500
goroutine 28800 [running]:
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/httplog.(*respLogger).recordStatus(0xc01eabfe30, 0x1f4)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/httplog.(*respLogger).WriteHeader(0xc01eabfe30, 0x1f4)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.(*baseTimeoutWriter).WriteHeader(0xc01ecd48e0, 0x1f4)
net/http.Error(0x7f0a860eda60, 0xc01e56e948, 0xc01e83ac80, 0x136, 0x1f4)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/healthz.handleRootHealthz.func1(0x7f0a860eda60, 0xc01e56e948, 0xc01ecbe800)
net/http.HandlerFunc.ServeHTTP(0xc01dd48b60, 0x7f0a860eda60, 0xc01e56e948, 0xc01ecbe800)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/mux.(*pathHandler).ServeHTTP(0xc01dd50800, 0x7f0a860eda60, 0xc01e56e948, 0xc01ecbe800)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/mux.(*PathRecorderMux).ServeHTTP(0xc013d3e380, 0x7f0a860eda60, 0xc01e56e948, 0xc01ecbe800)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server.director.ServeHTTP(0x41bac8d, 0xe, 0xc01ab93320, 0xc013d3e380, 0x7f0a860eda60, 0xc01e56e948, 0xc01ecbe800)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/filters.WithAuthorization.func1(0x7f0a860eda60, 0xc01e56e948, 0xc01ecbe800)
net/http.HandlerFunc.ServeHTTP(0xc01abb9140, 0x7f0a860eda60, 0xc01e56e948, 0xc01ecbe800)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.WithMaxInFlightLimit.func1(0x7f0a860eda60, 0xc01e56e948, 0xc01ecbe800)
net/http.HandlerFunc.ServeHTTP(0xc013d3ca20, 0x7f0a860eda60, 0xc01e56e948, 0xc01ecbe800)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/filters.WithImpersonation.func1(0x7f0a860eda60, 0xc01e56e948, 0xc01ecbe800)
net/http.HandlerFunc.ServeHTTP(0xc01abb9180, 0x7f0a860eda60, 0xc01e56e948, 0xc01ecbe800)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/filters.WithAuthentication.func1(0x7f0a860eda60, 0xc01e56e948, 0xc01ecbe700)
net/http.HandlerFunc.ServeHTTP(0xc01abc7360, 0x7f0a860eda60, 0xc01e56e948, 0xc01ecbe700)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.(*timeoutHandler).ServeHTTP.func1(0xc01ed60000, 0xc01ac164e0, 0x60d7720, 0xc01e56e948, 0xc01ecbe700)
created by k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.(*timeoutHandler).ServeHTTP

logging error output: "[+]ping ok\n[+]log ok\n[+]etcd ok\n[+]poststarthook/generic-apiserver-start-informers ok\n[+]poststarthook/bootstrap-controller ok\n[-]poststarthook/rbac/bootstrap-roles failed: reason withheld\n[+]poststarthook/scheduling/bootstrap-system-priority-classes ok\n[+]poststarthook/ca-registration ok\nhealthz check failed\n"
 [Go-http-client/1.1 127.0.0.1:34398]
I0212 07:24:29.918447  124047 wrap.go:47] POST /apis/rbac.authorization.k8s.io/v1/clusterrolebindings: (2.187135ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:34658]
I0212 07:24:29.918753  124047 storage_rbac.go:215] created clusterrolebinding.rbac.authorization.k8s.io/system:controller:ttl-controller
I0212 07:24:29.937750  124047 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system:controller:certificate-controller: (1.33502ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:34658]
I0212 07:24:29.958676  124047 wrap.go:47] POST /apis/rbac.authorization.k8s.io/v1/clusterrolebindings: (2.396262ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:34658]
I0212 07:24:29.958968  124047 storage_rbac.go:215] created clusterrolebinding.rbac.authorization.k8s.io/system:controller:certificate-controller
I0212 07:24:29.979989  124047 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system:controller:pvc-protection-controller: (1.322666ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:34658]
I0212 07:24:29.998353  124047 wrap.go:47] POST /apis/rbac.authorization.k8s.io/v1/clusterrolebindings: (2.067833ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:34658]
I0212 07:24:29.998653  124047 storage_rbac.go:215] created clusterrolebinding.rbac.authorization.k8s.io/system:controller:pvc-protection-controller
I0212 07:24:30.018626  124047 healthz.go:170] healthz check poststarthook/rbac/bootstrap-roles failed: not finished
I0212 07:24:30.018782  124047 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system:controller:pv-protection-controller: (1.641083ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:34398]
I0212 07:24:30.018828  124047 wrap.go:47] GET /healthz: (1.260813ms) 500
goroutine 28875 [running]:
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/httplog.(*respLogger).recordStatus(0xc01ed9e620, 0x1f4)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/httplog.(*respLogger).WriteHeader(0xc01ed9e620, 0x1f4)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.(*baseTimeoutWriter).WriteHeader(0xc01edaa980, 0x1f4)
net/http.Error(0x7f0a860eda60, 0xc01e5949f0, 0xc010fdf900, 0x136, 0x1f4)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/healthz.handleRootHealthz.func1(0x7f0a860eda60, 0xc01e5949f0, 0xc01ecefe00)
net/http.HandlerFunc.ServeHTTP(0xc01dd48b60, 0x7f0a860eda60, 0xc01e5949f0, 0xc01ecefe00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/mux.(*pathHandler).ServeHTTP(0xc01dd50800, 0x7f0a860eda60, 0xc01e5949f0, 0xc01ecefe00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/mux.(*PathRecorderMux).ServeHTTP(0xc013d3e380, 0x7f0a860eda60, 0xc01e5949f0, 0xc01ecefe00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server.director.ServeHTTP(0x41bac8d, 0xe, 0xc01ab93320, 0xc013d3e380, 0x7f0a860eda60, 0xc01e5949f0, 0xc01ecefe00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/filters.WithAuthorization.func1(0x7f0a860eda60, 0xc01e5949f0, 0xc01ecefe00)
net/http.HandlerFunc.ServeHTTP(0xc01abb9140, 0x7f0a860eda60, 0xc01e5949f0, 0xc01ecefe00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.WithMaxInFlightLimit.func1(0x7f0a860eda60, 0xc01e5949f0, 0xc01ecefe00)
net/http.HandlerFunc.ServeHTTP(0xc013d3ca20, 0x7f0a860eda60, 0xc01e5949f0, 0xc01ecefe00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/filters.WithImpersonation.func1(0x7f0a860eda60, 0xc01e5949f0, 0xc01ecefe00)
net/http.HandlerFunc.ServeHTTP(0xc01abb9180, 0x7f0a860eda60, 0xc01e5949f0, 0xc01ecefe00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/filters.WithAuthentication.func1(0x7f0a860eda60, 0xc01e5949f0, 0xc01ecefd00)
net/http.HandlerFunc.ServeHTTP(0xc01abc7360, 0x7f0a860eda60, 0xc01e5949f0, 0xc01ecefd00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.(*timeoutHandler).ServeHTTP.func1(0xc01ea1d620, 0xc01ac164e0, 0x60d7720, 0xc01e5949f0, 0xc01ecefd00)
created by k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.(*timeoutHandler).ServeHTTP

logging error output: "[+]ping ok\n[+]log ok\n[+]etcd ok\n[+]poststarthook/generic-apiserver-start-informers ok\n[+]poststarthook/bootstrap-controller ok\n[-]poststarthook/rbac/bootstrap-roles failed: reason withheld\n[+]poststarthook/scheduling/bootstrap-system-priority-classes ok\n[+]poststarthook/ca-registration ok\nhealthz check failed\n"
 [Go-http-client/1.1 127.0.0.1:34658]
I0212 07:24:30.038681  124047 wrap.go:47] POST /apis/rbac.authorization.k8s.io/v1/clusterrolebindings: (2.361901ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:34398]
I0212 07:24:30.039090  124047 storage_rbac.go:215] created clusterrolebinding.rbac.authorization.k8s.io/system:controller:pv-protection-controller
I0212 07:24:30.057728  124047 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/namespaces/kube-system/roles/extension-apiserver-authentication-reader: (1.483239ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:34398]
I0212 07:24:30.059580  124047 wrap.go:47] GET /api/v1/namespaces/kube-system: (1.430106ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:34398]
I0212 07:24:30.078696  124047 wrap.go:47] POST /apis/rbac.authorization.k8s.io/v1/namespaces/kube-system/roles: (2.200963ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:34398]
I0212 07:24:30.078995  124047 storage_rbac.go:246] created role.rbac.authorization.k8s.io/extension-apiserver-authentication-reader in kube-system
I0212 07:24:30.097848  124047 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/namespaces/kube-system/roles/system:controller:bootstrap-signer: (1.42459ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:34398]
I0212 07:24:30.099720  124047 wrap.go:47] GET /api/v1/namespaces/kube-system: (1.368727ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:34398]
I0212 07:24:30.117778  124047 healthz.go:170] healthz check poststarthook/rbac/bootstrap-roles failed: not finished
I0212 07:24:30.118293  124047 wrap.go:47] GET /healthz: (1.32061ms) 500
goroutine 28914 [running]:
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/httplog.(*respLogger).recordStatus(0xc01ed9f180, 0x1f4)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/httplog.(*respLogger).WriteHeader(0xc01ed9f180, 0x1f4)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.(*baseTimeoutWriter).WriteHeader(0xc01ee18540, 0x1f4)
net/http.Error(0x7f0a860eda60, 0xc01e594b28, 0xc019c1de00, 0x136, 0x1f4)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/healthz.handleRootHealthz.func1(0x7f0a860eda60, 0xc01e594b28, 0xc01edd5600)
net/http.HandlerFunc.ServeHTTP(0xc01dd48b60, 0x7f0a860eda60, 0xc01e594b28, 0xc01edd5600)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/mux.(*pathHandler).ServeHTTP(0xc01dd50800, 0x7f0a860eda60, 0xc01e594b28, 0xc01edd5600)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/mux.(*PathRecorderMux).ServeHTTP(0xc013d3e380, 0x7f0a860eda60, 0xc01e594b28, 0xc01edd5600)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server.director.ServeHTTP(0x41bac8d, 0xe, 0xc01ab93320, 0xc013d3e380, 0x7f0a860eda60, 0xc01e594b28, 0xc01edd5600)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/filters.WithAuthorization.func1(0x7f0a860eda60, 0xc01e594b28, 0xc01edd5600)
net/http.HandlerFunc.ServeHTTP(0xc01abb9140, 0x7f0a860eda60, 0xc01e594b28, 0xc01edd5600)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.WithMaxInFlightLimit.func1(0x7f0a860eda60, 0xc01e594b28, 0xc01edd5600)
net/http.HandlerFunc.ServeHTTP(0xc013d3ca20, 0x7f0a860eda60, 0xc01e594b28, 0xc01edd5600)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/filters.WithImpersonation.func1(0x7f0a860eda60, 0xc01e594b28, 0xc01edd5600)
net/http.HandlerFunc.ServeHTTP(0xc01abb9180, 0x7f0a860eda60, 0xc01e594b28, 0xc01edd5600)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/filters.WithAuthentication.func1(0x7f0a860eda60, 0xc01e594b28, 0xc01edd5500)
net/http.HandlerFunc.ServeHTTP(0xc01abc7360, 0x7f0a860eda60, 0xc01e594b28, 0xc01edd5500)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.(*timeoutHandler).ServeHTTP.func1(0xc01ee021e0, 0xc01ac164e0, 0x60d7720, 0xc01e594b28, 0xc01edd5500)
created by k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.(*timeoutHandler).ServeHTTP

logging error output: "[+]ping ok\n[+]log ok\n[+]etcd ok\n[+]poststarthook/generic-apiserver-start-informers ok\n[+]poststarthook/bootstrap-controller ok\n[-]poststarthook/rbac/bootstrap-roles failed: reason withheld\n[+]poststarthook/scheduling/bootstrap-system-priority-classes ok\n[+]poststarthook/ca-registration ok\nhealthz check failed\n"
 [Go-http-client/1.1 127.0.0.1:34658]
I0212 07:24:30.118527  124047 wrap.go:47] POST /apis/rbac.authorization.k8s.io/v1/namespaces/kube-system/roles: (2.267326ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:34398]
I0212 07:24:30.118791  124047 storage_rbac.go:246] created role.rbac.authorization.k8s.io/system:controller:bootstrap-signer in kube-system
I0212 07:24:30.138037  124047 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/namespaces/kube-system/roles/system:controller:cloud-provider: (1.748944ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:34398]
I0212 07:24:30.140037  124047 wrap.go:47] GET /api/v1/namespaces/kube-system: (1.400146ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:34398]
I0212 07:24:30.161026  124047 wrap.go:47] POST /apis/rbac.authorization.k8s.io/v1/namespaces/kube-system/roles: (4.79907ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:34398]
I0212 07:24:30.166105  124047 storage_rbac.go:246] created role.rbac.authorization.k8s.io/system:controller:cloud-provider in kube-system
I0212 07:24:30.177404  124047 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/namespaces/kube-system/roles/system:controller:token-cleaner: (1.226017ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:34398]
I0212 07:24:30.179225  124047 wrap.go:47] GET /api/v1/namespaces/kube-system: (1.344813ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:34398]
I0212 07:24:30.199426  124047 wrap.go:47] POST /apis/rbac.authorization.k8s.io/v1/namespaces/kube-system/roles: (3.190913ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:34398]
I0212 07:24:30.199876  124047 storage_rbac.go:246] created role.rbac.authorization.k8s.io/system:controller:token-cleaner in kube-system
I0212 07:24:30.217742  124047 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/namespaces/kube-system/roles/system::leader-locking-kube-controller-manager: (1.436008ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:34398]
I0212 07:24:30.218019  124047 healthz.go:170] healthz check poststarthook/rbac/bootstrap-roles failed: not finished
I0212 07:24:30.218214  124047 wrap.go:47] GET /healthz: (1.334842ms) 500
goroutine 28918 [running]:
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/httplog.(*respLogger).recordStatus(0xc01ee8a5b0, 0x1f4)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/httplog.(*respLogger).WriteHeader(0xc01ee8a5b0, 0x1f4)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.(*baseTimeoutWriter).WriteHeader(0xc01ee90b80, 0x1f4)
net/http.Error(0x7f0a860eda60, 0xc01e594ce0, 0xc01e938640, 0x136, 0x1f4)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/healthz.handleRootHealthz.func1(0x7f0a860eda60, 0xc01e594ce0, 0xc01ee78900)
net/http.HandlerFunc.ServeHTTP(0xc01dd48b60, 0x7f0a860eda60, 0xc01e594ce0, 0xc01ee78900)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/mux.(*pathHandler).ServeHTTP(0xc01dd50800, 0x7f0a860eda60, 0xc01e594ce0, 0xc01ee78900)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/mux.(*PathRecorderMux).ServeHTTP(0xc013d3e380, 0x7f0a860eda60, 0xc01e594ce0, 0xc01ee78900)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server.director.ServeHTTP(0x41bac8d, 0xe, 0xc01ab93320, 0xc013d3e380, 0x7f0a860eda60, 0xc01e594ce0, 0xc01ee78900)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/filters.WithAuthorization.func1(0x7f0a860eda60, 0xc01e594ce0, 0xc01ee78900)
net/http.HandlerFunc.ServeHTTP(0xc01abb9140, 0x7f0a860eda60, 0xc01e594ce0, 0xc01ee78900)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.WithMaxInFlightLimit.func1(0x7f0a860eda60, 0xc01e594ce0, 0xc01ee78900)
net/http.HandlerFunc.ServeHTTP(0xc013d3ca20, 0x7f0a860eda60, 0xc01e594ce0, 0xc01ee78900)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/filters.WithImpersonation.func1(0x7f0a860eda60, 0xc01e594ce0, 0xc01ee78900)
net/http.HandlerFunc.ServeHTTP(0xc01abb9180, 0x7f0a860eda60, 0xc01e594ce0, 0xc01ee78900)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/filters.WithAuthentication.func1(0x7f0a860eda60, 0xc01e594ce0, 0xc01ee78800)
net/http.HandlerFunc.ServeHTTP(0xc01abc7360, 0x7f0a860eda60, 0xc01e594ce0, 0xc01ee78800)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.(*timeoutHandler).ServeHTTP.func1(0xc01ee02e40, 0xc01ac164e0, 0x60d7720, 0xc01e594ce0, 0xc01ee78800)
created by k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.(*timeoutHandler).ServeHTTP

logging error output: "[+]ping ok\n[+]log ok\n[+]etcd ok\n[+]poststarthook/generic-apiserver-start-informers ok\n[+]poststarthook/bootstrap-controller ok\n[-]poststarthook/rbac/bootstrap-roles failed: reason withheld\n[+]poststarthook/scheduling/bootstrap-system-priority-classes ok\n[+]poststarthook/ca-registration ok\nhealthz check failed\n"
 [Go-http-client/1.1 127.0.0.1:34658]
I0212 07:24:30.219854  124047 wrap.go:47] GET /api/v1/namespaces/kube-system: (1.630475ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:34398]
I0212 07:24:30.238660  124047 wrap.go:47] POST /apis/rbac.authorization.k8s.io/v1/namespaces/kube-system/roles: (2.317245ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:34398]
I0212 07:24:30.238949  124047 storage_rbac.go:246] created role.rbac.authorization.k8s.io/system::leader-locking-kube-controller-manager in kube-system
I0212 07:24:30.257541  124047 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/namespaces/kube-system/roles/system::leader-locking-kube-scheduler: (1.337288ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:34398]
I0212 07:24:30.259325  124047 wrap.go:47] GET /api/v1/namespaces/kube-system: (1.302153ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:34398]
I0212 07:24:30.278243  124047 wrap.go:47] POST /apis/rbac.authorization.k8s.io/v1/namespaces/kube-system/roles: (1.982878ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:34398]
I0212 07:24:30.278579  124047 storage_rbac.go:246] created role.rbac.authorization.k8s.io/system::leader-locking-kube-scheduler in kube-system
I0212 07:24:30.297487  124047 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/namespaces/kube-public/roles/system:controller:bootstrap-signer: (1.222ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:34398]
I0212 07:24:30.299355  124047 wrap.go:47] GET /api/v1/namespaces/kube-public: (1.374709ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:34398]
I0212 07:24:30.319581  124047 wrap.go:47] POST /apis/rbac.authorization.k8s.io/v1/namespaces/kube-public/roles: (3.35427ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:34398]
I0212 07:24:30.319658  124047 healthz.go:170] healthz check poststarthook/rbac/bootstrap-roles failed: not finished
I0212 07:24:30.319978  124047 storage_rbac.go:246] created role.rbac.authorization.k8s.io/system:controller:bootstrap-signer in kube-public
I0212 07:24:30.323173  124047 wrap.go:47] GET /healthz: (5.831848ms) 500
goroutine 28948 [running]:
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/httplog.(*respLogger).recordStatus(0xc01ee2cfc0, 0x1f4)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/httplog.(*respLogger).WriteHeader(0xc01ee2cfc0, 0x1f4)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.(*baseTimeoutWriter).WriteHeader(0xc01ef02f20, 0x1f4)
net/http.Error(0x7f0a860eda60, 0xc01e56ee48, 0xc01ea46dc0, 0x136, 0x1f4)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/healthz.handleRootHealthz.func1(0x7f0a860eda60, 0xc01e56ee48, 0xc01eec8c00)
net/http.HandlerFunc.ServeHTTP(0xc01dd48b60, 0x7f0a860eda60, 0xc01e56ee48, 0xc01eec8c00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/mux.(*pathHandler).ServeHTTP(0xc01dd50800, 0x7f0a860eda60, 0xc01e56ee48, 0xc01eec8c00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/mux.(*PathRecorderMux).ServeHTTP(0xc013d3e380, 0x7f0a860eda60, 0xc01e56ee48, 0xc01eec8c00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server.director.ServeHTTP(0x41bac8d, 0xe, 0xc01ab93320, 0xc013d3e380, 0x7f0a860eda60, 0xc01e56ee48, 0xc01eec8c00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/filters.WithAuthorization.func1(0x7f0a860eda60, 0xc01e56ee48, 0xc01eec8c00)
net/http.HandlerFunc.ServeHTTP(0xc01abb9140, 0x7f0a860eda60, 0xc01e56ee48, 0xc01eec8c00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.WithMaxInFlightLimit.func1(0x7f0a860eda60, 0xc01e56ee48, 0xc01eec8c00)
net/http.HandlerFunc.ServeHTTP(0xc013d3ca20, 0x7f0a860eda60, 0xc01e56ee48, 0xc01eec8c00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/filters.WithImpersonation.func1(0x7f0a860eda60, 0xc01e56ee48, 0xc01eec8c00)
net/http.HandlerFunc.ServeHTTP(0xc01abb9180, 0x7f0a860eda60, 0xc01e56ee48, 0xc01eec8c00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/filters.WithAuthentication.func1(0x7f0a860eda60, 0xc01e56ee48, 0xc01eec8b00)
net/http.HandlerFunc.ServeHTTP(0xc01abc7360, 0x7f0a860eda60, 0xc01e56ee48, 0xc01eec8b00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.(*timeoutHandler).ServeHTTP.func1(0xc01ef20240, 0xc01ac164e0, 0x60d7720, 0xc01e56ee48, 0xc01eec8b00)
created by k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.(*timeoutHandler).ServeHTTP

logging error output: "[+]ping ok\n[+]log ok\n[+]etcd ok\n[+]poststarthook/generic-apiserver-start-informers ok\n[+]poststarthook/bootstrap-controller ok\n[-]poststarthook/rbac/bootstrap-roles failed: reason withheld\n[+]poststarthook/scheduling/bootstrap-system-priority-classes ok\n[+]poststarthook/ca-registration ok\nhealthz check failed\n"
 [Go-http-client/1.1 127.0.0.1:34658]
I0212 07:24:30.337942  124047 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/namespaces/kube-system/rolebindings/system::extension-apiserver-authentication-reader: (1.614854ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:34658]
I0212 07:24:30.339933  124047 wrap.go:47] GET /api/v1/namespaces/kube-system: (1.349469ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:34658]
I0212 07:24:30.358683  124047 wrap.go:47] POST /apis/rbac.authorization.k8s.io/v1/namespaces/kube-system/rolebindings: (2.35504ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:34658]
I0212 07:24:30.359011  124047 storage_rbac.go:276] created rolebinding.rbac.authorization.k8s.io/system::extension-apiserver-authentication-reader in kube-system
I0212 07:24:30.381237  124047 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/namespaces/kube-system/rolebindings/system::leader-locking-kube-controller-manager: (1.530112ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:34658]
I0212 07:24:30.384515  124047 wrap.go:47] GET /api/v1/namespaces/kube-system: (2.415069ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:34658]
I0212 07:24:30.398422  124047 wrap.go:47] POST /apis/rbac.authorization.k8s.io/v1/namespaces/kube-system/rolebindings: (2.197937ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:34658]
I0212 07:24:30.398689  124047 storage_rbac.go:276] created rolebinding.rbac.authorization.k8s.io/system::leader-locking-kube-controller-manager in kube-system
I0212 07:24:30.417713  124047 healthz.go:170] healthz check poststarthook/rbac/bootstrap-roles failed: not finished
I0212 07:24:30.417730  124047 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/namespaces/kube-system/rolebindings/system::leader-locking-kube-scheduler: (1.497713ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:34658]
I0212 07:24:30.417915  124047 wrap.go:47] GET /healthz: (944.031µs) 500
goroutine 28963 [running]:
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/httplog.(*respLogger).recordStatus(0xc01eebf110, 0x1f4)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/httplog.(*respLogger).WriteHeader(0xc01eebf110, 0x1f4)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.(*baseTimeoutWriter).WriteHeader(0xc01eed5e20, 0x1f4)
net/http.Error(0x7f0a860eda60, 0xc01ec0a808, 0xc01e938b40, 0x136, 0x1f4)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/healthz.handleRootHealthz.func1(0x7f0a860eda60, 0xc01ec0a808, 0xc01ef9e000)
net/http.HandlerFunc.ServeHTTP(0xc01dd48b60, 0x7f0a860eda60, 0xc01ec0a808, 0xc01ef9e000)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/mux.(*pathHandler).ServeHTTP(0xc01dd50800, 0x7f0a860eda60, 0xc01ec0a808, 0xc01ef9e000)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/mux.(*PathRecorderMux).ServeHTTP(0xc013d3e380, 0x7f0a860eda60, 0xc01ec0a808, 0xc01ef9e000)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server.director.ServeHTTP(0x41bac8d, 0xe, 0xc01ab93320, 0xc013d3e380, 0x7f0a860eda60, 0xc01ec0a808, 0xc01ef9e000)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/filters.WithAuthorization.func1(0x7f0a860eda60, 0xc01ec0a808, 0xc01ef9e000)
net/http.HandlerFunc.ServeHTTP(0xc01abb9140, 0x7f0a860eda60, 0xc01ec0a808, 0xc01ef9e000)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.WithMaxInFlightLimit.func1(0x7f0a860eda60, 0xc01ec0a808, 0xc01ef9e000)
net/http.HandlerFunc.ServeHTTP(0xc013d3ca20, 0x7f0a860eda60, 0xc01ec0a808, 0xc01ef9e000)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/filters.WithImpersonation.func1(0x7f0a860eda60, 0xc01ec0a808, 0xc01ef9e000)
net/http.HandlerFunc.ServeHTTP(0xc01abb9180, 0x7f0a860eda60, 0xc01ec0a808, 0xc01ef9e000)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/filters.WithAuthentication.func1(0x7f0a860eda60, 0xc01ec0a808, 0xc01eed7f00)
net/http.HandlerFunc.ServeHTTP(0xc01abc7360, 0x7f0a860eda60, 0xc01ec0a808, 0xc01eed7f00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.(*timeoutHandler).ServeHTTP.func1(0xc01ef9c060, 0xc01ac164e0, 0x60d7720, 0xc01ec0a808, 0xc01eed7f00)
created by k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.(*timeoutHandler).ServeHTTP

logging error output: "[+]ping ok\n[+]log ok\n[+]etcd ok\n[+]poststarthook/generic-apiserver-start-informers ok\n[+]poststarthook/bootstrap-controller ok\n[-]poststarthook/rbac/bootstrap-roles failed: reason withheld\n[+]poststarthook/scheduling/bootstrap-system-priority-classes ok\n[+]poststarthook/ca-registration ok\nhealthz check failed\n"
 [Go-http-client/1.1 127.0.0.1:34398]
I0212 07:24:30.419883  124047 wrap.go:47] GET /api/v1/namespaces/kube-system: (1.513422ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:34398]
I0212 07:24:30.439259  124047 wrap.go:47] POST /apis/rbac.authorization.k8s.io/v1/namespaces/kube-system/rolebindings: (2.986469ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:34398]
I0212 07:24:30.439583  124047 storage_rbac.go:276] created rolebinding.rbac.authorization.k8s.io/system::leader-locking-kube-scheduler in kube-system
I0212 07:24:30.459855  124047 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/namespaces/kube-system/rolebindings/system:controller:bootstrap-signer: (3.548534ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:34398]
I0212 07:24:30.461876  124047 wrap.go:47] GET /api/v1/namespaces/kube-system: (1.500594ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:34398]
I0212 07:24:30.478632  124047 wrap.go:47] POST /apis/rbac.authorization.k8s.io/v1/namespaces/kube-system/rolebindings: (2.426171ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:34398]
I0212 07:24:30.478894  124047 storage_rbac.go:276] created rolebinding.rbac.authorization.k8s.io/system:controller:bootstrap-signer in kube-system
I0212 07:24:30.497651  124047 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/namespaces/kube-system/rolebindings/system:controller:cloud-provider: (1.371943ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:34398]
I0212 07:24:30.499607  124047 wrap.go:47] GET /api/v1/namespaces/kube-system: (1.40498ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:34398]
I0212 07:24:30.518092  124047 healthz.go:170] healthz check poststarthook/rbac/bootstrap-roles failed: not finished
I0212 07:24:30.518263  124047 wrap.go:47] GET /healthz: (1.167718ms) 500
goroutine 28888 [running]:
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/httplog.(*respLogger).recordStatus(0xc01f01a070, 0x1f4)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/httplog.(*respLogger).WriteHeader(0xc01f01a070, 0x1f4)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.(*baseTimeoutWriter).WriteHeader(0xc01ef27fe0, 0x1f4)
net/http.Error(0x7f0a860eda60, 0xc01debb570, 0xc01e938f00, 0x136, 0x1f4)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/healthz.handleRootHealthz.func1(0x7f0a860eda60, 0xc01debb570, 0xc01efeb300)
net/http.HandlerFunc.ServeHTTP(0xc01dd48b60, 0x7f0a860eda60, 0xc01debb570, 0xc01efeb300)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/mux.(*pathHandler).ServeHTTP(0xc01dd50800, 0x7f0a860eda60, 0xc01debb570, 0xc01efeb300)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/mux.(*PathRecorderMux).ServeHTTP(0xc013d3e380, 0x7f0a860eda60, 0xc01debb570, 0xc01efeb300)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server.director.ServeHTTP(0x41bac8d, 0xe, 0xc01ab93320, 0xc013d3e380, 0x7f0a860eda60, 0xc01debb570, 0xc01efeb300)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/filters.WithAuthorization.func1(0x7f0a860eda60, 0xc01debb570, 0xc01efeb300)
net/http.HandlerFunc.ServeHTTP(0xc01abb9140, 0x7f0a860eda60, 0xc01debb570, 0xc01efeb300)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.WithMaxInFlightLimit.func1(0x7f0a860eda60, 0xc01debb570, 0xc01efeb300)
net/http.HandlerFunc.ServeHTTP(0xc013d3ca20, 0x7f0a860eda60, 0xc01debb570, 0xc01efeb300)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/filters.WithImpersonation.func1(0x7f0a860eda60, 0xc01debb570, 0xc01efeb300)
net/http.HandlerFunc.ServeHTTP(0xc01abb9180, 0x7f0a860eda60, 0xc01debb570, 0xc01efeb300)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/filters.WithAuthentication.func1(0x7f0a860eda60, 0xc01debb570, 0xc01efeb200)
net/http.HandlerFunc.ServeHTTP(0xc01abc7360, 0x7f0a860eda60, 0xc01debb570, 0xc01efeb200)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.(*timeoutHandler).ServeHTTP.func1(0xc01ee50ea0, 0xc01ac164e0, 0x60d7720, 0xc01debb570, 0xc01efeb200)
created by k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.(*timeoutHandler).ServeHTTP

logging error output: "[+]ping ok\n[+]log ok\n[+]etcd ok\n[+]poststarthook/generic-apiserver-start-informers ok\n[+]poststarthook/bootstrap-controller ok\n[-]poststarthook/rbac/bootstrap-roles failed: reason withheld\n[+]poststarthook/scheduling/bootstrap-system-priority-classes ok\n[+]poststarthook/ca-registration ok\nhealthz check failed\n"
 [Go-http-client/1.1 127.0.0.1:34658]
I0212 07:24:30.518635  124047 wrap.go:47] POST /apis/rbac.authorization.k8s.io/v1/namespaces/kube-system/rolebindings: (2.344467ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:34398]
I0212 07:24:30.518904  124047 storage_rbac.go:276] created rolebinding.rbac.authorization.k8s.io/system:controller:cloud-provider in kube-system
I0212 07:24:30.537673  124047 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/namespaces/kube-system/rolebindings/system:controller:token-cleaner: (1.290245ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:34398]
I0212 07:24:30.539394  124047 wrap.go:47] GET /api/v1/namespaces/kube-system: (1.311691ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:34398]
I0212 07:24:30.558585  124047 wrap.go:47] POST /apis/rbac.authorization.k8s.io/v1/namespaces/kube-system/rolebindings: (2.314164ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:34398]
I0212 07:24:30.558828  124047 storage_rbac.go:276] created rolebinding.rbac.authorization.k8s.io/system:controller:token-cleaner in kube-system
I0212 07:24:30.577846  124047 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/namespaces/kube-public/rolebindings/system:controller:bootstrap-signer: (1.4575ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:34398]
I0212 07:24:30.579873  124047 wrap.go:47] GET /api/v1/namespaces/kube-public: (1.482384ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:34398]
I0212 07:24:30.598584  124047 wrap.go:47] POST /apis/rbac.authorization.k8s.io/v1/namespaces/kube-public/rolebindings: (2.249724ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:34398]
I0212 07:24:30.598869  124047 storage_rbac.go:276] created rolebinding.rbac.authorization.k8s.io/system:controller:bootstrap-signer in kube-public
I0212 07:24:30.618340  124047 wrap.go:47] GET /healthz: (1.220187ms) 200 [Go-http-client/1.1 127.0.0.1:34398]
W0212 07:24:30.619259  124047 mutation_detector.go:48] Mutation detector is enabled, this will result in memory leakage.
W0212 07:24:30.619304  124047 mutation_detector.go:48] Mutation detector is enabled, this will result in memory leakage.
W0212 07:24:30.619338  124047 mutation_detector.go:48] Mutation detector is enabled, this will result in memory leakage.
W0212 07:24:30.619350  124047 mutation_detector.go:48] Mutation detector is enabled, this will result in memory leakage.
W0212 07:24:30.619359  124047 mutation_detector.go:48] Mutation detector is enabled, this will result in memory leakage.
W0212 07:24:30.619369  124047 mutation_detector.go:48] Mutation detector is enabled, this will result in memory leakage.
W0212 07:24:30.619379  124047 mutation_detector.go:48] Mutation detector is enabled, this will result in memory leakage.
W0212 07:24:30.619391  124047 mutation_detector.go:48] Mutation detector is enabled, this will result in memory leakage.
W0212 07:24:30.619400  124047 mutation_detector.go:48] Mutation detector is enabled, this will result in memory leakage.
W0212 07:24:30.619409  124047 mutation_detector.go:48] Mutation detector is enabled, this will result in memory leakage.
I0212 07:24:30.619460  124047 factory.go:331] Creating scheduler from algorithm provider 'DefaultProvider'
I0212 07:24:30.619469  124047 factory.go:412] Creating scheduler with fit predicates 'map[CheckNodeCondition:{} MaxEBSVolumeCount:{} MaxAzureDiskVolumeCount:{} MatchInterPodAffinity:{} NoDiskConflict:{} CheckNodeDiskPressure:{} MaxGCEPDVolumeCount:{} PodToleratesNodeTaints:{} MaxCSIVolumeCountPred:{} NoVolumeZoneConflict:{} GeneralPredicates:{} CheckNodeMemoryPressure:{} CheckNodePIDPressure:{} CheckVolumeBinding:{}]' and priority functions 'map[LeastRequestedPriority:{} BalancedResourceAllocation:{} NodePreferAvoidPodsPriority:{} NodeAffinityPriority:{} TaintTolerationPriority:{} ImageLocalityPriority:{} SelectorSpreadPriority:{} InterPodAffinityPriority:{}]'
I0212 07:24:30.619667  124047 controller_utils.go:1021] Waiting for caches to sync for scheduler controller
I0212 07:24:30.620028  124047 reflector.go:132] Starting reflector *v1.Pod (12h0m0s) from k8s.io/kubernetes/test/integration/scheduler/util.go:210
I0212 07:24:30.620060  124047 reflector.go:170] Listing and watching *v1.Pod from k8s.io/kubernetes/test/integration/scheduler/util.go:210
I0212 07:24:30.623457  124047 wrap.go:47] GET /api/v1/pods?fieldSelector=status.phase%21%3DFailed%2Cstatus.phase%21%3DSucceeded&limit=500&resourceVersion=0: (3.115098ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:34398]
I0212 07:24:30.624324  124047 get.go:251] Starting watch for /api/v1/pods, rv=19071 labels= fields=status.phase!=Failed,status.phase!=Succeeded timeout=5m19s
I0212 07:24:30.719832  124047 shared_informer.go:123] caches populated
I0212 07:24:30.719870  124047 controller_utils.go:1028] Caches are synced for scheduler controller
I0212 07:24:30.720358  124047 reflector.go:132] Starting reflector *v1.ReplicationController (1s) from k8s.io/client-go/informers/factory.go:132
I0212 07:24:30.720386  124047 reflector.go:170] Listing and watching *v1.ReplicationController from k8s.io/client-go/informers/factory.go:132
I0212 07:24:30.720417  124047 reflector.go:132] Starting reflector *v1.StorageClass (1s) from k8s.io/client-go/informers/factory.go:132
I0212 07:24:30.720435  124047 reflector.go:170] Listing and watching *v1.StorageClass from k8s.io/client-go/informers/factory.go:132
I0212 07:24:30.720606  124047 reflector.go:132] Starting reflector *v1.Node (1s) from k8s.io/client-go/informers/factory.go:132
I0212 07:24:30.720622  124047 reflector.go:170] Listing and watching *v1.Node from k8s.io/client-go/informers/factory.go:132
I0212 07:24:30.720829  124047 reflector.go:132] Starting reflector *v1.Service (1s) from k8s.io/client-go/informers/factory.go:132
I0212 07:24:30.720845  124047 reflector.go:170] Listing and watching *v1.Service from k8s.io/client-go/informers/factory.go:132
I0212 07:24:30.720938  124047 reflector.go:132] Starting reflector *v1beta1.PodDisruptionBudget (1s) from k8s.io/client-go/informers/factory.go:132
I0212 07:24:30.720957  124047 reflector.go:170] Listing and watching *v1beta1.PodDisruptionBudget from k8s.io/client-go/informers/factory.go:132
I0212 07:24:30.721002  124047 reflector.go:132] Starting reflector *v1.StatefulSet (1s) from k8s.io/client-go/informers/factory.go:132
I0212 07:24:30.721017  124047 reflector.go:170] Listing and watching *v1.StatefulSet from k8s.io/client-go/informers/factory.go:132
I0212 07:24:30.721071  124047 reflector.go:132] Starting reflector *v1.PersistentVolume (1s) from k8s.io/client-go/informers/factory.go:132
I0212 07:24:30.721086  124047 reflector.go:170] Listing and watching *v1.PersistentVolume from k8s.io/client-go/informers/factory.go:132
I0212 07:24:30.721183  124047 reflector.go:132] Starting reflector *v1.PersistentVolumeClaim (1s) from k8s.io/client-go/informers/factory.go:132
I0212 07:24:30.721199  124047 reflector.go:170] Listing and watching *v1.PersistentVolumeClaim from k8s.io/client-go/informers/factory.go:132
I0212 07:24:30.722650  124047 wrap.go:47] GET /api/v1/replicationcontrollers?limit=500&resourceVersion=0: (634.731µs) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:34658]
I0212 07:24:30.722664  124047 wrap.go:47] GET /api/v1/persistentvolumeclaims?limit=500&resourceVersion=0: (474.592µs) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:34772]
I0212 07:24:30.722651  124047 wrap.go:47] GET /apis/storage.k8s.io/v1/storageclasses?limit=500&resourceVersion=0: (509.086µs) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:34760]
I0212 07:24:30.723168  124047 wrap.go:47] GET /apis/apps/v1/statefulsets?limit=500&resourceVersion=0: (439.19µs) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:34768]
I0212 07:24:30.723217  124047 wrap.go:47] GET /api/v1/persistentvolumes?limit=500&resourceVersion=0: (421.299µs) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:34770]
I0212 07:24:30.723586  124047 get.go:251] Starting watch for /api/v1/persistentvolumeclaims, rv=19071 labels= fields= timeout=6m52s
I0212 07:24:30.723710  124047 wrap.go:47] GET /api/v1/nodes?limit=500&resourceVersion=0: (443.402µs) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:34762]
I0212 07:24:30.723862  124047 reflector.go:132] Starting reflector *v1.ReplicaSet (1s) from k8s.io/client-go/informers/factory.go:132
I0212 07:24:30.723876  124047 reflector.go:170] Listing and watching *v1.ReplicaSet from k8s.io/client-go/informers/factory.go:132
I0212 07:24:30.723950  124047 get.go:251] Starting watch for /api/v1/replicationcontrollers, rv=19071 labels= fields= timeout=8m17s
I0212 07:24:30.724261  124047 wrap.go:47] GET /api/v1/services?limit=500&resourceVersion=0: (459.305µs) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:34764]
I0212 07:24:30.724376  124047 get.go:251] Starting watch for /apis/storage.k8s.io/v1/storageclasses, rv=19071 labels= fields= timeout=9m59s
I0212 07:24:30.724743  124047 wrap.go:47] GET /apis/policy/v1beta1/poddisruptionbudgets?limit=500&resourceVersion=0: (380.84µs) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:34766]
I0212 07:24:30.725102  124047 wrap.go:47] GET /apis/apps/v1/replicasets?limit=500&resourceVersion=0: (464.477µs) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:34770]
I0212 07:24:30.725179  124047 get.go:251] Starting watch for /api/v1/services, rv=19076 labels= fields= timeout=6m7s
I0212 07:24:30.725388  124047 get.go:251] Starting watch for /api/v1/persistentvolumes, rv=19071 labels= fields= timeout=7m18s
I0212 07:24:30.725548  124047 get.go:251] Starting watch for /api/v1/nodes, rv=19071 labels= fields= timeout=9m59s
I0212 07:24:30.725773  124047 get.go:251] Starting watch for /apis/policy/v1beta1/poddisruptionbudgets, rv=19071 labels= fields= timeout=6m55s
I0212 07:24:30.725848  124047 get.go:251] Starting watch for /apis/apps/v1/replicasets, rv=19071 labels= fields= timeout=7m45s
I0212 07:24:30.726833  124047 get.go:251] Starting watch for /apis/apps/v1/statefulsets, rv=19071 labels= fields= timeout=7m59s
I0212 07:24:30.820206  124047 shared_informer.go:123] caches populated
I0212 07:24:30.920435  124047 shared_informer.go:123] caches populated
I0212 07:24:31.020584  124047 shared_informer.go:123] caches populated
I0212 07:24:31.120781  124047 shared_informer.go:123] caches populated
I0212 07:24:31.220986  124047 shared_informer.go:123] caches populated
I0212 07:24:31.321215  124047 shared_informer.go:123] caches populated
I0212 07:24:31.421434  124047 shared_informer.go:123] caches populated
I0212 07:24:31.521774  124047 shared_informer.go:123] caches populated
I0212 07:24:31.622036  124047 shared_informer.go:123] caches populated
I0212 07:24:31.722286  124047 shared_informer.go:123] caches populated
I0212 07:24:31.724485  124047 reflector.go:248] k8s.io/client-go/informers/factory.go:132: forcing resync
I0212 07:24:31.724956  124047 reflector.go:248] k8s.io/client-go/informers/factory.go:132: forcing resync
I0212 07:24:31.725228  124047 wrap.go:47] POST /api/v1/nodes: (2.395918ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:34776]
I0212 07:24:31.728068  124047 wrap.go:47] POST /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods: (2.201815ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:34776]
I0212 07:24:31.729636  124047 reflector.go:248] k8s.io/client-go/informers/factory.go:132: forcing resync
I0212 07:24:31.729675  124047 reflector.go:248] k8s.io/client-go/informers/factory.go:132: forcing resync
I0212 07:24:31.729690  124047 reflector.go:248] k8s.io/client-go/informers/factory.go:132: forcing resync
I0212 07:24:31.730630  124047 wrap.go:47] POST /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods: (2.092178ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:34776]
I0212 07:24:31.731851  124047 scheduling_queue.go:868] About to try and schedule pod preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/rpod-0
I0212 07:24:31.731864  124047 scheduler.go:453] Attempting to schedule pod: preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/rpod-0
I0212 07:24:31.731993  124047 scheduler_binder.go:269] AssumePodVolumes for pod "preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/rpod-0", node "node1"
I0212 07:24:31.732006  124047 scheduler_binder.go:279] AssumePodVolumes for pod "preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/rpod-0", node "node1": all PVCs bound and nothing to do
I0212 07:24:31.732166  124047 factory.go:733] Attempting to bind rpod-0 to node1
I0212 07:24:31.732288  124047 scheduling_queue.go:868] About to try and schedule pod preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/rpod-1
I0212 07:24:31.732295  124047 scheduler.go:453] Attempting to schedule pod: preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/rpod-1
I0212 07:24:31.732377  124047 scheduler_binder.go:269] AssumePodVolumes for pod "preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/rpod-1", node "node1"
I0212 07:24:31.732389  124047 scheduler_binder.go:279] AssumePodVolumes for pod "preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/rpod-1", node "node1": all PVCs bound and nothing to do
I0212 07:24:31.732437  124047 factory.go:733] Attempting to bind rpod-1 to node1
I0212 07:24:31.737107  124047 wrap.go:47] POST /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/rpod-1/binding: (2.430467ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:34942]
I0212 07:24:31.737334  124047 scheduler.go:571] pod preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/rpod-1 is bound successfully on node node1, 1 nodes evaluated, 1 nodes were found feasible
I0212 07:24:31.739387  124047 wrap.go:47] POST /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/rpod-0/binding: (4.190285ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:34776]
I0212 07:24:31.739573  124047 scheduler.go:571] pod preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/rpod-0 is bound successfully on node node1, 1 nodes evaluated, 1 nodes were found feasible
I0212 07:24:31.740921  124047 wrap.go:47] POST /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/events: (3.263669ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:34942]
I0212 07:24:31.743130  124047 wrap.go:47] POST /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/events: (1.802618ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:34942]
I0212 07:24:31.836270  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/rpod-0: (2.281171ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:34942]
I0212 07:24:31.938766  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/rpod-1: (1.650865ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:34942]
I0212 07:24:31.939146  124047 preemption_test.go:561] Creating the preemptor pod...
I0212 07:24:31.941468  124047 wrap.go:47] POST /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods: (2.04767ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:34942]
I0212 07:24:31.941741  124047 preemption_test.go:567] Creating additional pods...
I0212 07:24:31.941752  124047 scheduling_queue.go:868] About to try and schedule pod preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/preemptor-pod
I0212 07:24:31.941766  124047 scheduler.go:453] Attempting to schedule pod: preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/preemptor-pod
I0212 07:24:31.941867  124047 factory.go:647] Unable to schedule preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/preemptor-pod: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 07:24:31.941934  124047 factory.go:742] Updating pod condition for preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/preemptor-pod to (PodScheduled==False, Reason=Unschedulable)
I0212 07:24:31.943933  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/preemptor-pod: (1.75515ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:34776]
I0212 07:24:31.944123  124047 wrap.go:47] POST /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods: (2.084972ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:34942]
I0212 07:24:31.944166  124047 wrap.go:47] POST /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/events: (1.376971ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:34954]
I0212 07:24:31.944877  124047 wrap.go:47] PUT /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/preemptor-pod/status: (2.20281ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:34952]
I0212 07:24:31.946803  124047 wrap.go:47] POST /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods: (2.240339ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:34942]
I0212 07:24:31.946895  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/preemptor-pod: (1.49207ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:34952]
I0212 07:24:31.947132  124047 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 07:24:31.949195  124047 wrap.go:47] PUT /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/preemptor-pod/status: (1.699679ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:34776]
I0212 07:24:31.949533  124047 wrap.go:47] POST /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods: (2.329782ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:34942]
I0212 07:24:31.951848  124047 wrap.go:47] POST /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods: (1.866604ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:34942]
I0212 07:24:31.954101  124047 wrap.go:47] DELETE /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/rpod-1: (4.122564ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:34776]
I0212 07:24:31.954675  124047 wrap.go:47] POST /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods: (2.204363ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:34942]
I0212 07:24:31.955620  124047 scheduling_queue.go:868] About to try and schedule pod preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/preemptor-pod
I0212 07:24:31.955658  124047 scheduler.go:453] Attempting to schedule pod: preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/preemptor-pod
I0212 07:24:31.955794  124047 scheduler_binder.go:269] AssumePodVolumes for pod "preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/preemptor-pod", node "node1"
I0212 07:24:31.955875  124047 scheduler_binder.go:279] AssumePodVolumes for pod "preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/preemptor-pod", node "node1": all PVCs bound and nothing to do
I0212 07:24:31.955930  124047 factory.go:733] Attempting to bind preemptor-pod to node1
I0212 07:24:31.955967  124047 scheduling_queue.go:868] About to try and schedule pod preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-4
I0212 07:24:31.955991  124047 scheduler.go:453] Attempting to schedule pod: preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-4
I0212 07:24:31.956095  124047 factory.go:647] Unable to schedule preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-4: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 07:24:31.956141  124047 factory.go:742] Updating pod condition for preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-4 to (PodScheduled==False, Reason=Unschedulable)
I0212 07:24:31.957608  124047 wrap.go:47] POST /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/events: (2.161322ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:34776]
I0212 07:24:31.958147  124047 wrap.go:47] POST /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods: (3.11518ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:34942]
I0212 07:24:31.959386  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-4: (2.709539ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:34960]
I0212 07:24:31.959602  124047 wrap.go:47] PUT /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-4/status: (3.035802ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:34958]
I0212 07:24:31.959822  124047 wrap.go:47] POST /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/preemptor-pod/binding: (3.488002ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:34956]
I0212 07:24:31.960032  124047 wrap.go:47] POST /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/events: (2.024587ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:34776]
I0212 07:24:31.960102  124047 scheduler.go:571] pod preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/preemptor-pod is bound successfully on node node1, 1 nodes evaluated, 1 nodes were found feasible
I0212 07:24:31.961243  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-4: (988.909µs) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:34958]
I0212 07:24:31.961618  124047 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 07:24:31.961685  124047 wrap.go:47] POST /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods: (2.389458ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:34942]
I0212 07:24:31.961787  124047 scheduling_queue.go:868] About to try and schedule pod preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-3
I0212 07:24:31.961808  124047 scheduler.go:453] Attempting to schedule pod: preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-3
I0212 07:24:31.961923  124047 factory.go:647] Unable to schedule preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-3: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 07:24:31.961979  124047 factory.go:742] Updating pod condition for preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-3 to (PodScheduled==False, Reason=Unschedulable)
I0212 07:24:31.963077  124047 wrap.go:47] POST /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/events: (2.636732ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:34776]
I0212 07:24:31.964646  124047 wrap.go:47] PUT /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-3/status: (2.065942ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:34958]
I0212 07:24:31.964674  124047 wrap.go:47] POST /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods: (2.234142ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:34960]
I0212 07:24:31.964741  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-3: (1.422927ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:34776]
I0212 07:24:31.965065  124047 wrap.go:47] POST /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/events: (1.528136ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:34962]
I0212 07:24:31.966631  124047 wrap.go:47] POST /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods: (1.558678ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:34776]
I0212 07:24:31.966908  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-3: (1.681523ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:34960]
I0212 07:24:31.967153  124047 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 07:24:31.967410  124047 scheduling_queue.go:868] About to try and schedule pod preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-6
I0212 07:24:31.967429  124047 scheduler.go:453] Attempting to schedule pod: preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-6
I0212 07:24:31.967581  124047 factory.go:647] Unable to schedule preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-6: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 07:24:31.967637  124047 factory.go:742] Updating pod condition for preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-6 to (PodScheduled==False, Reason=Unschedulable)
I0212 07:24:31.968730  124047 wrap.go:47] POST /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods: (1.699571ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:34776]
I0212 07:24:31.969672  124047 wrap.go:47] POST /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/events: (1.262302ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:34964]
I0212 07:24:31.970060  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-6: (2.090333ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:34962]
I0212 07:24:31.970744  124047 wrap.go:47] PUT /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-6/status: (2.707898ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:34960]
I0212 07:24:31.972796  124047 wrap.go:47] POST /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods: (3.652146ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:34776]
I0212 07:24:31.973457  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-6: (2.19796ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:34962]
I0212 07:24:31.973911  124047 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 07:24:31.974085  124047 scheduling_queue.go:868] About to try and schedule pod preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-8
I0212 07:24:31.974104  124047 scheduler.go:453] Attempting to schedule pod: preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-8
I0212 07:24:31.974229  124047 factory.go:647] Unable to schedule preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-8: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 07:24:31.974271  124047 factory.go:742] Updating pod condition for preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-8 to (PodScheduled==False, Reason=Unschedulable)
I0212 07:24:31.975411  124047 wrap.go:47] POST /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods: (2.109271ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:34776]
I0212 07:24:31.977126  124047 wrap.go:47] POST /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/events: (2.560304ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:34964]
I0212 07:24:31.977583  124047 wrap.go:47] POST /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods: (1.830115ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:34776]
I0212 07:24:31.978030  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-8: (1.226371ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:34966]
I0212 07:24:31.978726  124047 wrap.go:47] PUT /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-8/status: (4.253747ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:34962]
I0212 07:24:31.979940  124047 wrap.go:47] POST /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods: (1.959351ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:34776]
I0212 07:24:31.980403  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-8: (1.192032ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:34966]
I0212 07:24:31.980648  124047 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 07:24:31.980820  124047 scheduling_queue.go:868] About to try and schedule pod preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-10
I0212 07:24:31.980843  124047 scheduler.go:453] Attempting to schedule pod: preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-10
I0212 07:24:31.980957  124047 factory.go:647] Unable to schedule preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-10: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 07:24:31.981005  124047 factory.go:742] Updating pod condition for preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-10 to (PodScheduled==False, Reason=Unschedulable)
I0212 07:24:31.982529  124047 wrap.go:47] POST /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods: (2.151945ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:34776]
I0212 07:24:31.982695  124047 wrap.go:47] POST /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/events: (1.275421ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:34964]
I0212 07:24:31.983109  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-10: (1.280996ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:34968]
I0212 07:24:31.984483  124047 wrap.go:47] PUT /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-10/status: (3.114595ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:34966]
I0212 07:24:31.984530  124047 wrap.go:47] POST /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods: (1.574928ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:34964]
I0212 07:24:31.986059  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-10: (1.029661ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:34776]
I0212 07:24:31.986428  124047 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 07:24:31.986610  124047 scheduling_queue.go:868] About to try and schedule pod preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-13
I0212 07:24:31.986650  124047 scheduler.go:453] Attempting to schedule pod: preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-13
I0212 07:24:31.986709  124047 wrap.go:47] POST /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods: (1.732913ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:34968]
I0212 07:24:31.986784  124047 factory.go:647] Unable to schedule preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-13: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 07:24:31.986902  124047 factory.go:742] Updating pod condition for preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-13 to (PodScheduled==False, Reason=Unschedulable)
I0212 07:24:31.989061  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-13: (1.379528ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:34776]
I0212 07:24:31.989679  124047 wrap.go:47] POST /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/events: (1.9111ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:34972]
I0212 07:24:31.989716  124047 wrap.go:47] POST /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods: (1.952889ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:34970]
I0212 07:24:31.990008  124047 wrap.go:47] PUT /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-13/status: (2.719315ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:34968]
I0212 07:24:31.991958  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-13: (1.300081ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:34968]
I0212 07:24:31.992103  124047 wrap.go:47] POST /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods: (1.777782ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:34972]
I0212 07:24:31.992299  124047 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 07:24:31.992515  124047 scheduling_queue.go:868] About to try and schedule pod preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-15
I0212 07:24:31.992535  124047 scheduler.go:453] Attempting to schedule pod: preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-15
I0212 07:24:31.992629  124047 factory.go:647] Unable to schedule preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-15: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 07:24:31.992677  124047 factory.go:742] Updating pod condition for preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-15 to (PodScheduled==False, Reason=Unschedulable)
I0212 07:24:31.994971  124047 wrap.go:47] PUT /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-15/status: (2.068987ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:34776]
I0212 07:24:31.995249  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-15: (1.580743ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:34974]
I0212 07:24:31.995443  124047 wrap.go:47] POST /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods: (2.869541ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:34968]
I0212 07:24:31.996129  124047 wrap.go:47] POST /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/events: (2.348342ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:34976]
I0212 07:24:31.996792  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-15: (1.252661ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:34776]
I0212 07:24:31.997648  124047 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 07:24:31.997873  124047 scheduling_queue.go:868] About to try and schedule pod preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-18
I0212 07:24:31.997890  124047 wrap.go:47] POST /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods: (1.954402ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:34968]
I0212 07:24:31.997903  124047 scheduler.go:453] Attempting to schedule pod: preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-18
I0212 07:24:31.998089  124047 factory.go:647] Unable to schedule preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-18: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 07:24:31.998140  124047 factory.go:742] Updating pod condition for preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-18 to (PodScheduled==False, Reason=Unschedulable)
I0212 07:24:31.999640  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-18: (1.253919ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:34976]
I0212 07:24:32.000281  124047 wrap.go:47] POST /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/events: (1.526699ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:34980]
I0212 07:24:32.000450  124047 wrap.go:47] PUT /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-18/status: (2.034425ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:34974]
I0212 07:24:32.000479  124047 wrap.go:47] POST /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods: (1.803662ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:34978]
I0212 07:24:32.002441  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-18: (1.424469ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:34974]
I0212 07:24:32.002691  124047 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 07:24:32.002860  124047 scheduling_queue.go:868] About to try and schedule pod preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-15
I0212 07:24:32.002881  124047 scheduler.go:453] Attempting to schedule pod: preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-15
I0212 07:24:32.002961  124047 wrap.go:47] POST /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods: (1.934118ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:34976]
I0212 07:24:32.002975  124047 factory.go:647] Unable to schedule preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-15: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 07:24:32.003024  124047 factory.go:742] Updating pod condition for preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-15 to (PodScheduled==False, Reason=Unschedulable)
I0212 07:24:32.004959  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-15: (1.68287ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:34976]
I0212 07:24:32.006751  124047 wrap.go:47] PUT /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-15/status: (2.74484ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:34984]
I0212 07:24:32.006851  124047 wrap.go:47] POST /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods: (2.814488ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:34982]
I0212 07:24:32.008538  124047 wrap.go:47] PATCH /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/events/ppod-15.15828d047182ecad: (4.620732ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:34974]
I0212 07:24:32.008781  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-15: (1.594664ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:34984]
I0212 07:24:32.009020  124047 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 07:24:32.009552  124047 wrap.go:47] POST /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods: (2.198237ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:34976]
I0212 07:24:32.009741  124047 scheduling_queue.go:868] About to try and schedule pod preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-21
I0212 07:24:32.009764  124047 scheduler.go:453] Attempting to schedule pod: preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-21
I0212 07:24:32.009957  124047 factory.go:647] Unable to schedule preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-21: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 07:24:32.009999  124047 factory.go:742] Updating pod condition for preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-21 to (PodScheduled==False, Reason=Unschedulable)
I0212 07:24:32.011361  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-21: (1.129038ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:34974]
I0212 07:24:32.011835  124047 wrap.go:47] POST /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods: (1.790807ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:34984]
I0212 07:24:32.012228  124047 wrap.go:47] POST /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/events: (1.456742ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:34988]
I0212 07:24:32.012642  124047 wrap.go:47] PUT /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-21/status: (2.028023ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:34986]
I0212 07:24:32.014528  124047 wrap.go:47] POST /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods: (1.724353ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:34984]
I0212 07:24:32.015121  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-21: (1.266818ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:34988]
I0212 07:24:32.015417  124047 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 07:24:32.015608  124047 scheduling_queue.go:868] About to try and schedule pod preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-0
I0212 07:24:32.015629  124047 scheduler.go:453] Attempting to schedule pod: preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-0
I0212 07:24:32.015787  124047 factory.go:647] Unable to schedule preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-0: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 07:24:32.015877  124047 factory.go:742] Updating pod condition for preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-0 to (PodScheduled==False, Reason=Unschedulable)
I0212 07:24:32.016801  124047 wrap.go:47] POST /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods: (1.884375ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:34984]
I0212 07:24:32.017545  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-0: (1.316549ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:34974]
I0212 07:24:32.018519  124047 wrap.go:47] POST /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/events: (1.871029ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:34990]
I0212 07:24:32.018541  124047 wrap.go:47] PUT /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-0/status: (2.292677ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:34988]
I0212 07:24:32.019567  124047 wrap.go:47] POST /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods: (2.129236ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:34984]
I0212 07:24:32.020524  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-0: (1.212861ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:34990]
I0212 07:24:32.020832  124047 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 07:24:32.021053  124047 scheduling_queue.go:868] About to try and schedule pod preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-2
I0212 07:24:32.021076  124047 scheduler.go:453] Attempting to schedule pod: preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-2
I0212 07:24:32.021209  124047 factory.go:647] Unable to schedule preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-2: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 07:24:32.021261  124047 factory.go:742] Updating pod condition for preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-2 to (PodScheduled==False, Reason=Unschedulable)
I0212 07:24:32.021651  124047 wrap.go:47] POST /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods: (1.704927ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:34984]
I0212 07:24:32.023471  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-2: (1.846307ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:34974]
I0212 07:24:32.023583  124047 wrap.go:47] PUT /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-2/status: (2.001306ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:34990]
I0212 07:24:32.025022  124047 wrap.go:47] POST /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods: (1.960271ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:34984]
I0212 07:24:32.025084  124047 wrap.go:47] POST /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/events: (1.736313ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:34992]
I0212 07:24:32.025438  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-2: (1.08909ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:34990]
I0212 07:24:32.025715  124047 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 07:24:32.025878  124047 scheduling_queue.go:868] About to try and schedule pod preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-9
I0212 07:24:32.025900  124047 scheduler.go:453] Attempting to schedule pod: preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-9
I0212 07:24:32.026130  124047 factory.go:647] Unable to schedule preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-9: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 07:24:32.026178  124047 factory.go:742] Updating pod condition for preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-9 to (PodScheduled==False, Reason=Unschedulable)
I0212 07:24:32.027516  124047 wrap.go:47] POST /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods: (2.070426ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:34984]
I0212 07:24:32.027666  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-9: (1.323733ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:34990]
I0212 07:24:32.028177  124047 wrap.go:47] PUT /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-9/status: (1.725739ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:34974]
I0212 07:24:32.029450  124047 wrap.go:47] POST /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/events: (1.679331ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:34994]
I0212 07:24:32.030070  124047 wrap.go:47] POST /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods: (2.115877ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:34990]
I0212 07:24:32.030115  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-9: (1.503634ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:34974]
I0212 07:24:32.030410  124047 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 07:24:32.030633  124047 scheduling_queue.go:868] About to try and schedule pod preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-13
I0212 07:24:32.030659  124047 scheduler.go:453] Attempting to schedule pod: preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-13
I0212 07:24:32.030757  124047 factory.go:647] Unable to schedule preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-13: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 07:24:32.030797  124047 factory.go:742] Updating pod condition for preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-13 to (PodScheduled==False, Reason=Unschedulable)
I0212 07:24:32.032618  124047 wrap.go:47] POST /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods: (2.042766ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:34994]
I0212 07:24:32.032769  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-13: (1.233073ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:34996]
I0212 07:24:32.033421  124047 wrap.go:47] PUT /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-13/status: (2.380025ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:34984]
I0212 07:24:32.034942  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-13: (1.067437ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:34996]
I0212 07:24:32.035194  124047 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 07:24:32.035360  124047 scheduling_queue.go:868] About to try and schedule pod preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-17
I0212 07:24:32.035397  124047 wrap.go:47] PATCH /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/events/ppod-13.15828d04712ac9c3: (3.054544ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:34998]
I0212 07:24:32.035397  124047 scheduler.go:453] Attempting to schedule pod: preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-17
I0212 07:24:32.035591  124047 factory.go:647] Unable to schedule preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-17: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 07:24:32.035662  124047 factory.go:742] Updating pod condition for preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-17 to (PodScheduled==False, Reason=Unschedulable)
I0212 07:24:32.035681  124047 wrap.go:47] POST /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods: (1.927441ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:34994]
I0212 07:24:32.037735  124047 wrap.go:47] POST /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/events: (1.744408ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:34994]
I0212 07:24:32.038330  124047 wrap.go:47] POST /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods: (2.202018ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:34984]
I0212 07:24:32.038395  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-17: (2.540031ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:34996]
I0212 07:24:32.038670  124047 wrap.go:47] PUT /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-17/status: (2.304215ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35000]
I0212 07:24:32.040602  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-17: (1.457272ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35000]
I0212 07:24:32.040844  124047 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 07:24:32.040985  124047 scheduling_queue.go:868] About to try and schedule pod preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-6
I0212 07:24:32.040997  124047 scheduler.go:453] Attempting to schedule pod: preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-6
I0212 07:24:32.041111  124047 factory.go:647] Unable to schedule preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-6: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 07:24:32.041146  124047 factory.go:742] Updating pod condition for preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-6 to (PodScheduled==False, Reason=Unschedulable)
I0212 07:24:32.042932  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-6: (1.540372ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:34994]
I0212 07:24:32.043568  124047 wrap.go:47] PUT /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-6/status: (2.101027ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35000]
I0212 07:24:32.043958  124047 wrap.go:47] POST /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods: (5.069035ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:34984]
I0212 07:24:32.044743  124047 wrap.go:47] PATCH /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/events/ppod-6.15828d047004de2e: (2.548593ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35002]
I0212 07:24:32.045806  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-6: (1.192459ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:34994]
I0212 07:24:32.045939  124047 wrap.go:47] POST /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods: (1.558399ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35000]
I0212 07:24:32.046137  124047 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 07:24:32.046300  124047 scheduling_queue.go:868] About to try and schedule pod preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-19
I0212 07:24:32.046319  124047 scheduler.go:453] Attempting to schedule pod: preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-19
I0212 07:24:32.046416  124047 factory.go:647] Unable to schedule preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-19: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 07:24:32.046526  124047 factory.go:742] Updating pod condition for preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-19 to (PodScheduled==False, Reason=Unschedulable)
I0212 07:24:32.048239  124047 wrap.go:47] POST /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods: (1.802791ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:34994]
I0212 07:24:32.049483  124047 wrap.go:47] PUT /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-19/status: (2.367618ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35002]
I0212 07:24:32.049483  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-19: (1.544665ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35004]
I0212 07:24:32.049707  124047 wrap.go:47] POST /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/events: (1.980886ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35006]
I0212 07:24:32.050185  124047 wrap.go:47] POST /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods: (1.414984ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:34994]
I0212 07:24:32.051731  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-19: (1.16532ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35006]
I0212 07:24:32.052379  124047 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 07:24:32.052553  124047 scheduling_queue.go:868] About to try and schedule pod preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-20
I0212 07:24:32.052579  124047 scheduler.go:453] Attempting to schedule pod: preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-20
I0212 07:24:32.052620  124047 wrap.go:47] POST /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods: (1.839125ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35004]
I0212 07:24:32.052777  124047 factory.go:647] Unable to schedule preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-20: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 07:24:32.052830  124047 factory.go:742] Updating pod condition for preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-20 to (PodScheduled==False, Reason=Unschedulable)
I0212 07:24:32.054340  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-20: (1.147594ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35006]
I0212 07:24:32.055394  124047 wrap.go:47] POST /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods: (2.311518ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35004]
I0212 07:24:32.055524  124047 wrap.go:47] POST /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/events: (2.10852ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35010]
I0212 07:24:32.055817  124047 wrap.go:47] PUT /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-20/status: (2.326274ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35008]
I0212 07:24:32.057526  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-20: (1.242675ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35008]
I0212 07:24:32.057880  124047 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 07:24:32.057890  124047 wrap.go:47] POST /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods: (2.094364ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35004]
I0212 07:24:32.058088  124047 scheduling_queue.go:868] About to try and schedule pod preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-5
I0212 07:24:32.058109  124047 scheduler.go:453] Attempting to schedule pod: preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-5
I0212 07:24:32.058226  124047 factory.go:647] Unable to schedule preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-5: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 07:24:32.058293  124047 factory.go:742] Updating pod condition for preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-5 to (PodScheduled==False, Reason=Unschedulable)
I0212 07:24:32.060362  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-5: (1.358428ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35012]
I0212 07:24:32.061222  124047 wrap.go:47] POST /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/events: (2.09151ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35014]
I0212 07:24:32.061412  124047 wrap.go:47] POST /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods: (2.881079ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35008]
I0212 07:24:32.061702  124047 wrap.go:47] PUT /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-5/status: (3.135984ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35006]
I0212 07:24:32.063523  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-5: (1.261367ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35006]
I0212 07:24:32.063847  124047 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 07:24:32.063882  124047 wrap.go:47] POST /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods: (1.891822ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35014]
I0212 07:24:32.064061  124047 scheduling_queue.go:868] About to try and schedule pod preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-11
I0212 07:24:32.064083  124047 scheduler.go:453] Attempting to schedule pod: preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-11
I0212 07:24:32.064268  124047 factory.go:647] Unable to schedule preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-11: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 07:24:32.064340  124047 factory.go:742] Updating pod condition for preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-11 to (PodScheduled==False, Reason=Unschedulable)
I0212 07:24:32.066158  124047 wrap.go:47] POST /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods: (1.713855ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35006]
I0212 07:24:32.066586  124047 wrap.go:47] POST /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/events: (1.521908ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35018]
I0212 07:24:32.067614  124047 wrap.go:47] PUT /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-11/status: (2.211387ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35016]
I0212 07:24:32.068390  124047 wrap.go:47] POST /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods: (1.730631ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35006]
I0212 07:24:32.069416  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-11: (4.846622ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35012]
I0212 07:24:32.069910  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-11: (1.065065ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35016]
I0212 07:24:32.070249  124047 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 07:24:32.070702  124047 scheduling_queue.go:868] About to try and schedule pod preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-18
I0212 07:24:32.070724  124047 scheduler.go:453] Attempting to schedule pod: preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-18
I0212 07:24:32.070811  124047 factory.go:647] Unable to schedule preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-18: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 07:24:32.070862  124047 factory.go:742] Updating pod condition for preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-18 to (PodScheduled==False, Reason=Unschedulable)
I0212 07:24:32.070897  124047 wrap.go:47] POST /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods: (2.013609ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35018]
I0212 07:24:32.072631  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-18: (1.46369ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35016]
I0212 07:24:32.073221  124047 wrap.go:47] PUT /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-18/status: (2.01619ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35012]
I0212 07:24:32.074447  124047 wrap.go:47] PATCH /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/events/ppod-18.15828d0471d63fc2: (3.0555ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35018]
I0212 07:24:32.074547  124047 wrap.go:47] POST /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods: (2.846761ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35020]
I0212 07:24:32.075014  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-18: (1.024423ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35012]
I0212 07:24:32.075282  124047 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 07:24:32.075472  124047 scheduling_queue.go:868] About to try and schedule pod preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-12
I0212 07:24:32.075517  124047 scheduler.go:453] Attempting to schedule pod: preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-12
I0212 07:24:32.075635  124047 factory.go:647] Unable to schedule preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-12: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 07:24:32.075684  124047 factory.go:742] Updating pod condition for preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-12 to (PodScheduled==False, Reason=Unschedulable)
I0212 07:24:32.077262  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-12: (1.183286ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35016]
I0212 07:24:32.077817  124047 wrap.go:47] POST /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods: (2.844415ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35018]
I0212 07:24:32.077891  124047 wrap.go:47] POST /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/events: (1.532522ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35022]
I0212 07:24:32.078015  124047 wrap.go:47] PUT /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-12/status: (1.919589ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35012]
I0212 07:24:32.079763  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-12: (1.31514ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35018]
I0212 07:24:32.080090  124047 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 07:24:32.080262  124047 scheduling_queue.go:868] About to try and schedule pod preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-1
I0212 07:24:32.080280  124047 scheduler.go:453] Attempting to schedule pod: preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-1
I0212 07:24:32.080407  124047 factory.go:647] Unable to schedule preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-1: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 07:24:32.080482  124047 factory.go:742] Updating pod condition for preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-1 to (PodScheduled==False, Reason=Unschedulable)
I0212 07:24:32.082482  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-1: (1.059701ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35024]
I0212 07:24:32.082717  124047 wrap.go:47] POST /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/events: (1.894965ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35016]
I0212 07:24:32.084097  124047 wrap.go:47] PUT /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-1/status: (3.373454ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35018]
I0212 07:24:32.086161  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-1: (1.534351ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35016]
I0212 07:24:32.086476  124047 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 07:24:32.086715  124047 scheduling_queue.go:868] About to try and schedule pod preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-4
I0212 07:24:32.086756  124047 scheduler.go:453] Attempting to schedule pod: preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-4
I0212 07:24:32.086873  124047 factory.go:647] Unable to schedule preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-4: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 07:24:32.086923  124047 factory.go:742] Updating pod condition for preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-4 to (PodScheduled==False, Reason=Unschedulable)
I0212 07:24:32.088446  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-4: (1.21844ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35024]
I0212 07:24:32.089561  124047 wrap.go:47] PUT /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-4/status: (2.328207ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35016]
I0212 07:24:32.090413  124047 wrap.go:47] PATCH /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/events/ppod-4.15828d046f557227: (2.676078ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35026]
I0212 07:24:32.091606  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-4: (1.392546ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35016]
I0212 07:24:32.091953  124047 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 07:24:32.092179  124047 scheduling_queue.go:868] About to try and schedule pod preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-8
I0212 07:24:32.092204  124047 scheduler.go:453] Attempting to schedule pod: preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-8
I0212 07:24:32.092351  124047 factory.go:647] Unable to schedule preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-8: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 07:24:32.092415  124047 factory.go:742] Updating pod condition for preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-8 to (PodScheduled==False, Reason=Unschedulable)
I0212 07:24:32.094010  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-8: (1.297445ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35024]
I0212 07:24:32.094614  124047 wrap.go:47] PUT /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-8/status: (1.914145ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35026]
I0212 07:24:32.096558  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-8: (1.464333ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35026]
I0212 07:24:32.096655  124047 wrap.go:47] PATCH /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/events/ppod-8.15828d04706a0c12: (3.158987ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35028]
I0212 07:24:32.096849  124047 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 07:24:32.097054  124047 scheduling_queue.go:868] About to try and schedule pod preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-14
I0212 07:24:32.097077  124047 scheduler.go:453] Attempting to schedule pod: preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-14
I0212 07:24:32.097195  124047 factory.go:647] Unable to schedule preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-14: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 07:24:32.097256  124047 factory.go:742] Updating pod condition for preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-14 to (PodScheduled==False, Reason=Unschedulable)
I0212 07:24:32.098871  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-14: (1.204313ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35024]
I0212 07:24:32.099638  124047 wrap.go:47] POST /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/events: (1.526381ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35030]
I0212 07:24:32.099991  124047 wrap.go:47] PUT /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-14/status: (2.315634ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35026]
I0212 07:24:32.101646  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-14: (1.187982ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35030]
I0212 07:24:32.101922  124047 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 07:24:32.102106  124047 scheduling_queue.go:868] About to try and schedule pod preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-7
I0212 07:24:32.102126  124047 scheduler.go:453] Attempting to schedule pod: preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-7
I0212 07:24:32.102263  124047 factory.go:647] Unable to schedule preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-7: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 07:24:32.102339  124047 factory.go:742] Updating pod condition for preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-7 to (PodScheduled==False, Reason=Unschedulable)
I0212 07:24:32.104082  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-7: (1.389865ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35024]
I0212 07:24:32.104419  124047 wrap.go:47] POST /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/events: (1.429942ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35032]
I0212 07:24:32.105301  124047 wrap.go:47] PUT /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-7/status: (2.634016ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35030]
I0212 07:24:32.107215  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-7: (1.399192ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35032]
I0212 07:24:32.107534  124047 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 07:24:32.107801  124047 scheduling_queue.go:868] About to try and schedule pod preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-10
I0212 07:24:32.107829  124047 scheduler.go:453] Attempting to schedule pod: preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-10
I0212 07:24:32.108004  124047 factory.go:647] Unable to schedule preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-10: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 07:24:32.108066  124047 factory.go:742] Updating pod condition for preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-10 to (PodScheduled==False, Reason=Unschedulable)
I0212 07:24:32.111376  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-10: (2.886698ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35024]
I0212 07:24:32.111877  124047 wrap.go:47] PUT /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-10/status: (3.421213ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35032]
I0212 07:24:32.112839  124047 wrap.go:47] PATCH /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/events/ppod-10.15828d0470d0d4cf: (3.520632ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35034]
I0212 07:24:32.113306  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-10: (1.106684ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35032]
I0212 07:24:32.113620  124047 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 07:24:32.113858  124047 scheduling_queue.go:868] About to try and schedule pod preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-16
I0212 07:24:32.113897  124047 scheduler.go:453] Attempting to schedule pod: preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-16
I0212 07:24:32.114013  124047 factory.go:647] Unable to schedule preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-16: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 07:24:32.114137  124047 factory.go:742] Updating pod condition for preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-16 to (PodScheduled==False, Reason=Unschedulable)
I0212 07:24:32.115980  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-16: (1.222655ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35024]
I0212 07:24:32.117092  124047 wrap.go:47] POST /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/events: (1.893581ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35036]
I0212 07:24:32.117236  124047 wrap.go:47] PUT /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-16/status: (2.163038ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35034]
I0212 07:24:32.118756  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-16: (1.069868ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35034]
I0212 07:24:32.119024  124047 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 07:24:32.119176  124047 scheduling_queue.go:868] About to try and schedule pod preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-16
I0212 07:24:32.119192  124047 scheduler.go:453] Attempting to schedule pod: preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-16
I0212 07:24:32.119267  124047 factory.go:647] Unable to schedule preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-16: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 07:24:32.119340  124047 factory.go:742] Updating pod condition for preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-16 to (PodScheduled==False, Reason=Unschedulable)
I0212 07:24:32.120848  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-16: (1.194148ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35024]
I0212 07:24:32.121349  124047 wrap.go:47] PUT /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-16/status: (1.754161ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35036]
I0212 07:24:32.123250  124047 wrap.go:47] PATCH /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/events/ppod-16.15828d0478c02dfb: (3.061443ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35038]
I0212 07:24:32.124064  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-16: (2.080056ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35036]
I0212 07:24:32.124463  124047 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 07:24:32.124704  124047 scheduling_queue.go:868] About to try and schedule pod preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-12
I0212 07:24:32.124719  124047 scheduler.go:453] Attempting to schedule pod: preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-12
I0212 07:24:32.124797  124047 factory.go:647] Unable to schedule preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-12: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 07:24:32.124855  124047 factory.go:742] Updating pod condition for preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-12 to (PodScheduled==False, Reason=Unschedulable)
I0212 07:24:32.126742  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-12: (1.59413ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35024]
I0212 07:24:32.126820  124047 wrap.go:47] PUT /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-12/status: (1.711046ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35038]
I0212 07:24:32.128717  124047 wrap.go:47] PATCH /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/events/ppod-12.15828d04767590ae: (2.607596ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35040]
I0212 07:24:32.129233  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-12: (1.500536ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35038]
I0212 07:24:32.129563  124047 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 07:24:32.129739  124047 scheduling_queue.go:868] About to try and schedule pod preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-11
I0212 07:24:32.129756  124047 scheduler.go:453] Attempting to schedule pod: preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-11
I0212 07:24:32.129867  124047 factory.go:647] Unable to schedule preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-11: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 07:24:32.129963  124047 factory.go:742] Updating pod condition for preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-11 to (PodScheduled==False, Reason=Unschedulable)
I0212 07:24:32.132149  124047 wrap.go:47] PUT /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-11/status: (1.916289ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35040]
I0212 07:24:32.132829  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-11: (2.020912ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35024]
I0212 07:24:32.133916  124047 wrap.go:47] PATCH /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/events/ppod-11.15828d0475c82521: (3.066075ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35042]
I0212 07:24:32.134165  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-11: (1.47243ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35040]
I0212 07:24:32.134456  124047 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 07:24:32.134659  124047 scheduling_queue.go:868] About to try and schedule pod preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-45
I0212 07:24:32.134675  124047 scheduler.go:453] Attempting to schedule pod: preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-45
I0212 07:24:32.134767  124047 factory.go:647] Unable to schedule preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-45: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 07:24:32.134829  124047 factory.go:742] Updating pod condition for preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-45 to (PodScheduled==False, Reason=Unschedulable)
I0212 07:24:32.136735  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-45: (1.696083ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35042]
I0212 07:24:32.137371  124047 wrap.go:47] POST /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/events: (1.926895ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35044]
I0212 07:24:32.137880  124047 wrap.go:47] PUT /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-45/status: (2.774282ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35024]
I0212 07:24:32.139943  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-45: (1.412881ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35044]
I0212 07:24:32.140319  124047 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 07:24:32.140571  124047 scheduling_queue.go:868] About to try and schedule pod preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-44
I0212 07:24:32.140595  124047 scheduler.go:453] Attempting to schedule pod: preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-44
I0212 07:24:32.140699  124047 factory.go:647] Unable to schedule preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-44: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 07:24:32.140763  124047 factory.go:742] Updating pod condition for preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-44 to (PodScheduled==False, Reason=Unschedulable)
I0212 07:24:32.143008  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-44: (1.37179ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35042]
I0212 07:24:32.143640  124047 wrap.go:47] POST /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/events: (1.919739ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35046]
I0212 07:24:32.143840  124047 wrap.go:47] PUT /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-44/status: (2.734786ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35044]
I0212 07:24:32.145789  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-44: (1.425691ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35046]
I0212 07:24:32.146136  124047 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 07:24:32.146385  124047 scheduling_queue.go:868] About to try and schedule pod preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-45
I0212 07:24:32.146402  124047 scheduler.go:453] Attempting to schedule pod: preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-45
I0212 07:24:32.146547  124047 factory.go:647] Unable to schedule preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-45: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 07:24:32.146630  124047 factory.go:742] Updating pod condition for preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-45 to (PodScheduled==False, Reason=Unschedulable)
I0212 07:24:32.148791  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-45: (1.761346ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35042]
I0212 07:24:32.149170  124047 wrap.go:47] PUT /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-45/status: (2.129302ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35046]
I0212 07:24:32.150739  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-45: (1.058171ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35046]
I0212 07:24:32.150775  124047 wrap.go:47] PATCH /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/events/ppod-45.15828d0479fbd86e: (2.891079ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35048]
I0212 07:24:32.151053  124047 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 07:24:32.151261  124047 scheduling_queue.go:868] About to try and schedule pod preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-44
I0212 07:24:32.151282  124047 scheduler.go:453] Attempting to schedule pod: preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-44
I0212 07:24:32.151402  124047 factory.go:647] Unable to schedule preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-44: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 07:24:32.151464  124047 factory.go:742] Updating pod condition for preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-44 to (PodScheduled==False, Reason=Unschedulable)
I0212 07:24:32.153731  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-44: (1.475258ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35046]
I0212 07:24:32.154272  124047 wrap.go:47] PUT /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-44/status: (2.01145ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35042]
I0212 07:24:32.155175  124047 wrap.go:47] PATCH /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/events/ppod-44.15828d047a5686d0: (2.741777ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35050]
I0212 07:24:32.156156  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-44: (1.384727ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35042]
I0212 07:24:32.156640  124047 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 07:24:32.156820  124047 scheduling_queue.go:868] About to try and schedule pod preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-32
I0212 07:24:32.156839  124047 scheduler.go:453] Attempting to schedule pod: preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-32
I0212 07:24:32.156926  124047 factory.go:647] Unable to schedule preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-32: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 07:24:32.156975  124047 factory.go:742] Updating pod condition for preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-32 to (PodScheduled==False, Reason=Unschedulable)
I0212 07:24:32.158599  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-32: (1.364777ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35050]
I0212 07:24:32.159592  124047 wrap.go:47] POST /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/events: (1.91207ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35052]
I0212 07:24:32.159643  124047 wrap.go:47] PUT /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-32/status: (2.436259ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35046]
I0212 07:24:32.161321  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-32: (1.208145ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35052]
I0212 07:24:32.161697  124047 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 07:24:32.161966  124047 scheduling_queue.go:868] About to try and schedule pod preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-41
I0212 07:24:32.161991  124047 scheduler.go:453] Attempting to schedule pod: preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-41
I0212 07:24:32.162125  124047 factory.go:647] Unable to schedule preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-41: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 07:24:32.162184  124047 factory.go:742] Updating pod condition for preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-41 to (PodScheduled==False, Reason=Unschedulable)
I0212 07:24:32.163943  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-41: (1.417453ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35050]
I0212 07:24:32.164977  124047 wrap.go:47] POST /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/events: (2.026106ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35054]
I0212 07:24:32.165139  124047 wrap.go:47] PUT /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-41/status: (2.647967ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35052]
I0212 07:24:32.166895  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-41: (1.232844ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35054]
I0212 07:24:32.167351  124047 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 07:24:32.167555  124047 scheduling_queue.go:868] About to try and schedule pod preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-32
I0212 07:24:32.167574  124047 scheduler.go:453] Attempting to schedule pod: preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-32
I0212 07:24:32.167673  124047 factory.go:647] Unable to schedule preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-32: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 07:24:32.167729  124047 factory.go:742] Updating pod condition for preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-32 to (PodScheduled==False, Reason=Unschedulable)
I0212 07:24:32.169835  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-32: (1.720848ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35054]
I0212 07:24:32.170916  124047 wrap.go:47] PUT /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-32/status: (2.788495ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35050]
I0212 07:24:32.171126  124047 wrap.go:47] PATCH /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/events/ppod-32.15828d047b4df752: (2.5049ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35056]
I0212 07:24:32.172436  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-32: (1.059851ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35050]
I0212 07:24:32.172686  124047 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 07:24:32.172955  124047 scheduling_queue.go:868] About to try and schedule pod preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-41
I0212 07:24:32.172979  124047 scheduler.go:453] Attempting to schedule pod: preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-41
I0212 07:24:32.173110  124047 factory.go:647] Unable to schedule preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-41: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 07:24:32.173171  124047 factory.go:742] Updating pod condition for preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-41 to (PodScheduled==False, Reason=Unschedulable)
I0212 07:24:32.174879  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-41: (1.351309ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35050]
I0212 07:24:32.175280  124047 wrap.go:47] PUT /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-41/status: (1.74226ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35054]
I0212 07:24:32.176700  124047 wrap.go:47] PATCH /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/events/ppod-41.15828d047b9d679f: (2.54738ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35058]
I0212 07:24:32.177161  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-41: (1.082024ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35054]
I0212 07:24:32.177527  124047 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 07:24:32.177781  124047 scheduling_queue.go:868] About to try and schedule pod preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-30
I0212 07:24:32.177801  124047 scheduler.go:453] Attempting to schedule pod: preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-30
I0212 07:24:32.177920  124047 factory.go:647] Unable to schedule preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-30: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 07:24:32.177981  124047 factory.go:742] Updating pod condition for preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-30 to (PodScheduled==False, Reason=Unschedulable)
I0212 07:24:32.181904  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-30: (2.301261ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35062]
I0212 07:24:32.181947  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/preemptor-pod: (3.156193ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35064]
I0212 07:24:32.182443  124047 wrap.go:47] PUT /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-30/status: (2.624831ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35058]
I0212 07:24:32.182786  124047 wrap.go:47] POST /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/events: (4.343284ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35050]
I0212 07:24:32.184776  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-30: (1.924871ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35064]
I0212 07:24:32.185091  124047 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 07:24:32.185355  124047 scheduling_queue.go:868] About to try and schedule pod preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-40
I0212 07:24:32.185385  124047 scheduler.go:453] Attempting to schedule pod: preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-40
I0212 07:24:32.185559  124047 factory.go:647] Unable to schedule preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-40: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 07:24:32.185617  124047 factory.go:742] Updating pod condition for preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-40 to (PodScheduled==False, Reason=Unschedulable)
I0212 07:24:32.187485  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-40: (1.215816ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35070]
I0212 07:24:32.187722  124047 wrap.go:47] POST /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/events: (1.738625ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35062]
I0212 07:24:32.187955  124047 wrap.go:47] PUT /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-40/status: (2.042932ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35050]
I0212 07:24:32.189753  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-40: (1.323473ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35062]
I0212 07:24:32.190006  124047 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 07:24:32.190196  124047 scheduling_queue.go:868] About to try and schedule pod preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-30
I0212 07:24:32.190216  124047 scheduler.go:453] Attempting to schedule pod: preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-30
I0212 07:24:32.190323  124047 factory.go:647] Unable to schedule preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-30: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 07:24:32.190380  124047 factory.go:742] Updating pod condition for preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-30 to (PodScheduled==False, Reason=Unschedulable)
I0212 07:24:32.192401  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-30: (1.201867ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35062]
I0212 07:24:32.193289  124047 wrap.go:47] PUT /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-30/status: (2.091704ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35070]
I0212 07:24:32.194394  124047 wrap.go:47] PATCH /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/events/ppod-30.15828d047c8e42e1: (3.04852ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35072]
I0212 07:24:32.195675  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-30: (1.388699ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35070]
I0212 07:24:32.196137  124047 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 07:24:32.196416  124047 scheduling_queue.go:868] About to try and schedule pod preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-40
I0212 07:24:32.196440  124047 scheduler.go:453] Attempting to schedule pod: preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-40
I0212 07:24:32.196602  124047 factory.go:647] Unable to schedule preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-40: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 07:24:32.196666  124047 factory.go:742] Updating pod condition for preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-40 to (PodScheduled==False, Reason=Unschedulable)
I0212 07:24:32.199013  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-40: (2.04513ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35062]
I0212 07:24:32.199253  124047 wrap.go:47] PUT /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-40/status: (2.306681ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35072]
I0212 07:24:32.201001  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-40: (1.260928ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35072]
I0212 07:24:32.201133  124047 wrap.go:47] PATCH /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/events/ppod-40.15828d047d02f52d: (3.338514ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35074]
I0212 07:24:32.201319  124047 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 07:24:32.201522  124047 scheduling_queue.go:868] About to try and schedule pod preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-26
I0212 07:24:32.201543  124047 scheduler.go:453] Attempting to schedule pod: preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-26
I0212 07:24:32.201662  124047 factory.go:647] Unable to schedule preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-26: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 07:24:32.201724  124047 factory.go:742] Updating pod condition for preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-26 to (PodScheduled==False, Reason=Unschedulable)
I0212 07:24:32.203296  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-26: (1.206277ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35062]
I0212 07:24:32.203963  124047 wrap.go:47] PUT /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-26/status: (1.990689ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35072]
I0212 07:24:32.204288  124047 wrap.go:47] POST /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/events: (1.813588ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35076]
I0212 07:24:32.205784  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-26: (1.294797ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35072]
I0212 07:24:32.206098  124047 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 07:24:32.206292  124047 scheduling_queue.go:868] About to try and schedule pod preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-38
I0212 07:24:32.206312  124047 scheduler.go:453] Attempting to schedule pod: preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-38
I0212 07:24:32.206423  124047 factory.go:647] Unable to schedule preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-38: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 07:24:32.206479  124047 factory.go:742] Updating pod condition for preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-38 to (PodScheduled==False, Reason=Unschedulable)
I0212 07:24:32.207983  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-38: (1.251925ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35076]
I0212 07:24:32.209068  124047 wrap.go:47] POST /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/events: (1.806373ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35078]
I0212 07:24:32.209193  124047 wrap.go:47] PUT /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-38/status: (2.462562ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35062]
I0212 07:24:32.211159  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-38: (1.24301ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35078]
I0212 07:24:32.211430  124047 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 07:24:32.211640  124047 scheduling_queue.go:868] About to try and schedule pod preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-26
I0212 07:24:32.211663  124047 scheduler.go:453] Attempting to schedule pod: preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-26
I0212 07:24:32.211760  124047 factory.go:647] Unable to schedule preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-26: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 07:24:32.211843  124047 factory.go:742] Updating pod condition for preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-26 to (PodScheduled==False, Reason=Unschedulable)
I0212 07:24:32.213552  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-26: (1.394277ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35076]
I0212 07:24:32.214156  124047 wrap.go:47] PUT /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-26/status: (2.040093ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35078]
I0212 07:24:32.215677  124047 wrap.go:47] PATCH /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/events/ppod-26.15828d047df89ef2: (2.466287ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35080]
I0212 07:24:32.216410  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-26: (1.737326ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35078]
I0212 07:24:32.216730  124047 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 07:24:32.216951  124047 scheduling_queue.go:868] About to try and schedule pod preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-38
I0212 07:24:32.216973  124047 scheduler.go:453] Attempting to schedule pod: preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-38
I0212 07:24:32.217107  124047 factory.go:647] Unable to schedule preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-38: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 07:24:32.217161  124047 factory.go:742] Updating pod condition for preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-38 to (PodScheduled==False, Reason=Unschedulable)
I0212 07:24:32.218772  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-38: (1.327442ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35076]
I0212 07:24:32.219195  124047 wrap.go:47] PUT /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-38/status: (1.724356ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35080]
I0212 07:24:32.221064  124047 wrap.go:47] PATCH /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/events/ppod-38.15828d047e415433: (2.858402ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35082]
I0212 07:24:32.221116  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-38: (1.454866ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35080]
I0212 07:24:32.221392  124047 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 07:24:32.221701  124047 scheduling_queue.go:868] About to try and schedule pod preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-35
I0212 07:24:32.221725  124047 scheduler.go:453] Attempting to schedule pod: preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-35
I0212 07:24:32.221824  124047 factory.go:647] Unable to schedule preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-35: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 07:24:32.221885  124047 factory.go:742] Updating pod condition for preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-35 to (PodScheduled==False, Reason=Unschedulable)
I0212 07:24:32.223339  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-35: (1.116762ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35076]
I0212 07:24:32.224167  124047 wrap.go:47] POST /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/events: (1.508247ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35084]
I0212 07:24:32.224605  124047 wrap.go:47] PUT /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-35/status: (2.392522ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35080]
I0212 07:24:32.226424  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-35: (1.149551ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35084]
I0212 07:24:32.226695  124047 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 07:24:32.226959  124047 scheduling_queue.go:868] About to try and schedule pod preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-17
I0212 07:24:32.227003  124047 scheduler.go:453] Attempting to schedule pod: preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-17
I0212 07:24:32.227134  124047 factory.go:647] Unable to schedule preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-17: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 07:24:32.227194  124047 factory.go:742] Updating pod condition for preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-17 to (PodScheduled==False, Reason=Unschedulable)
I0212 07:24:32.228891  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-17: (1.393291ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35076]
I0212 07:24:32.229259  124047 wrap.go:47] PUT /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-17/status: (1.831335ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35084]
I0212 07:24:32.230861  124047 wrap.go:47] PATCH /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/events/ppod-17.15828d047412a489: (2.819971ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35086]
I0212 07:24:32.231098  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-17: (1.207694ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35084]
I0212 07:24:32.231408  124047 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 07:24:32.231582  124047 scheduling_queue.go:868] About to try and schedule pod preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-35
I0212 07:24:32.231601  124047 scheduler.go:453] Attempting to schedule pod: preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-35
I0212 07:24:32.231716  124047 factory.go:647] Unable to schedule preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-35: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 07:24:32.231826  124047 factory.go:742] Updating pod condition for preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-35 to (PodScheduled==False, Reason=Unschedulable)
I0212 07:24:32.233389  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-35: (1.295876ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35076]
I0212 07:24:32.234054  124047 wrap.go:47] PUT /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-35/status: (1.947932ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35086]
I0212 07:24:32.235819  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-35: (1.05402ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35086]
I0212 07:24:32.236026  124047 wrap.go:47] PATCH /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/events/ppod-35.15828d047f2c5ac7: (3.124907ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35088]
I0212 07:24:32.236125  124047 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 07:24:32.236287  124047 scheduling_queue.go:868] About to try and schedule pod preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-34
I0212 07:24:32.236308  124047 scheduler.go:453] Attempting to schedule pod: preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-34
I0212 07:24:32.236422  124047 factory.go:647] Unable to schedule preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-34: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 07:24:32.236475  124047 factory.go:742] Updating pod condition for preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-34 to (PodScheduled==False, Reason=Unschedulable)
I0212 07:24:32.238419  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-34: (1.501266ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35076]
I0212 07:24:32.238843  124047 wrap.go:47] POST /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/events: (1.664058ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35090]
I0212 07:24:32.239401  124047 wrap.go:47] PUT /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-34/status: (2.481006ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35086]
I0212 07:24:32.241024  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-34: (1.265245ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35086]
I0212 07:24:32.241316  124047 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 07:24:32.241477  124047 scheduling_queue.go:868] About to try and schedule pod preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-9
I0212 07:24:32.241512  124047 scheduler.go:453] Attempting to schedule pod: preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-9
I0212 07:24:32.241587  124047 factory.go:647] Unable to schedule preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-9: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 07:24:32.241657  124047 factory.go:742] Updating pod condition for preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-9 to (PodScheduled==False, Reason=Unschedulable)
I0212 07:24:32.243228  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-9: (1.206242ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35090]
I0212 07:24:32.243719  124047 wrap.go:47] PUT /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-9/status: (1.715975ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35086]
I0212 07:24:32.245758  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-9: (1.545037ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35086]
I0212 07:24:32.246129  124047 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 07:24:32.246290  124047 scheduling_queue.go:868] About to try and schedule pod preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-34
I0212 07:24:32.246312  124047 scheduler.go:453] Attempting to schedule pod: preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-34
I0212 07:24:32.246477  124047 factory.go:647] Unable to schedule preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-34: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 07:24:32.246560  124047 wrap.go:47] PATCH /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/events/ppod-9.15828d0473821ef2: (2.782816ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35092]
I0212 07:24:32.246568  124047 factory.go:742] Updating pod condition for preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-34 to (PodScheduled==False, Reason=Unschedulable)
I0212 07:24:32.248070  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-34: (1.209957ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35086]
I0212 07:24:32.248853  124047 wrap.go:47] PUT /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-34/status: (1.987534ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35090]
I0212 07:24:32.250028  124047 wrap.go:47] PATCH /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/events/ppod-34.15828d04800ae6c4: (2.26945ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35092]
I0212 07:24:32.250392  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-34: (1.14214ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35090]
I0212 07:24:32.250770  124047 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 07:24:32.250949  124047 scheduling_queue.go:868] About to try and schedule pod preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-31
I0212 07:24:32.250968  124047 scheduler.go:453] Attempting to schedule pod: preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-31
I0212 07:24:32.251068  124047 factory.go:647] Unable to schedule preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-31: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 07:24:32.251130  124047 factory.go:742] Updating pod condition for preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-31 to (PodScheduled==False, Reason=Unschedulable)
I0212 07:24:32.252879  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-31: (1.103519ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35086]
I0212 07:24:32.253915  124047 wrap.go:47] POST /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/events: (1.6406ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35094]
I0212 07:24:32.254402  124047 wrap.go:47] PUT /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-31/status: (2.663459ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35092]
I0212 07:24:32.256279  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-31: (1.304588ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35094]
I0212 07:24:32.256610  124047 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 07:24:32.256799  124047 scheduling_queue.go:868] About to try and schedule pod preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-2
I0212 07:24:32.256824  124047 scheduler.go:453] Attempting to schedule pod: preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-2
I0212 07:24:32.256951  124047 factory.go:647] Unable to schedule preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-2: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 07:24:32.257025  124047 factory.go:742] Updating pod condition for preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-2 to (PodScheduled==False, Reason=Unschedulable)
I0212 07:24:32.258736  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-2: (1.412411ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35086]
I0212 07:24:32.259565  124047 wrap.go:47] PUT /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-2/status: (2.264465ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35094]
I0212 07:24:32.260629  124047 wrap.go:47] PATCH /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/events/ppod-2.15828d0473371a42: (2.558127ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35096]
I0212 07:24:32.261206  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-2: (1.103267ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35094]
I0212 07:24:32.261596  124047 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 07:24:32.261794  124047 scheduling_queue.go:868] About to try and schedule pod preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-31
I0212 07:24:32.261813  124047 scheduler.go:453] Attempting to schedule pod: preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-31
I0212 07:24:32.261916  124047 factory.go:647] Unable to schedule preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-31: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 07:24:32.261976  124047 factory.go:742] Updating pod condition for preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-31 to (PodScheduled==False, Reason=Unschedulable)
I0212 07:24:32.263803  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-31: (1.510353ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35086]
I0212 07:24:32.264020  124047 wrap.go:47] PUT /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-31/status: (1.741387ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35096]
I0212 07:24:32.265630  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-31: (1.163086ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35096]
I0212 07:24:32.265880  124047 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 07:24:32.266082  124047 scheduling_queue.go:868] About to try and schedule pod preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-29
I0212 07:24:32.266114  124047 scheduler.go:453] Attempting to schedule pod: preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-29
I0212 07:24:32.266209  124047 wrap.go:47] PATCH /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/events/ppod-31.15828d0480ea9b98: (2.418824ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35098]
I0212 07:24:32.266205  124047 factory.go:647] Unable to schedule preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-29: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 07:24:32.266241  124047 factory.go:742] Updating pod condition for preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-29 to (PodScheduled==False, Reason=Unschedulable)
I0212 07:24:32.267869  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-29: (1.363582ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35096]
I0212 07:24:32.269480  124047 wrap.go:47] POST /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/events: (2.557688ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35100]
I0212 07:24:32.270156  124047 wrap.go:47] PUT /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-29/status: (3.669169ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35086]
I0212 07:24:32.272034  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-29: (1.309185ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35100]
I0212 07:24:32.272320  124047 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 07:24:32.272617  124047 scheduling_queue.go:868] About to try and schedule pod preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-0
I0212 07:24:32.272636  124047 scheduler.go:453] Attempting to schedule pod: preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-0
I0212 07:24:32.272752  124047 factory.go:647] Unable to schedule preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-0: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 07:24:32.272812  124047 factory.go:742] Updating pod condition for preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-0 to (PodScheduled==False, Reason=Unschedulable)
I0212 07:24:32.274594  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-0: (1.473658ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35096]
I0212 07:24:32.275072  124047 wrap.go:47] PUT /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-0/status: (1.951023ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35100]
I0212 07:24:32.276030  124047 wrap.go:47] PATCH /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/events/ppod-0.15828d0472e4eb66: (2.375511ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35102]
I0212 07:24:32.276806  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-0: (1.234616ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35100]
I0212 07:24:32.277110  124047 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 07:24:32.277257  124047 scheduling_queue.go:868] About to try and schedule pod preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-29
I0212 07:24:32.277276  124047 scheduler.go:453] Attempting to schedule pod: preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-29
I0212 07:24:32.277393  124047 factory.go:647] Unable to schedule preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-29: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 07:24:32.277439  124047 factory.go:742] Updating pod condition for preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-29 to (PodScheduled==False, Reason=Unschedulable)
I0212 07:24:32.279439  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-29: (1.698935ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35096]
I0212 07:24:32.279571  124047 wrap.go:47] PUT /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-29/status: (1.830485ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35102]
I0212 07:24:32.280783  124047 wrap.go:47] PATCH /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/events/ppod-29.15828d0481d1437c: (2.454584ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35104]
I0212 07:24:32.282287  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-29: (1.340454ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35102]
I0212 07:24:32.282617  124047 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 07:24:32.282822  124047 scheduling_queue.go:868] About to try and schedule pod preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-27
I0212 07:24:32.282846  124047 scheduler.go:453] Attempting to schedule pod: preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-27
I0212 07:24:32.283001  124047 factory.go:647] Unable to schedule preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-27: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 07:24:32.283068  124047 factory.go:742] Updating pod condition for preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-27 to (PodScheduled==False, Reason=Unschedulable)
I0212 07:24:32.283892  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/preemptor-pod: (1.345492ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35104]
I0212 07:24:32.284174  124047 preemption_test.go:583] Check unschedulable pods still exists and were never scheduled...
I0212 07:24:32.284363  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-27: (1.05017ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35096]
I0212 07:24:32.286372  124047 wrap.go:47] POST /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/events: (1.91522ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35104]
I0212 07:24:32.286429  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-0: (1.716604ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35106]
I0212 07:24:32.286455  124047 wrap.go:47] PUT /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-27/status: (3.132406ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35102]
I0212 07:24:32.288205  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-1: (1.285393ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35108]
I0212 07:24:32.288735  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-27: (1.391602ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35102]
I0212 07:24:32.289101  124047 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 07:24:32.289338  124047 scheduling_queue.go:868] About to try and schedule pod preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-25
I0212 07:24:32.289357  124047 scheduler.go:453] Attempting to schedule pod: preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-25
I0212 07:24:32.289517  124047 factory.go:647] Unable to schedule preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-25: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 07:24:32.289570  124047 factory.go:742] Updating pod condition for preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-25 to (PodScheduled==False, Reason=Unschedulable)
I0212 07:24:32.290008  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-2: (1.353797ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35108]
I0212 07:24:32.291832  124047 wrap.go:47] POST /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/events: (1.712306ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35110]
I0212 07:24:32.292309  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-3: (1.92736ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35108]
I0212 07:24:32.293221  124047 wrap.go:47] PUT /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-25/status: (3.376307ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35102]
I0212 07:24:32.293484  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-25: (3.679308ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35096]
I0212 07:24:32.295023  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-4: (1.357146ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35108]
I0212 07:24:32.295031  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-25: (1.095575ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35102]
I0212 07:24:32.295409  124047 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 07:24:32.295669  124047 scheduling_queue.go:868] About to try and schedule pod preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-27
I0212 07:24:32.295689  124047 scheduler.go:453] Attempting to schedule pod: preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-27
I0212 07:24:32.295854  124047 factory.go:647] Unable to schedule preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-27: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 07:24:32.295936  124047 factory.go:742] Updating pod condition for preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-27 to (PodScheduled==False, Reason=Unschedulable)
I0212 07:24:32.299596  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-5: (4.117259ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35108]
I0212 07:24:32.301054  124047 wrap.go:47] PUT /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-27/status: (1.90603ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35112]
I0212 07:24:32.301054  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-27: (1.486645ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35110]
I0212 07:24:32.301383  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-6: (1.205423ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35116]
I0212 07:24:32.302999  124047 wrap.go:47] PATCH /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/events/ppod-27.15828d0482d1b200: (2.869942ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35108]
I0212 07:24:32.303080  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-7: (1.279818ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35114]
I0212 07:24:32.303852  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-27: (2.017051ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35110]
I0212 07:24:32.304203  124047 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 07:24:32.304382  124047 scheduling_queue.go:868] About to try and schedule pod preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-25
I0212 07:24:32.304429  124047 scheduler.go:453] Attempting to schedule pod: preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-25
I0212 07:24:32.304561  124047 factory.go:647] Unable to schedule preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-25: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 07:24:32.304609  124047 factory.go:742] Updating pod condition for preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-25 to (PodScheduled==False, Reason=Unschedulable)
I0212 07:24:32.304725  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-8: (1.251813ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35114]
I0212 07:24:32.306340  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-9: (1.181419ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35108]
I0212 07:24:32.306575  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-25: (1.372798ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35114]
I0212 07:24:32.307087  124047 wrap.go:47] PUT /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-25/status: (2.250218ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35110]
I0212 07:24:32.308707  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-25: (1.147618ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35110]
I0212 07:24:32.308843  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-10: (1.650774ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35108]
I0212 07:24:32.309230  124047 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 07:24:32.309420  124047 scheduling_queue.go:868] About to try and schedule pod preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-21
I0212 07:24:32.309436  124047 scheduler.go:453] Attempting to schedule pod: preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-21
I0212 07:24:32.309618  124047 wrap.go:47] PATCH /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/events/ppod-25.15828d0483352de0: (3.973012ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35118]
I0212 07:24:32.309623  124047 factory.go:647] Unable to schedule preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-21: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 07:24:32.309672  124047 factory.go:742] Updating pod condition for preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-21 to (PodScheduled==False, Reason=Unschedulable)
I0212 07:24:32.310572  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-11: (1.101207ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35108]
I0212 07:24:32.312598  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-21: (2.453869ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35118]
I0212 07:24:32.312785  124047 wrap.go:47] PUT /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-21/status: (2.331898ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35114]
I0212 07:24:32.313007  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-12: (1.991274ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35108]
I0212 07:24:32.313626  124047 wrap.go:47] PATCH /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/events/ppod-21.15828d04728b477d: (2.901159ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35120]
I0212 07:24:32.314703  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-21: (1.456803ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35114]
I0212 07:24:32.314961  124047 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 07:24:32.315136  124047 scheduling_queue.go:868] About to try and schedule pod preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-43
I0212 07:24:32.315188  124047 scheduler.go:453] Attempting to schedule pod: preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-43
I0212 07:24:32.315382  124047 factory.go:647] Unable to schedule preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-43: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 07:24:32.315455  124047 factory.go:742] Updating pod condition for preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-43 to (PodScheduled==False, Reason=Unschedulable)
I0212 07:24:32.315734  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-13: (2.096529ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35108]
I0212 07:24:32.316997  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-43: (1.201525ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35118]
I0212 07:24:32.318298  124047 wrap.go:47] POST /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/events: (2.221766ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35108]
I0212 07:24:32.318445  124047 wrap.go:47] PUT /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-43/status: (2.709504ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35120]
I0212 07:24:32.318595  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-14: (2.162467ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35124]
I0212 07:24:32.320136  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-43: (1.272233ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35118]
I0212 07:24:32.320293  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-15: (1.046252ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35122]
I0212 07:24:32.320643  124047 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 07:24:32.320926  124047 scheduling_queue.go:868] About to try and schedule pod preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-1
I0212 07:24:32.320950  124047 scheduler.go:453] Attempting to schedule pod: preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-1
I0212 07:24:32.321198  124047 factory.go:647] Unable to schedule preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-1: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 07:24:32.321248  124047 factory.go:742] Updating pod condition for preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-1 to (PodScheduled==False, Reason=Unschedulable)
I0212 07:24:32.322081  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-16: (1.309499ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35122]
I0212 07:24:32.322464  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-1: (1.034913ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35118]
I0212 07:24:32.323683  124047 wrap.go:47] PUT /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-1/status: (1.992881ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35126]
I0212 07:24:32.323806  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-17: (1.060878ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35118]
I0212 07:24:32.325336  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-18: (1.005339ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35126]
I0212 07:24:32.325390  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-1: (1.178033ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35118]
I0212 07:24:32.325886  124047 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 07:24:32.325964  124047 wrap.go:47] PATCH /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/events/ppod-1.15828d0476bec051: (3.412981ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35122]
I0212 07:24:32.326128  124047 scheduling_queue.go:868] About to try and schedule pod preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-43
I0212 07:24:32.326150  124047 scheduler.go:453] Attempting to schedule pod: preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-43
I0212 07:24:32.326249  124047 factory.go:647] Unable to schedule preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-43: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 07:24:32.326338  124047 factory.go:742] Updating pod condition for preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-43 to (PodScheduled==False, Reason=Unschedulable)
I0212 07:24:32.327299  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-19: (1.304854ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35118]
I0212 07:24:32.328744  124047 wrap.go:47] PUT /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-43/status: (1.851598ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35122]
I0212 07:24:32.331310  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-20: (3.566508ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35118]
I0212 07:24:32.332246  124047 wrap.go:47] PATCH /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/events/ppod-43.15828d0484c0142b: (5.135224ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35128]
I0212 07:24:32.332603  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-43: (1.439395ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35122]
I0212 07:24:32.332614  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-43: (2.020336ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35126]
I0212 07:24:32.333078  124047 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 07:24:32.333326  124047 scheduling_queue.go:868] About to try and schedule pod preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-37
I0212 07:24:32.333349  124047 scheduler.go:453] Attempting to schedule pod: preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-37
I0212 07:24:32.333436  124047 factory.go:647] Unable to schedule preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-37: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 07:24:32.333572  124047 factory.go:742] Updating pod condition for preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-37 to (PodScheduled==False, Reason=Unschedulable)
I0212 07:24:32.334066  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-21: (1.145233ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35118]
I0212 07:24:32.335227  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-37: (1.253665ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35128]
I0212 07:24:32.336290  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-22: (1.364239ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35118]
I0212 07:24:32.336325  124047 wrap.go:47] POST /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/events: (1.863796ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35130]
I0212 07:24:32.336681  124047 wrap.go:47] PUT /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-37/status: (2.50834ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35126]
I0212 07:24:32.338223  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-23: (1.340874ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35130]
I0212 07:24:32.338289  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-37: (1.070138ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35126]
I0212 07:24:32.338624  124047 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 07:24:32.338795  124047 scheduling_queue.go:868] About to try and schedule pod preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-19
I0212 07:24:32.338813  124047 scheduler.go:453] Attempting to schedule pod: preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-19
I0212 07:24:32.338947  124047 factory.go:647] Unable to schedule preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-19: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 07:24:32.339060  124047 factory.go:742] Updating pod condition for preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-19 to (PodScheduled==False, Reason=Unschedulable)
I0212 07:24:32.339898  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-24: (1.273081ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35130]
I0212 07:24:32.341123  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-19: (1.11988ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35132]
I0212 07:24:32.341217  124047 wrap.go:47] PUT /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-19/status: (1.879988ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35128]
I0212 07:24:32.341715  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-25: (1.424989ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35130]
I0212 07:24:32.342794  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-19: (1.179717ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35128]
I0212 07:24:32.342911  124047 wrap.go:47] PATCH /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/events/ppod-19.15828d0474b7f7f0: (2.984253ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35134]
I0212 07:24:32.343238  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-26: (1.067566ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35130]
I0212 07:24:32.343263  124047 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 07:24:32.343426  124047 scheduling_queue.go:868] About to try and schedule pod preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-37
I0212 07:24:32.343450  124047 scheduler.go:453] Attempting to schedule pod: preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-37
I0212 07:24:32.343600  124047 factory.go:647] Unable to schedule preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-37: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 07:24:32.343684  124047 factory.go:742] Updating pod condition for preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-37 to (PodScheduled==False, Reason=Unschedulable)
I0212 07:24:32.345084  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-27: (1.322476ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35134]
I0212 07:24:32.345201  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-37: (1.267257ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35132]
I0212 07:24:32.346077  124047 wrap.go:47] PUT /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-37/status: (1.962934ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35136]
I0212 07:24:32.346899  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-28: (1.23074ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35132]
I0212 07:24:32.347005  124047 wrap.go:47] PATCH /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/events/ppod-37.15828d0485d346e8: (2.508233ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35138]
I0212 07:24:32.347932  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-37: (1.22702ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35136]
I0212 07:24:32.348276  124047 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 07:24:32.348557  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-29: (1.191426ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35132]
I0212 07:24:32.348559  124047 scheduling_queue.go:868] About to try and schedule pod preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-33
I0212 07:24:32.348644  124047 scheduler.go:453] Attempting to schedule pod: preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-33
I0212 07:24:32.348726  124047 factory.go:647] Unable to schedule preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-33: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 07:24:32.348785  124047 factory.go:742] Updating pod condition for preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-33 to (PodScheduled==False, Reason=Unschedulable)
I0212 07:24:32.354877  124047 wrap.go:47] POST /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/events: (4.865288ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35142]
I0212 07:24:32.355089  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-33: (5.810576ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35140]
I0212 07:24:32.355164  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-30: (6.170812ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35138]
I0212 07:24:32.355708  124047 wrap.go:47] PUT /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-33/status: (6.692106ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35134]
I0212 07:24:32.357082  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-31: (1.228572ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35140]
I0212 07:24:32.357598  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-33: (1.210344ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35134]
I0212 07:24:32.357880  124047 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 07:24:32.358060  124047 scheduling_queue.go:868] About to try and schedule pod preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-28
I0212 07:24:32.358083  124047 scheduler.go:453] Attempting to schedule pod: preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-28
I0212 07:24:32.358208  124047 factory.go:647] Unable to schedule preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-28: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 07:24:32.358283  124047 factory.go:742] Updating pod condition for preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-28 to (PodScheduled==False, Reason=Unschedulable)
I0212 07:24:32.358798  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-32: (1.20403ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35140]
I0212 07:24:32.360130  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-28: (1.197427ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35142]
I0212 07:24:32.360588  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-33: (1.189057ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35140]
I0212 07:24:32.361472  124047 wrap.go:47] POST /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/events: (2.488859ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35146]
I0212 07:24:32.361782  124047 wrap.go:47] PUT /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-28/status: (2.808921ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35134]
I0212 07:24:32.362623  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-34: (1.048687ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35140]
I0212 07:24:32.363458  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-28: (1.268316ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35146]
I0212 07:24:32.363807  124047 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 07:24:32.363966  124047 scheduling_queue.go:868] About to try and schedule pod preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-33
I0212 07:24:32.363988  124047 scheduler.go:453] Attempting to schedule pod: preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-33
I0212 07:24:32.364087  124047 factory.go:647] Unable to schedule preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-33: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 07:24:32.364129  124047 factory.go:742] Updating pod condition for preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-33 to (PodScheduled==False, Reason=Unschedulable)
I0212 07:24:32.364557  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-35: (1.474957ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35140]
I0212 07:24:32.366136  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-33: (1.390209ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35142]
I0212 07:24:32.366304  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-36: (1.098799ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35140]
I0212 07:24:32.367275  124047 wrap.go:47] PUT /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-33/status: (2.200083ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35146]
I0212 07:24:32.367464  124047 wrap.go:47] PATCH /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/events/ppod-33.15828d0486bcbc9b: (2.150264ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35148]
I0212 07:24:32.368734  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-37: (1.3005ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35140]
I0212 07:24:32.368816  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-33: (1.138422ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35146]
I0212 07:24:32.369104  124047 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 07:24:32.369276  124047 scheduling_queue.go:868] About to try and schedule pod preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-28
I0212 07:24:32.369299  124047 scheduler.go:453] Attempting to schedule pod: preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-28
I0212 07:24:32.369410  124047 factory.go:647] Unable to schedule preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-28: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 07:24:32.369468  124047 factory.go:742] Updating pod condition for preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-28 to (PodScheduled==False, Reason=Unschedulable)
I0212 07:24:32.371614  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-38: (2.338901ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35148]
I0212 07:24:32.372164  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-28: (1.736429ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35150]
I0212 07:24:32.372410  124047 wrap.go:47] PUT /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-28/status: (2.520728ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35142]
I0212 07:24:32.372981  124047 wrap.go:47] PATCH /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/events/ppod-28.15828d04874d9dfd: (2.672793ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35152]
I0212 07:24:32.373403  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-39: (1.304231ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35148]
I0212 07:24:32.374995  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-28: (1.617952ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35142]
I0212 07:24:32.375296  124047 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 07:24:32.375532  124047 scheduling_queue.go:868] About to try and schedule pod preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-24
I0212 07:24:32.375553  124047 scheduler.go:453] Attempting to schedule pod: preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-24
I0212 07:24:32.375580  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-40: (1.627526ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35152]
I0212 07:24:32.375680  124047 factory.go:647] Unable to schedule preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-24: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 07:24:32.375836  124047 factory.go:742] Updating pod condition for preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-24 to (PodScheduled==False, Reason=Unschedulable)
I0212 07:24:32.377371  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-41: (1.255668ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35142]
I0212 07:24:32.377895  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-24: (1.339732ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35154]
I0212 07:24:32.378515  124047 wrap.go:47] PUT /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-24/status: (2.162797ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35150]
I0212 07:24:32.379422  124047 wrap.go:47] POST /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/events: (1.495755ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35156]
I0212 07:24:32.380246  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-24: (1.07606ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35150]
I0212 07:24:32.380250  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-42: (1.28369ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35142]
I0212 07:24:32.380545  124047 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 07:24:32.380697  124047 scheduling_queue.go:868] About to try and schedule pod preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-47
I0212 07:24:32.380717  124047 scheduler.go:453] Attempting to schedule pod: preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-47
I0212 07:24:32.380935  124047 factory.go:647] Unable to schedule preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-47: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 07:24:32.380991  124047 factory.go:742] Updating pod condition for preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-47 to (PodScheduled==False, Reason=Unschedulable)
I0212 07:24:32.381856  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-43: (1.121293ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35156]
I0212 07:24:32.382732  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-47: (1.480651ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35154]
I0212 07:24:32.383351  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-44: (1.060209ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35156]
I0212 07:24:32.383763  124047 wrap.go:47] POST /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/events: (2.109625ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35158]
I0212 07:24:32.384637  124047 wrap.go:47] PUT /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-47/status: (2.920952ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35160]
I0212 07:24:32.385241  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-45: (1.474534ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35156]
I0212 07:24:32.386602  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-47: (1.194259ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35158]
I0212 07:24:32.386865  124047 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 07:24:32.386901  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-46: (1.189362ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35156]
I0212 07:24:32.387123  124047 scheduling_queue.go:868] About to try and schedule pod preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-24
I0212 07:24:32.387144  124047 scheduler.go:453] Attempting to schedule pod: preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-24
I0212 07:24:32.387258  124047 factory.go:647] Unable to schedule preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-24: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 07:24:32.387312  124047 factory.go:742] Updating pod condition for preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-24 to (PodScheduled==False, Reason=Unschedulable)
I0212 07:24:32.388453  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-47: (1.132485ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35158]
I0212 07:24:32.388808  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-24: (1.198316ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35154]
I0212 07:24:32.390563  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-48: (1.286559ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35154]
I0212 07:24:32.390726  124047 wrap.go:47] PUT /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-24/status: (2.198192ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35162]
I0212 07:24:32.391117  124047 wrap.go:47] PATCH /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/events/ppod-24.15828d0488588089: (2.728512ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35164]
I0212 07:24:32.392347  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-49: (1.241121ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35154]
I0212 07:24:32.392421  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-24: (1.086918ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35164]
I0212 07:24:32.392590  124047 preemption_test.go:598] Cleaning up all pods...
I0212 07:24:32.392746  124047 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 07:24:32.392916  124047 scheduling_queue.go:868] About to try and schedule pod preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-47
I0212 07:24:32.392933  124047 scheduler.go:453] Attempting to schedule pod: preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-47
I0212 07:24:32.393022  124047 factory.go:647] Unable to schedule preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-47: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 07:24:32.393079  124047 factory.go:742] Updating pod condition for preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-47 to (PodScheduled==False, Reason=Unschedulable)
I0212 07:24:32.397121  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-47: (3.557668ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35166]
I0212 07:24:32.397223  124047 wrap.go:47] PATCH /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/events/ppod-47.15828d0488a81edf: (3.154435ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35168]
I0212 07:24:32.398571  124047 wrap.go:47] PUT /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-47/status: (4.835594ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35158]
I0212 07:24:32.399337  124047 wrap.go:47] DELETE /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-0: (6.53539ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35154]
I0212 07:24:32.400419  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-47: (1.237656ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35168]
I0212 07:24:32.400855  124047 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 07:24:32.401066  124047 scheduling_queue.go:868] About to try and schedule pod preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-49
I0212 07:24:32.401121  124047 scheduler.go:453] Attempting to schedule pod: preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-49
I0212 07:24:32.401225  124047 factory.go:647] Unable to schedule preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-49: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 07:24:32.401338  124047 factory.go:742] Updating pod condition for preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-49 to (PodScheduled==False, Reason=Unschedulable)
I0212 07:24:32.402955  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-49: (1.32281ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35168]
I0212 07:24:32.403773  124047 wrap.go:47] POST /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/events: (1.368894ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35170]
I0212 07:24:32.404565  124047 wrap.go:47] PUT /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-49/status: (2.974938ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35166]
I0212 07:24:32.405384  124047 wrap.go:47] DELETE /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-1: (5.58284ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35154]
I0212 07:24:32.406410  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-49: (1.378102ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35170]
I0212 07:24:32.406715  124047 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 07:24:32.407018  124047 scheduling_queue.go:868] About to try and schedule pod preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-23
I0212 07:24:32.407055  124047 scheduler.go:453] Attempting to schedule pod: preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-23
I0212 07:24:32.407168  124047 factory.go:647] Unable to schedule preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-23: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 07:24:32.407235  124047 factory.go:742] Updating pod condition for preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-23 to (PodScheduled==False, Reason=Unschedulable)
I0212 07:24:32.410118  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-23: (1.892223ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35172]
I0212 07:24:32.410418  124047 wrap.go:47] POST /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/events: (2.678333ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35168]
I0212 07:24:32.410780  124047 wrap.go:47] DELETE /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-2: (5.015412ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35154]
I0212 07:24:32.411545  124047 wrap.go:47] PUT /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-23/status: (4.108762ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35170]
I0212 07:24:32.413349  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-23: (1.114766ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35170]
I0212 07:24:32.413654  124047 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 07:24:32.413882  124047 scheduling_queue.go:868] About to try and schedule pod preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-49
I0212 07:24:32.413895  124047 scheduler.go:453] Attempting to schedule pod: preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-49
I0212 07:24:32.413988  124047 factory.go:647] Unable to schedule preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-49: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 07:24:32.414032  124047 factory.go:742] Updating pod condition for preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-49 to (PodScheduled==False, Reason=Unschedulable)
I0212 07:24:32.415562  124047 wrap.go:47] DELETE /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-3: (4.312053ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35168]
I0212 07:24:32.415940  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-49: (1.650933ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35172]
I0212 07:24:32.416323  124047 wrap.go:47] PUT /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-49/status: (2.080914ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35170]
I0212 07:24:32.418777  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-49: (1.379967ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35170]
I0212 07:24:32.418908  124047 wrap.go:47] PATCH /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/events/ppod-49.15828d0489de3c51: (3.719019ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35174]
I0212 07:24:32.419091  124047 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 07:24:32.419222  124047 scheduling_queue.go:868] About to try and schedule pod preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-42
I0212 07:24:32.419233  124047 scheduler.go:453] Attempting to schedule pod: preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-42
I0212 07:24:32.419392  124047 factory.go:647] Unable to schedule preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-42: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 07:24:32.419466  124047 factory.go:742] Updating pod condition for preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-42 to (PodScheduled==False, Reason=Unschedulable)
I0212 07:24:32.420644  124047 wrap.go:47] DELETE /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-4: (4.824417ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35168]
I0212 07:24:32.421855  124047 wrap.go:47] POST /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/events: (1.714987ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35176]
I0212 07:24:32.422162  124047 wrap.go:47] PUT /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-42/status: (2.395323ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35170]
I0212 07:24:32.422386  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-42: (2.63125ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35172]
I0212 07:24:32.423818  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-42: (1.264955ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35170]
I0212 07:24:32.424398  124047 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 07:24:32.424587  124047 scheduling_queue.go:868] About to try and schedule pod preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-48
I0212 07:24:32.424611  124047 scheduler.go:453] Attempting to schedule pod: preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-48
I0212 07:24:32.424749  124047 factory.go:647] Unable to schedule preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-48: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 07:24:32.424813  124047 factory.go:742] Updating pod condition for preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-48 to (PodScheduled==False, Reason=Unschedulable)
I0212 07:24:32.426480  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-48: (1.28807ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35176]
I0212 07:24:32.427633  124047 wrap.go:47] POST /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/events: (1.673213ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35178]
I0212 07:24:32.427636  124047 wrap.go:47] DELETE /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-5: (6.489962ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35168]
I0212 07:24:32.429299  124047 wrap.go:47] PUT /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-48/status: (4.082693ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35172]
I0212 07:24:32.431484  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-48: (1.566915ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35172]
I0212 07:24:32.431760  124047 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 07:24:32.431910  124047 scheduling_queue.go:868] About to try and schedule pod preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-42
I0212 07:24:32.432008  124047 scheduler.go:453] Attempting to schedule pod: preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-42
I0212 07:24:32.432161  124047 factory.go:647] Unable to schedule preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-42: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 07:24:32.432204  124047 wrap.go:47] DELETE /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-6: (4.112718ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35178]
I0212 07:24:32.432215  124047 factory.go:742] Updating pod condition for preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-42 to (PodScheduled==False, Reason=Unschedulable)
I0212 07:24:32.434089  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-42: (1.509346ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35176]
I0212 07:24:32.434451  124047 wrap.go:47] PUT /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-42/status: (1.85341ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35172]
I0212 07:24:32.436609  124047 wrap.go:47] PATCH /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/events/ppod-42.15828d048af331a4: (2.989061ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35180]
I0212 07:24:32.437519  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-42: (1.704888ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35172]
I0212 07:24:32.438057  124047 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 07:24:32.438286  124047 wrap.go:47] DELETE /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-7: (5.720486ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35178]
I0212 07:24:32.438377  124047 scheduling_queue.go:868] About to try and schedule pod preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-46
I0212 07:24:32.438400  124047 scheduler.go:453] Attempting to schedule pod: preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-46
I0212 07:24:32.438540  124047 factory.go:647] Unable to schedule preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-46: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 07:24:32.438586  124047 factory.go:742] Updating pod condition for preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-46 to (PodScheduled==False, Reason=Unschedulable)
I0212 07:24:32.440308  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-46: (1.312324ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35176]
I0212 07:24:32.441171  124047 wrap.go:47] POST /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/events: (2.054921ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35184]
I0212 07:24:32.442390  124047 wrap.go:47] PUT /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-46/status: (2.722878ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35182]
I0212 07:24:32.444055  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-46: (1.194659ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35184]
I0212 07:24:32.444306  124047 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 07:24:32.444463  124047 scheduling_queue.go:868] About to try and schedule pod preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-14
I0212 07:24:32.444483  124047 scheduler.go:453] Attempting to schedule pod: preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-14
I0212 07:24:32.444617  124047 factory.go:647] Unable to schedule preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-14: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 07:24:32.444676  124047 factory.go:742] Updating pod condition for preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-14 to (PodScheduled==False, Reason=Unschedulable)
I0212 07:24:32.445319  124047 wrap.go:47] DELETE /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-8: (6.356969ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35180]
I0212 07:24:32.447520  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-14: (2.142871ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35184]
I0212 07:24:32.447967  124047 wrap.go:47] PATCH /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/events/ppod-14.15828d0477beabda: (2.594037ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35176]
I0212 07:24:32.448931  124047 wrap.go:47] PUT /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-14/status: (2.478357ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35186]
I0212 07:24:32.449971  124047 wrap.go:47] DELETE /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-9: (4.331927ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35180]
I0212 07:24:32.450690  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-14: (1.289429ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35176]
I0212 07:24:32.451027  124047 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 07:24:32.451255  124047 scheduling_queue.go:868] About to try and schedule pod preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-46
I0212 07:24:32.451280  124047 scheduler.go:453] Attempting to schedule pod: preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-46
I0212 07:24:32.451423  124047 factory.go:647] Unable to schedule preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-46: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 07:24:32.451525  124047 factory.go:742] Updating pod condition for preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-46 to (PodScheduled==False, Reason=Unschedulable)
I0212 07:24:32.454836  124047 wrap.go:47] PUT /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-46/status: (1.855048ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35176]
I0212 07:24:32.456412  124047 wrap.go:47] PATCH /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/events/ppod-46.15828d048c1700d1: (2.725907ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35188]
I0212 07:24:32.460276  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-46: (7.337001ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35184]
I0212 07:24:32.460621  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-46: (5.385648ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35176]
I0212 07:24:32.460863  124047 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 07:24:32.461775  124047 scheduling_queue.go:868] About to try and schedule pod preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-36
I0212 07:24:32.461800  124047 scheduler.go:453] Attempting to schedule pod: preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-36
I0212 07:24:32.461909  124047 factory.go:647] Unable to schedule preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-36: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 07:24:32.461972  124047 factory.go:742] Updating pod condition for preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-36 to (PodScheduled==False, Reason=Unschedulable)
I0212 07:24:32.462377  124047 wrap.go:47] DELETE /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-10: (12.093791ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35180]
I0212 07:24:32.464077  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-36: (1.809393ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35188]
I0212 07:24:32.464552  124047 wrap.go:47] POST /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/events: (1.885512ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35180]
I0212 07:24:32.464754  124047 wrap.go:47] PUT /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-36/status: (2.458531ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35184]
I0212 07:24:32.467163  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-36: (1.631622ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35180]
I0212 07:24:32.467477  124047 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 07:24:32.467656  124047 scheduling_queue.go:868] About to try and schedule pod preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-22
I0212 07:24:32.467674  124047 scheduler.go:453] Attempting to schedule pod: preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-22
I0212 07:24:32.467809  124047 factory.go:647] Unable to schedule preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-22: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 07:24:32.467865  124047 factory.go:742] Updating pod condition for preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-22 to (PodScheduled==False, Reason=Unschedulable)
I0212 07:24:32.468274  124047 wrap.go:47] DELETE /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-11: (5.455905ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35190]
I0212 07:24:32.469893  124047 wrap.go:47] POST /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/events: (1.350953ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35192]
I0212 07:24:32.470473  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-22: (2.429072ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35180]
I0212 07:24:32.470479  124047 wrap.go:47] PUT /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-22/status: (2.047964ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35188]
I0212 07:24:32.472213  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-22: (1.29507ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35188]
I0212 07:24:32.472591  124047 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 07:24:32.472600  124047 wrap.go:47] DELETE /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-12: (3.967964ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35190]
I0212 07:24:32.472884  124047 scheduling_queue.go:868] About to try and schedule pod preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-36
I0212 07:24:32.472906  124047 scheduler.go:453] Attempting to schedule pod: preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-36
I0212 07:24:32.473022  124047 factory.go:647] Unable to schedule preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-36: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 07:24:32.473091  124047 factory.go:742] Updating pod condition for preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-36 to (PodScheduled==False, Reason=Unschedulable)
I0212 07:24:32.474605  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-36: (1.371483ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35192]
I0212 07:24:32.475480  124047 wrap.go:47] PUT /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-36/status: (1.940468ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35194]
I0212 07:24:32.477403  124047 wrap.go:47] PATCH /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/events/ppod-36.15828d048d7bc74c: (2.235736ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35196]
I0212 07:24:32.477479  124047 wrap.go:47] DELETE /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-13: (4.327739ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35188]
I0212 07:24:32.477647  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-36: (1.454733ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35194]
I0212 07:24:32.477891  124047 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 07:24:32.478063  124047 scheduling_queue.go:868] About to try and schedule pod preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-22
I0212 07:24:32.478082  124047 scheduler.go:453] Attempting to schedule pod: preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-22
I0212 07:24:32.478191  124047 factory.go:647] Unable to schedule preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-22: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 07:24:32.478313  124047 factory.go:742] Updating pod condition for preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-22 to (PodScheduled==False, Reason=Unschedulable)
I0212 07:24:32.480019  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-22: (1.528081ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35194]
I0212 07:24:32.481348  124047 wrap.go:47] PUT /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-22/status: (2.623596ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35192]
I0212 07:24:32.481702  124047 wrap.go:47] DELETE /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-14: (3.936627ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35196]
I0212 07:24:32.482242  124047 wrap.go:47] PATCH /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/events/ppod-22.15828d048dd5b93f: (3.052742ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35198]
I0212 07:24:32.483544  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-22: (1.227291ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35194]
I0212 07:24:32.483987  124047 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 07:24:32.484168  124047 scheduling_queue.go:868] About to try and schedule pod preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-20
I0212 07:24:32.484182  124047 scheduler.go:453] Attempting to schedule pod: preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-20
I0212 07:24:32.484250  124047 factory.go:647] Unable to schedule preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-20: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 07:24:32.484299  124047 factory.go:742] Updating pod condition for preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-20 to (PodScheduled==False, Reason=Unschedulable)
I0212 07:24:32.486345  124047 wrap.go:47] PUT /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-20/status: (1.791525ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35194]
I0212 07:24:32.486441  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-20: (1.84396ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35198]
I0212 07:24:32.487452  124047 wrap.go:47] DELETE /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-15: (5.155168ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35196]
I0212 07:24:32.487823  124047 wrap.go:47] PATCH /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/events/ppod-20.15828d047518a7d1: (2.247826ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35200]
I0212 07:24:32.488216  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-20: (1.333208ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35198]
I0212 07:24:32.488860  124047 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 07:24:32.489025  124047 scheduling_queue.go:868] About to try and schedule pod preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-48
I0212 07:24:32.489059  124047 scheduler.go:453] Attempting to schedule pod: preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-48
I0212 07:24:32.489191  124047 factory.go:647] Unable to schedule preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-48: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 07:24:32.489244  124047 factory.go:742] Updating pod condition for preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-48 to (PodScheduled==False, Reason=Unschedulable)
I0212 07:24:32.490613  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-48: (1.077561ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35194]
I0212 07:24:32.491522  124047 wrap.go:47] PUT /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-48/status: (1.989075ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35200]
I0212 07:24:32.492407  124047 wrap.go:47] DELETE /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-16: (4.627768ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35196]
I0212 07:24:32.492458  124047 wrap.go:47] PATCH /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/events/ppod-48.15828d048b44c850: (2.227238ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35202]
I0212 07:24:32.493306  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-48: (1.12468ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35200]
I0212 07:24:32.493623  124047 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 07:24:32.493818  124047 scheduling_queue.go:868] About to try and schedule pod preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-23
I0212 07:24:32.493837  124047 scheduler.go:453] Attempting to schedule pod: preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-23
I0212 07:24:32.493918  124047 factory.go:647] Unable to schedule preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-23: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 07:24:32.493966  124047 factory.go:742] Updating pod condition for preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-23 to (PodScheduled==False, Reason=Unschedulable)
I0212 07:24:32.495344  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-23: (1.140384ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35194]
I0212 07:24:32.496538  124047 wrap.go:47] PUT /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-23/status: (2.303314ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35202]
I0212 07:24:32.497549  124047 wrap.go:47] DELETE /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-17: (4.838856ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35196]
I0212 07:24:32.501989  124047 wrap.go:47] PATCH /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/events/ppod-23.15828d048a3865e3: (6.739278ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35204]
I0212 07:24:32.515388  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-23: (17.710506ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35202]
I0212 07:24:32.516482  124047 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 07:24:32.517017  124047 scheduling_queue.go:868] About to try and schedule pod preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-39
I0212 07:24:32.517059  124047 scheduler.go:453] Attempting to schedule pod: preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-39
I0212 07:24:32.517633  124047 factory.go:647] Unable to schedule preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-39: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 07:24:32.517713  124047 factory.go:742] Updating pod condition for preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-39 to (PodScheduled==False, Reason=Unschedulable)
I0212 07:24:32.522739  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-39: (3.336546ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35194]
I0212 07:24:32.523085  124047 wrap.go:47] DELETE /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-18: (9.946409ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35196]
I0212 07:24:32.524279  124047 wrap.go:47] POST /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/events: (3.208671ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35206]
I0212 07:24:32.530461  124047 wrap.go:47] PUT /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-39/status: (11.632919ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35204]
I0212 07:24:32.534171  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-39: (3.038325ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35206]
I0212 07:24:32.534524  124047 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 07:24:32.537269  124047 wrap.go:47] DELETE /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-19: (13.384824ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35196]
I0212 07:24:32.542216  124047 scheduling_queue.go:868] About to try and schedule pod preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-39
I0212 07:24:32.542236  124047 scheduler.go:453] Attempting to schedule pod: preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-39
I0212 07:24:32.542516  124047 factory.go:647] Unable to schedule preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-39: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 07:24:32.542595  124047 factory.go:742] Updating pod condition for preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-39 to (PodScheduled==False, Reason=Unschedulable)
I0212 07:24:32.548262  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-39: (4.463255ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35208]
I0212 07:24:32.549161  124047 wrap.go:47] PUT /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-39/status: (6.159818ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35194]
I0212 07:24:32.549453  124047 wrap.go:47] DELETE /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-20: (11.667431ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35206]
I0212 07:24:32.549680  124047 wrap.go:47] PATCH /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/events/ppod-39.15828d0490ce2499: (5.649852ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35210]
I0212 07:24:32.551475  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-39: (1.544843ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35194]
I0212 07:24:32.551907  124047 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 07:24:32.554985  124047 scheduling_queue.go:868] About to try and schedule pod preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-19
I0212 07:24:32.555033  124047 scheduler.go:449] Skip schedule deleting pod: preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-19
I0212 07:24:32.555691  124047 scheduling_queue.go:868] About to try and schedule pod preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-20
I0212 07:24:32.555941  124047 scheduler.go:449] Skip schedule deleting pod: preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-20
I0212 07:24:32.557327  124047 wrap.go:47] POST /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/events: (1.878376ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35194]
I0212 07:24:32.558730  124047 wrap.go:47] DELETE /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-21: (8.782621ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35208]
I0212 07:24:32.561757  124047 wrap.go:47] POST /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/events: (3.827116ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35194]
I0212 07:24:32.562778  124047 scheduling_queue.go:868] About to try and schedule pod preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-22
I0212 07:24:32.562826  124047 scheduler.go:449] Skip schedule deleting pod: preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-22
I0212 07:24:32.565795  124047 wrap.go:47] POST /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/events: (2.630023ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35194]
I0212 07:24:32.573253  124047 wrap.go:47] DELETE /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-22: (11.004467ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35208]
I0212 07:24:32.579199  124047 scheduling_queue.go:868] About to try and schedule pod preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-23
I0212 07:24:32.579242  124047 scheduler.go:449] Skip schedule deleting pod: preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-23
I0212 07:24:32.583192  124047 wrap.go:47] DELETE /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-23: (9.421932ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35194]
I0212 07:24:32.584207  124047 wrap.go:47] POST /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/events: (4.450078ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35210]
I0212 07:24:32.587933  124047 scheduling_queue.go:868] About to try and schedule pod preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-24
I0212 07:24:32.587974  124047 scheduler.go:449] Skip schedule deleting pod: preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-24
I0212 07:24:32.593515  124047 wrap.go:47] POST /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/events: (5.129459ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35210]
E0212 07:24:32.601253  124047 event.go:200] Unable to write event: 'Patch http://127.0.0.1:44167/api/v1/namespaces/prebind-plugin216b4599-2e97-11e9-a750-0242ac110002/events/test-pod.15828cf9c1dc6398: dial tcp 127.0.0.1:44167: connect: connection refused' (may retry after sleeping)
I0212 07:24:32.602568  124047 wrap.go:47] DELETE /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-24: (18.920876ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35194]
I0212 07:24:32.616017  124047 wrap.go:47] DELETE /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-25: (12.867242ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35194]
I0212 07:24:32.619691  124047 scheduling_queue.go:868] About to try and schedule pod preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-26
I0212 07:24:32.619785  124047 scheduler.go:449] Skip schedule deleting pod: preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-26
I0212 07:24:32.621100  124047 wrap.go:47] DELETE /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-26: (4.615589ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35194]
I0212 07:24:32.621841  124047 wrap.go:47] POST /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/events: (1.695189ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35210]
I0212 07:24:32.630119  124047 scheduling_queue.go:868] About to try and schedule pod preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-27
I0212 07:24:32.630169  124047 scheduler.go:449] Skip schedule deleting pod: preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-27
I0212 07:24:32.630938  124047 wrap.go:47] DELETE /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-27: (9.454014ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35194]
I0212 07:24:32.632106  124047 wrap.go:47] POST /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/events: (1.549069ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35210]
I0212 07:24:32.634178  124047 scheduling_queue.go:868] About to try and schedule pod preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-28
I0212 07:24:32.634219  124047 scheduler.go:449] Skip schedule deleting pod: preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-28
I0212 07:24:32.635638  124047 wrap.go:47] DELETE /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-28: (3.875722ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35194]
I0212 07:24:32.636015  124047 wrap.go:47] POST /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/events: (1.496094ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35210]
I0212 07:24:32.638247  124047 scheduling_queue.go:868] About to try and schedule pod preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-29
I0212 07:24:32.638284  124047 scheduler.go:449] Skip schedule deleting pod: preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-29
I0212 07:24:32.641406  124047 wrap.go:47] POST /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/events: (2.84213ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35210]
I0212 07:24:32.641969  124047 wrap.go:47] DELETE /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-29: (6.062841ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35194]
I0212 07:24:32.644816  124047 scheduling_queue.go:868] About to try and schedule pod preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-30
I0212 07:24:32.644861  124047 scheduler.go:449] Skip schedule deleting pod: preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-30
I0212 07:24:32.646062  124047 wrap.go:47] DELETE /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-30: (3.69377ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35194]
I0212 07:24:32.646591  124047 wrap.go:47] POST /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/events: (1.499581ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35210]
I0212 07:24:32.648933  124047 scheduling_queue.go:868] About to try and schedule pod preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-31
I0212 07:24:32.648978  124047 scheduler.go:449] Skip schedule deleting pod: preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-31
I0212 07:24:32.650405  124047 wrap.go:47] DELETE /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-31: (3.956903ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35194]
I0212 07:24:32.650735  124047 wrap.go:47] POST /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/events: (1.507717ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35210]
I0212 07:24:32.653118  124047 scheduling_queue.go:868] About to try and schedule pod preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-32
I0212 07:24:32.653150  124047 scheduler.go:449] Skip schedule deleting pod: preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-32
I0212 07:24:32.654634  124047 wrap.go:47] DELETE /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-32: (3.857655ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35194]
I0212 07:24:32.654831  124047 wrap.go:47] POST /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/events: (1.467457ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35210]
I0212 07:24:32.657917  124047 scheduling_queue.go:868] About to try and schedule pod preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-33
I0212 07:24:32.657966  124047 scheduler.go:449] Skip schedule deleting pod: preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-33
I0212 07:24:32.659151  124047 wrap.go:47] DELETE /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-33: (3.892975ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35210]
I0212 07:24:32.659696  124047 wrap.go:47] POST /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/events: (1.412521ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35194]
I0212 07:24:32.661957  124047 scheduling_queue.go:868] About to try and schedule pod preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-34
I0212 07:24:32.661994  124047 scheduler.go:449] Skip schedule deleting pod: preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-34
I0212 07:24:32.663125  124047 wrap.go:47] DELETE /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-34: (3.531829ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35210]
I0212 07:24:32.663526  124047 wrap.go:47] POST /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/events: (1.277895ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35194]
I0212 07:24:32.665875  124047 scheduling_queue.go:868] About to try and schedule pod preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-35
I0212 07:24:32.665927  124047 scheduler.go:449] Skip schedule deleting pod: preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-35
I0212 07:24:32.667963  124047 wrap.go:47] POST /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/events: (1.782507ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35194]
I0212 07:24:32.668355  124047 wrap.go:47] DELETE /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-35: (4.932832ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35210]
I0212 07:24:32.671466  124047 scheduling_queue.go:868] About to try and schedule pod preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-36
I0212 07:24:32.671520  124047 scheduler.go:449] Skip schedule deleting pod: preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-36
I0212 07:24:32.672718  124047 wrap.go:47] DELETE /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-36: (4.011772ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35210]
I0212 07:24:32.673405  124047 wrap.go:47] POST /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/events: (1.629125ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35194]
I0212 07:24:32.680160  124047 scheduling_queue.go:868] About to try and schedule pod preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-37
I0212 07:24:32.680203  124047 scheduler.go:449] Skip schedule deleting pod: preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-37
I0212 07:24:32.681415  124047 wrap.go:47] DELETE /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-37: (7.704779ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35210]
I0212 07:24:32.682257  124047 wrap.go:47] POST /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/events: (1.703766ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35194]
I0212 07:24:32.684485  124047 scheduling_queue.go:868] About to try and schedule pod preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-38
I0212 07:24:32.684560  124047 scheduler.go:449] Skip schedule deleting pod: preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-38
I0212 07:24:32.685661  124047 wrap.go:47] DELETE /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-38: (3.922026ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35210]
I0212 07:24:32.686285  124047 wrap.go:47] POST /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/events: (1.432315ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35194]
I0212 07:24:32.688446  124047 scheduling_queue.go:868] About to try and schedule pod preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-39
I0212 07:24:32.688475  124047 scheduler.go:449] Skip schedule deleting pod: preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-39
I0212 07:24:32.689641  124047 wrap.go:47] DELETE /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-39: (3.629028ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35210]
I0212 07:24:32.690112  124047 wrap.go:47] POST /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/events: (1.35904ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35194]
I0212 07:24:32.692185  124047 scheduling_queue.go:868] About to try and schedule pod preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-40
I0212 07:24:32.692221  124047 scheduler.go:449] Skip schedule deleting pod: preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-40
I0212 07:24:32.693321  124047 wrap.go:47] DELETE /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-40: (3.375705ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35210]
I0212 07:24:32.693775  124047 wrap.go:47] POST /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/events: (1.309055ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35194]
I0212 07:24:32.695864  124047 scheduling_queue.go:868] About to try and schedule pod preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-41
I0212 07:24:32.695890  124047 scheduler.go:449] Skip schedule deleting pod: preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-41
I0212 07:24:32.697302  124047 wrap.go:47] POST /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/events: (1.168937ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35194]
I0212 07:24:32.697556  124047 wrap.go:47] DELETE /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-41: (3.909283ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35210]
I0212 07:24:32.700640  124047 scheduling_queue.go:868] About to try and schedule pod preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-42
I0212 07:24:32.700689  124047 scheduler.go:449] Skip schedule deleting pod: preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-42
I0212 07:24:32.702311  124047 wrap.go:47] POST /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/events: (1.36994ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35194]
I0212 07:24:32.702345  124047 wrap.go:47] DELETE /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-42: (4.313984ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35210]
I0212 07:24:32.705381  124047 scheduling_queue.go:868] About to try and schedule pod preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-43
I0212 07:24:32.705415  124047 scheduler.go:449] Skip schedule deleting pod: preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-43
I0212 07:24:32.706590  124047 wrap.go:47] DELETE /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-43: (3.800126ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35210]
I0212 07:24:32.707178  124047 wrap.go:47] POST /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/events: (1.457044ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35194]
I0212 07:24:32.712134  124047 scheduling_queue.go:868] About to try and schedule pod preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-44
I0212 07:24:32.712201  124047 scheduler.go:449] Skip schedule deleting pod: preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-44
I0212 07:24:32.713695  124047 wrap.go:47] DELETE /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-44: (6.270668ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35210]
I0212 07:24:32.713766  124047 wrap.go:47] POST /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/events: (1.218697ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35194]
I0212 07:24:32.716554  124047 scheduling_queue.go:868] About to try and schedule pod preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-45
I0212 07:24:32.716593  124047 scheduler.go:449] Skip schedule deleting pod: preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-45
I0212 07:24:32.718084  124047 wrap.go:47] DELETE /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-45: (4.110969ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35194]
I0212 07:24:32.718136  124047 wrap.go:47] POST /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/events: (1.318445ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35210]
I0212 07:24:32.721051  124047 scheduling_queue.go:868] About to try and schedule pod preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-46
I0212 07:24:32.721092  124047 scheduler.go:449] Skip schedule deleting pod: preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-46
I0212 07:24:32.722769  124047 wrap.go:47] DELETE /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-46: (4.304267ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35194]
I0212 07:24:32.722865  124047 wrap.go:47] POST /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/events: (1.462412ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35210]
I0212 07:24:32.724628  124047 reflector.go:248] k8s.io/client-go/informers/factory.go:132: forcing resync
I0212 07:24:32.725122  124047 reflector.go:248] k8s.io/client-go/informers/factory.go:132: forcing resync
I0212 07:24:32.725792  124047 scheduling_queue.go:868] About to try and schedule pod preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-47
I0212 07:24:32.725844  124047 scheduler.go:449] Skip schedule deleting pod: preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-47
I0212 07:24:32.727568  124047 wrap.go:47] DELETE /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-47: (4.398817ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35210]
I0212 07:24:32.727926  124047 wrap.go:47] POST /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/events: (1.781295ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35194]
I0212 07:24:32.729781  124047 reflector.go:248] k8s.io/client-go/informers/factory.go:132: forcing resync
I0212 07:24:32.729852  124047 reflector.go:248] k8s.io/client-go/informers/factory.go:132: forcing resync
I0212 07:24:32.729886  124047 reflector.go:248] k8s.io/client-go/informers/factory.go:132: forcing resync
I0212 07:24:32.730235  124047 scheduling_queue.go:868] About to try and schedule pod preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-48
I0212 07:24:32.730278  124047 scheduler.go:449] Skip schedule deleting pod: preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-48
I0212 07:24:32.731790  124047 wrap.go:47] POST /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/events: (1.225497ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35194]
I0212 07:24:32.732117  124047 wrap.go:47] DELETE /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-48: (4.253882ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35210]
I0212 07:24:32.734675  124047 scheduling_queue.go:868] About to try and schedule pod preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-49
I0212 07:24:32.734792  124047 scheduler.go:449] Skip schedule deleting pod: preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-49
I0212 07:24:32.736019  124047 wrap.go:47] DELETE /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-49: (3.602955ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35210]
I0212 07:24:32.736573  124047 wrap.go:47] POST /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/events: (1.339205ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35194]
I0212 07:24:32.739919  124047 wrap.go:47] DELETE /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/rpod-0: (3.500839ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35210]
I0212 07:24:32.741250  124047 wrap.go:47] DELETE /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/rpod-1: (1.026108ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35210]
I0212 07:24:32.745290  124047 wrap.go:47] DELETE /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/preemptor-pod: (3.638241ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35210]
I0212 07:24:32.747695  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-0: (915.573µs) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35210]
I0212 07:24:32.750147  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-1: (915.33µs) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35210]
I0212 07:24:32.752631  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-2: (978.346µs) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35210]
I0212 07:24:32.755021  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-3: (886.611µs) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35210]
I0212 07:24:32.757332  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-4: (764.532µs) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35210]
I0212 07:24:32.759709  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-5: (828.393µs) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35210]
I0212 07:24:32.762109  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-6: (932.636µs) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35210]
I0212 07:24:32.764390  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-7: (771.313µs) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35210]
I0212 07:24:32.766701  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-8: (801.5µs) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35210]
I0212 07:24:32.769200  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-9: (896.436µs) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35210]
I0212 07:24:32.771690  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-10: (941.907µs) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35210]
I0212 07:24:32.774066  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-11: (875.796µs) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35210]
I0212 07:24:32.776581  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-12: (960.993µs) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35210]
I0212 07:24:32.779004  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-13: (965.974µs) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35210]
I0212 07:24:32.781352  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-14: (806.514µs) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35210]
I0212 07:24:32.783658  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-15: (881.502µs) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35210]
I0212 07:24:32.786006  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-16: (795.021µs) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35210]
I0212 07:24:32.788479  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-17: (886.378µs) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35210]
I0212 07:24:32.791225  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-18: (1.087816ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35210]
I0212 07:24:32.793600  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-19: (872.803µs) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35210]
I0212 07:24:32.796162  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-20: (963.621µs) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35210]
I0212 07:24:32.798840  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-21: (1.016602ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35210]
I0212 07:24:32.801471  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-22: (1.005093ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35210]
I0212 07:24:32.804197  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-23: (1.052215ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35210]
I0212 07:24:32.807103  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-24: (1.285688ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35210]
I0212 07:24:32.809601  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-25: (963.599µs) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35210]
I0212 07:24:32.812519  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-26: (1.244111ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35210]
I0212 07:24:32.815338  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-27: (1.166964ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35210]
I0212 07:24:32.818350  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-28: (1.282502ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35210]
I0212 07:24:32.820964  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-29: (933.692µs) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35210]
I0212 07:24:32.823570  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-30: (945.358µs) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35210]
I0212 07:24:32.826081  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-31: (950.641µs) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35210]
I0212 07:24:32.828599  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-32: (935.428µs) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35210]
I0212 07:24:32.831606  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-33: (1.257845ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35210]
I0212 07:24:32.834247  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-34: (1.110379ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35210]
I0212 07:24:32.836753  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-35: (938.593µs) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35210]
I0212 07:24:32.839560  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-36: (1.142105ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35210]
I0212 07:24:32.842358  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-37: (1.147878ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35210]
I0212 07:24:32.845224  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-38: (1.265357ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35210]
I0212 07:24:32.847676  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-39: (943.261µs) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35210]
I0212 07:24:32.850247  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-40: (1.01743ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35210]
I0212 07:24:32.852867  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-41: (896.027µs) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35210]
I0212 07:24:32.855559  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-42: (1.00475ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35210]
I0212 07:24:32.858175  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-43: (964.727µs) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35210]
I0212 07:24:32.860654  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-44: (882.665µs) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35210]
I0212 07:24:32.863241  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-45: (995.956µs) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35210]
I0212 07:24:32.865653  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-46: (855.542µs) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35210]
I0212 07:24:32.868248  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-47: (973.208µs) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35210]
I0212 07:24:32.870787  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-48: (895.511µs) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35210]
I0212 07:24:32.873226  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-49: (862.765µs) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35210]
I0212 07:24:32.875589  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/rpod-0: (850.376µs) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35210]
I0212 07:24:32.878224  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/rpod-1: (967.379µs) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35210]
I0212 07:24:32.882002  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/preemptor-pod: (881.089µs) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35210]
I0212 07:24:32.884363  124047 wrap.go:47] POST /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods: (1.887741ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35210]
I0212 07:24:32.884545  124047 scheduling_queue.go:868] About to try and schedule pod preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/rpod-0
I0212 07:24:32.884593  124047 scheduler.go:453] Attempting to schedule pod: preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/rpod-0
I0212 07:24:32.884733  124047 scheduler_binder.go:269] AssumePodVolumes for pod "preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/rpod-0", node "node1"
I0212 07:24:32.884759  124047 scheduler_binder.go:279] AssumePodVolumes for pod "preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/rpod-0", node "node1": all PVCs bound and nothing to do
I0212 07:24:32.884825  124047 factory.go:733] Attempting to bind rpod-0 to node1
I0212 07:24:32.886726  124047 wrap.go:47] POST /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/rpod-0/binding: (1.617363ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35194]
I0212 07:24:32.886875  124047 wrap.go:47] POST /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods: (2.015377ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35210]
I0212 07:24:32.886931  124047 scheduler.go:571] pod preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/rpod-0 is bound successfully on node node1, 1 nodes evaluated, 1 nodes were found feasible
I0212 07:24:32.887271  124047 scheduling_queue.go:868] About to try and schedule pod preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/rpod-1
I0212 07:24:32.887284  124047 scheduler.go:453] Attempting to schedule pod: preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/rpod-1
I0212 07:24:32.887379  124047 scheduler_binder.go:269] AssumePodVolumes for pod "preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/rpod-1", node "node1"
I0212 07:24:32.887394  124047 scheduler_binder.go:279] AssumePodVolumes for pod "preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/rpod-1", node "node1": all PVCs bound and nothing to do
I0212 07:24:32.887438  124047 factory.go:733] Attempting to bind rpod-1 to node1
I0212 07:24:32.888845  124047 wrap.go:47] POST /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/events: (1.604398ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35210]
I0212 07:24:32.889248  124047 wrap.go:47] POST /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/rpod-1/binding: (1.517408ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35194]
I0212 07:24:32.889475  124047 scheduler.go:571] pod preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/rpod-1 is bound successfully on node node1, 1 nodes evaluated, 1 nodes were found feasible
I0212 07:24:32.891307  124047 wrap.go:47] POST /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/events: (1.571438ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35194]
I0212 07:24:32.989724  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/rpod-0: (2.025618ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35194]
I0212 07:24:33.092301  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/rpod-1: (1.652183ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35194]
I0212 07:24:33.092797  124047 preemption_test.go:561] Creating the preemptor pod...
I0212 07:24:33.095310  124047 wrap.go:47] POST /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods: (2.095063ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35194]
I0212 07:24:33.095442  124047 scheduling_queue.go:868] About to try and schedule pod preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/preemptor-pod
I0212 07:24:33.095467  124047 scheduler.go:453] Attempting to schedule pod: preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/preemptor-pod
I0212 07:24:33.095592  124047 factory.go:647] Unable to schedule preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/preemptor-pod: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 07:24:33.095609  124047 preemption_test.go:567] Creating additional pods...
I0212 07:24:33.095645  124047 factory.go:742] Updating pod condition for preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/preemptor-pod to (PodScheduled==False, Reason=Unschedulable)
I0212 07:24:33.097535  124047 wrap.go:47] POST /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods: (1.677202ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35194]
I0212 07:24:33.097975  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/preemptor-pod: (1.63278ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35332]
I0212 07:24:33.098071  124047 wrap.go:47] POST /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/events: (1.647544ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35334]
I0212 07:24:33.098547  124047 wrap.go:47] PUT /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/preemptor-pod/status: (2.666754ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35210]
I0212 07:24:33.099771  124047 wrap.go:47] POST /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods: (1.752228ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35194]
I0212 07:24:33.100379  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/preemptor-pod: (1.194487ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35334]
I0212 07:24:33.100643  124047 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 07:24:33.102185  124047 wrap.go:47] POST /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods: (1.993251ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35194]
I0212 07:24:33.103966  124047 wrap.go:47] PUT /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/preemptor-pod/status: (2.827946ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35334]
I0212 07:24:33.104366  124047 wrap.go:47] POST /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods: (1.731492ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35194]
I0212 07:24:33.106585  124047 wrap.go:47] POST /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods: (1.546617ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35194]
I0212 07:24:33.110191  124047 wrap.go:47] POST /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods: (3.081136ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35194]
I0212 07:24:33.111526  124047 wrap.go:47] DELETE /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/rpod-1: (6.537121ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35332]
I0212 07:24:33.111777  124047 scheduling_queue.go:868] About to try and schedule pod preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-0
I0212 07:24:33.111797  124047 scheduler.go:453] Attempting to schedule pod: preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-0
I0212 07:24:33.111924  124047 factory.go:647] Unable to schedule preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-0: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 07:24:33.111969  124047 factory.go:742] Updating pod condition for preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-0 to (PodScheduled==False, Reason=Unschedulable)
I0212 07:24:33.121057  124047 wrap.go:47] POST /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/events: (8.842131ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35332]
I0212 07:24:33.122901  124047 wrap.go:47] POST /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods: (12.250877ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35194]
I0212 07:24:33.122991  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-0: (10.574005ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35356]
I0212 07:24:33.126973  124047 wrap.go:47] PUT /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-0/status: (14.494069ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35354]
I0212 07:24:33.133382  124047 wrap.go:47] POST /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods: (9.299208ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35194]
I0212 07:24:33.134639  124047 wrap.go:47] POST /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/events: (13.078164ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35332]
I0212 07:24:33.135943  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-0: (2.401523ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35354]
I0212 07:24:33.136210  124047 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 07:24:33.136371  124047 wrap.go:47] POST /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods: (2.100267ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35194]
I0212 07:24:33.136469  124047 scheduling_queue.go:868] About to try and schedule pod preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-5
I0212 07:24:33.136481  124047 scheduler.go:453] Attempting to schedule pod: preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-5
I0212 07:24:33.136631  124047 factory.go:647] Unable to schedule preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-5: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 07:24:33.136672  124047 factory.go:742] Updating pod condition for preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-5 to (PodScheduled==False, Reason=Unschedulable)
I0212 07:24:33.138189  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-5: (1.274252ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35356]
I0212 07:24:33.139426  124047 wrap.go:47] POST /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/events: (2.226521ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35376]
I0212 07:24:33.139677  124047 wrap.go:47] POST /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods: (2.0639ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35194]
I0212 07:24:33.139917  124047 wrap.go:47] PUT /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-5/status: (2.944607ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35332]
I0212 07:24:33.141771  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-5: (1.369683ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35332]
I0212 07:24:33.142001  124047 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 07:24:33.142090  124047 wrap.go:47] POST /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods: (1.892886ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35376]
I0212 07:24:33.142166  124047 scheduling_queue.go:868] About to try and schedule pod preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-8
I0212 07:24:33.142189  124047 scheduler.go:453] Attempting to schedule pod: preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-8
I0212 07:24:33.142291  124047 factory.go:647] Unable to schedule preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-8: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 07:24:33.142349  124047 factory.go:742] Updating pod condition for preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-8 to (PodScheduled==False, Reason=Unschedulable)
I0212 07:24:33.146658  124047 wrap.go:47] POST /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods: (3.655111ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35356]
I0212 07:24:33.147240  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-8: (4.201333ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35378]
I0212 07:24:33.148862  124047 wrap.go:47] POST /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/events: (5.662474ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35380]
I0212 07:24:33.151690  124047 wrap.go:47] PUT /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-8/status: (8.960611ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35332]
I0212 07:24:33.153030  124047 wrap.go:47] POST /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods: (3.953153ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35356]
I0212 07:24:33.153751  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-8: (1.343051ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35380]
I0212 07:24:33.154104  124047 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 07:24:33.154302  124047 scheduling_queue.go:868] About to try and schedule pod preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-10
I0212 07:24:33.154391  124047 scheduler.go:453] Attempting to schedule pod: preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-10
I0212 07:24:33.154533  124047 factory.go:647] Unable to schedule preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-10: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 07:24:33.154707  124047 factory.go:742] Updating pod condition for preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-10 to (PodScheduled==False, Reason=Unschedulable)
I0212 07:24:33.155684  124047 wrap.go:47] POST /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods: (2.027487ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35356]
I0212 07:24:33.156812  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-10: (1.550813ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35378]
I0212 07:24:33.159473  124047 wrap.go:47] POST /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods: (2.827315ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35356]
I0212 07:24:33.159556  124047 wrap.go:47] PUT /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-10/status: (4.275152ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35380]
I0212 07:24:33.160181  124047 wrap.go:47] POST /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/events: (4.203547ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35382]
I0212 07:24:33.162259  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-10: (1.290234ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35382]
I0212 07:24:33.162657  124047 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 07:24:33.162840  124047 scheduling_queue.go:868] About to try and schedule pod preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-12
I0212 07:24:33.162861  124047 scheduler.go:453] Attempting to schedule pod: preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-12
I0212 07:24:33.162995  124047 factory.go:647] Unable to schedule preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-12: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 07:24:33.163077  124047 factory.go:742] Updating pod condition for preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-12 to (PodScheduled==False, Reason=Unschedulable)
I0212 07:24:33.163561  124047 wrap.go:47] POST /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods: (2.856965ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35356]
I0212 07:24:33.165844  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-12: (2.090803ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35378]
I0212 07:24:33.165914  124047 wrap.go:47] PUT /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-12/status: (2.129174ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35382]
I0212 07:24:33.166268  124047 wrap.go:47] POST /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/events: (1.963721ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35356]
I0212 07:24:33.167527  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-12: (1.19636ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35382]
I0212 07:24:33.167533  124047 wrap.go:47] POST /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods: (2.89794ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35384]
I0212 07:24:33.167839  124047 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 07:24:33.168007  124047 scheduling_queue.go:868] About to try and schedule pod preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-14
I0212 07:24:33.168018  124047 scheduler.go:453] Attempting to schedule pod: preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-14
I0212 07:24:33.168136  124047 factory.go:647] Unable to schedule preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-14: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 07:24:33.168170  124047 factory.go:742] Updating pod condition for preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-14 to (PodScheduled==False, Reason=Unschedulable)
I0212 07:24:33.169693  124047 wrap.go:47] POST /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods: (1.718078ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35356]
I0212 07:24:33.170700  124047 wrap.go:47] PUT /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-14/status: (1.846949ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35378]
I0212 07:24:33.171171  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-14: (1.208778ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35356]
I0212 07:24:33.171732  124047 wrap.go:47] POST /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods: (1.452398ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35390]
I0212 07:24:33.172211  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-14: (1.127499ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35378]
I0212 07:24:33.172298  124047 wrap.go:47] POST /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/events: (3.445394ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35388]
I0212 07:24:33.172473  124047 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 07:24:33.172728  124047 scheduling_queue.go:868] About to try and schedule pod preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-16
I0212 07:24:33.172753  124047 scheduler.go:453] Attempting to schedule pod: preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-16
I0212 07:24:33.172925  124047 factory.go:647] Unable to schedule preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-16: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 07:24:33.172974  124047 factory.go:742] Updating pod condition for preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-16 to (PodScheduled==False, Reason=Unschedulable)
I0212 07:24:33.174371  124047 wrap.go:47] POST /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods: (2.106766ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35392]
I0212 07:24:33.174547  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-16: (1.358179ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35378]
I0212 07:24:33.175412  124047 wrap.go:47] PUT /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-16/status: (2.211447ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35356]
I0212 07:24:33.175927  124047 wrap.go:47] POST /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/events: (2.288067ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35394]
I0212 07:24:33.177085  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-16: (1.275339ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35356]
I0212 07:24:33.177148  124047 wrap.go:47] POST /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods: (2.3203ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35378]
I0212 07:24:33.177421  124047 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 07:24:33.177674  124047 scheduling_queue.go:868] About to try and schedule pod preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-18
I0212 07:24:33.177699  124047 scheduler.go:453] Attempting to schedule pod: preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-18
I0212 07:24:33.177834  124047 factory.go:647] Unable to schedule preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-18: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 07:24:33.177885  124047 factory.go:742] Updating pod condition for preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-18 to (PodScheduled==False, Reason=Unschedulable)
I0212 07:24:33.179106  124047 wrap.go:47] POST /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods: (1.476213ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35394]
I0212 07:24:33.179630  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-18: (1.473342ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35392]
I0212 07:24:33.180249  124047 wrap.go:47] POST /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/events: (1.691745ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35398]
I0212 07:24:33.182251  124047 wrap.go:47] POST /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods: (2.483463ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35394]
I0212 07:24:33.184828  124047 wrap.go:47] POST /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods: (2.112416ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35398]
I0212 07:24:33.187548  124047 wrap.go:47] POST /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods: (1.966584ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35398]
I0212 07:24:33.189834  124047 wrap.go:47] POST /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods: (1.726826ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35398]
I0212 07:24:33.191937  124047 wrap.go:47] POST /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods: (1.73889ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35398]
I0212 07:24:33.193422  124047 wrap.go:47] PUT /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-18/status: (15.000832ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35396]
I0212 07:24:33.194760  124047 wrap.go:47] POST /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods: (2.126756ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35398]
I0212 07:24:33.195865  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-18: (1.454592ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35396]
I0212 07:24:33.196466  124047 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 07:24:33.196743  124047 scheduling_queue.go:868] About to try and schedule pod preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-20
I0212 07:24:33.196778  124047 scheduler.go:453] Attempting to schedule pod: preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-20
I0212 07:24:33.196962  124047 factory.go:647] Unable to schedule preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-20: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 07:24:33.197160  124047 factory.go:742] Updating pod condition for preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-20 to (PodScheduled==False, Reason=Unschedulable)
I0212 07:24:33.197984  124047 wrap.go:47] POST /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods: (2.409508ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35398]
I0212 07:24:33.200165  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-20: (2.766594ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35396]
I0212 07:24:33.200792  124047 wrap.go:47] POST /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/events: (2.547056ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35398]
I0212 07:24:33.200892  124047 wrap.go:47] PUT /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-20/status: (2.927344ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35392]
I0212 07:24:33.202859  124047 wrap.go:47] POST /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods: (4.04803ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35400]
I0212 07:24:33.202999  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-20: (1.561009ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35398]
I0212 07:24:33.203311  124047 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 07:24:33.203546  124047 scheduling_queue.go:868] About to try and schedule pod preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-27
I0212 07:24:33.203595  124047 scheduler.go:453] Attempting to schedule pod: preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-27
I0212 07:24:33.203713  124047 factory.go:647] Unable to schedule preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-27: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 07:24:33.203801  124047 factory.go:742] Updating pod condition for preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-27 to (PodScheduled==False, Reason=Unschedulable)
I0212 07:24:33.205677  124047 wrap.go:47] POST /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods: (2.37028ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35400]
I0212 07:24:33.206159  124047 wrap.go:47] POST /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/events: (1.708356ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35404]
I0212 07:24:33.206383  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-27: (1.787751ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35402]
I0212 07:24:33.206680  124047 wrap.go:47] PUT /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-27/status: (2.586475ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35396]
I0212 07:24:33.208138  124047 wrap.go:47] POST /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods: (1.883129ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35400]
I0212 07:24:33.208591  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-27: (1.430575ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35396]
I0212 07:24:33.208859  124047 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 07:24:33.209019  124047 scheduling_queue.go:868] About to try and schedule pod preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-29
I0212 07:24:33.209034  124047 scheduler.go:453] Attempting to schedule pod: preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-29
I0212 07:24:33.209124  124047 factory.go:647] Unable to schedule preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-29: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 07:24:33.209185  124047 factory.go:742] Updating pod condition for preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-29 to (PodScheduled==False, Reason=Unschedulable)
I0212 07:24:33.211251  124047 wrap.go:47] POST /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods: (2.676841ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35400]
I0212 07:24:33.211526  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-29: (1.233019ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35406]
I0212 07:24:33.211663  124047 wrap.go:47] POST /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/events: (1.806978ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35404]
I0212 07:24:33.212663  124047 wrap.go:47] PUT /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-29/status: (2.797512ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35396]
I0212 07:24:33.214130  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-29: (1.101929ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35396]
I0212 07:24:33.214378  124047 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 07:24:33.214541  124047 scheduling_queue.go:868] About to try and schedule pod preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-31
I0212 07:24:33.214567  124047 scheduler.go:453] Attempting to schedule pod: preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-31
I0212 07:24:33.214675  124047 factory.go:647] Unable to schedule preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-31: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 07:24:33.214725  124047 factory.go:742] Updating pod condition for preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-31 to (PodScheduled==False, Reason=Unschedulable)
I0212 07:24:33.214826  124047 wrap.go:47] POST /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods: (2.592159ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35404]
I0212 07:24:33.216926  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-31: (1.546988ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35400]
I0212 07:24:33.217635  124047 wrap.go:47] PUT /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-31/status: (1.971227ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35396]
I0212 07:24:33.217967  124047 wrap.go:47] POST /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/events: (1.81513ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35404]
I0212 07:24:33.218726  124047 wrap.go:47] POST /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods: (2.248165ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35410]
I0212 07:24:33.219157  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-31: (1.060982ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35400]
I0212 07:24:33.219482  124047 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 07:24:33.219643  124047 scheduling_queue.go:868] About to try and schedule pod preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-29
I0212 07:24:33.219663  124047 scheduler.go:453] Attempting to schedule pod: preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-29
I0212 07:24:33.219763  124047 factory.go:647] Unable to schedule preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-29: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 07:24:33.219800  124047 factory.go:742] Updating pod condition for preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-29 to (PodScheduled==False, Reason=Unschedulable)
I0212 07:24:33.221255  124047 wrap.go:47] POST /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods: (2.119602ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35404]
I0212 07:24:33.223073  124047 wrap.go:47] PUT /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-29/status: (2.752135ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35408]
I0212 07:24:33.223394  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-29: (3.389197ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35400]
I0212 07:24:33.224090  124047 wrap.go:47] PATCH /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/events/ppod-29.15828d04ba055c5c: (3.492661ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35412]
I0212 07:24:33.224530  124047 wrap.go:47] POST /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods: (2.760124ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35404]
I0212 07:24:33.224762  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-29: (1.299173ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35408]
I0212 07:24:33.225063  124047 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 07:24:33.225232  124047 scheduling_queue.go:868] About to try and schedule pod preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-34
I0212 07:24:33.225273  124047 scheduler.go:453] Attempting to schedule pod: preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-34
I0212 07:24:33.225526  124047 factory.go:647] Unable to schedule preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-34: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 07:24:33.225589  124047 factory.go:742] Updating pod condition for preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-34 to (PodScheduled==False, Reason=Unschedulable)
I0212 07:24:33.227365  124047 wrap.go:47] POST /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/events: (1.203596ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35414]
I0212 07:24:33.228133  124047 wrap.go:47] POST /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods: (3.036752ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35412]
I0212 07:24:33.228820  124047 wrap.go:47] PUT /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-34/status: (1.705117ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35416]
I0212 07:24:33.230132  124047 wrap.go:47] POST /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods: (1.445279ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35412]
I0212 07:24:33.231308  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-34: (5.397025ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35400]
I0212 07:24:33.232516  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-34: (3.008708ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35416]
I0212 07:24:33.233143  124047 wrap.go:47] POST /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods: (2.538394ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35412]
I0212 07:24:33.233660  124047 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 07:24:33.233838  124047 scheduling_queue.go:868] About to try and schedule pod preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-36
I0212 07:24:33.233857  124047 scheduler.go:453] Attempting to schedule pod: preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-36
I0212 07:24:33.233965  124047 factory.go:647] Unable to schedule preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-36: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 07:24:33.234015  124047 factory.go:742] Updating pod condition for preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-36 to (PodScheduled==False, Reason=Unschedulable)
I0212 07:24:33.236009  124047 wrap.go:47] PUT /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-36/status: (1.687226ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35414]
I0212 07:24:33.236236  124047 wrap.go:47] POST /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods: (2.676845ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35400]
I0212 07:24:33.238348  124047 wrap.go:47] POST /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/events: (1.839072ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35414]
I0212 07:24:33.238734  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-36: (2.135836ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35400]
I0212 07:24:33.238946  124047 wrap.go:47] POST /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods: (2.054982ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35420]
I0212 07:24:33.238972  124047 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 07:24:33.239130  124047 scheduling_queue.go:868] About to try and schedule pod preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-39
I0212 07:24:33.239142  124047 scheduler.go:453] Attempting to schedule pod: preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-39
I0212 07:24:33.239267  124047 factory.go:647] Unable to schedule preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-39: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 07:24:33.239303  124047 factory.go:742] Updating pod condition for preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-39 to (PodScheduled==False, Reason=Unschedulable)
I0212 07:24:33.239434  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-36: (4.704426ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35418]
I0212 07:24:33.241842  124047 wrap.go:47] POST /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/events: (2.037439ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35414]
I0212 07:24:33.242903  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-39: (2.357478ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35418]
I0212 07:24:33.243357  124047 wrap.go:47] PUT /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-39/status: (3.629151ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35400]
I0212 07:24:33.243645  124047 wrap.go:47] POST /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods: (2.354841ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35422]
I0212 07:24:33.244880  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-39: (1.0581ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35418]
I0212 07:24:33.245515  124047 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 07:24:33.245888  124047 wrap.go:47] POST /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods: (1.719632ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35422]
I0212 07:24:33.246105  124047 scheduling_queue.go:868] About to try and schedule pod preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-41
I0212 07:24:33.246227  124047 scheduler.go:453] Attempting to schedule pod: preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-41
I0212 07:24:33.246339  124047 factory.go:647] Unable to schedule preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-41: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 07:24:33.246410  124047 factory.go:742] Updating pod condition for preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-41 to (PodScheduled==False, Reason=Unschedulable)
I0212 07:24:33.249617  124047 wrap.go:47] POST /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods: (1.896385ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35426]
I0212 07:24:33.250338  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-41: (3.65947ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35424]
I0212 07:24:33.251837  124047 wrap.go:47] PUT /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-41/status: (4.917273ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35414]
I0212 07:24:33.252758  124047 wrap.go:47] POST /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods: (2.671625ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35426]
I0212 07:24:33.253722  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-41: (1.52097ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35414]
I0212 07:24:33.254133  124047 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 07:24:33.254288  124047 wrap.go:47] POST /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/events: (6.602126ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35428]
I0212 07:24:33.254385  124047 scheduling_queue.go:868] About to try and schedule pod preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-43
I0212 07:24:33.254407  124047 scheduler.go:453] Attempting to schedule pod: preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-43
I0212 07:24:33.254560  124047 factory.go:647] Unable to schedule preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-43: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 07:24:33.254635  124047 factory.go:742] Updating pod condition for preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-43 to (PodScheduled==False, Reason=Unschedulable)
I0212 07:24:33.255176  124047 wrap.go:47] POST /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods: (1.676408ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35426]
I0212 07:24:33.256675  124047 wrap.go:47] POST /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/events: (1.306412ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35430]
I0212 07:24:33.257414  124047 wrap.go:47] PUT /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-43/status: (2.040947ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35424]
I0212 07:24:33.257888  124047 wrap.go:47] POST /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods: (2.279735ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35426]
I0212 07:24:33.258686  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-43: (1.401321ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35414]
I0212 07:24:33.259232  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-43: (1.023923ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35424]
I0212 07:24:33.259577  124047 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 07:24:33.259778  124047 scheduling_queue.go:868] About to try and schedule pod preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-45
I0212 07:24:33.259794  124047 scheduler.go:453] Attempting to schedule pod: preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-45
I0212 07:24:33.259923  124047 factory.go:647] Unable to schedule preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-45: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 07:24:33.259960  124047 factory.go:742] Updating pod condition for preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-45 to (PodScheduled==False, Reason=Unschedulable)
I0212 07:24:33.260209  124047 wrap.go:47] POST /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods: (1.716419ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35426]
I0212 07:24:33.262612  124047 wrap.go:47] PUT /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-45/status: (2.053656ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35414]
I0212 07:24:33.262627  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-45: (2.324372ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35430]
I0212 07:24:33.262932  124047 wrap.go:47] POST /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/events: (2.163725ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35426]
I0212 07:24:33.263933  124047 wrap.go:47] POST /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods: (2.150356ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35432]
I0212 07:24:33.265140  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-45: (1.563891ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35414]
I0212 07:24:33.265443  124047 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 07:24:33.265699  124047 scheduling_queue.go:868] About to try and schedule pod preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-43
I0212 07:24:33.265719  124047 scheduler.go:453] Attempting to schedule pod: preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-43
I0212 07:24:33.265847  124047 factory.go:647] Unable to schedule preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-43: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 07:24:33.265906  124047 factory.go:742] Updating pod condition for preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-43 to (PodScheduled==False, Reason=Unschedulable)
I0212 07:24:33.267207  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-43: (1.068984ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35426]
I0212 07:24:33.271140  124047 wrap.go:47] PUT /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-43/status: (4.977129ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35430]
I0212 07:24:33.272880  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-43: (1.298085ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35430]
I0212 07:24:33.273337  124047 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 07:24:33.273460  124047 wrap.go:47] PATCH /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/events/ppod-43.15828d04bcbad9d1: (6.796699ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35434]
I0212 07:24:33.273486  124047 scheduling_queue.go:868] About to try and schedule pod preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-49
I0212 07:24:33.273620  124047 scheduler.go:453] Attempting to schedule pod: preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-49
I0212 07:24:33.273754  124047 factory.go:647] Unable to schedule preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-49: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 07:24:33.273834  124047 factory.go:742] Updating pod condition for preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-49 to (PodScheduled==False, Reason=Unschedulable)
I0212 07:24:33.276243  124047 wrap.go:47] POST /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/events: (1.666731ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35436]
I0212 07:24:33.277483  124047 wrap.go:47] PUT /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-49/status: (3.357456ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35430]
I0212 07:24:33.277620  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-49: (2.988655ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35426]
I0212 07:24:33.279377  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-49: (1.398234ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35430]
I0212 07:24:33.279647  124047 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 07:24:33.279836  124047 scheduling_queue.go:868] About to try and schedule pod preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-48
I0212 07:24:33.279856  124047 scheduler.go:453] Attempting to schedule pod: preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-48
I0212 07:24:33.279978  124047 factory.go:647] Unable to schedule preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-48: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 07:24:33.280058  124047 factory.go:742] Updating pod condition for preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-48 to (PodScheduled==False, Reason=Unschedulable)
I0212 07:24:33.282010  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-48: (1.518664ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35436]
I0212 07:24:33.282687  124047 wrap.go:47] PUT /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-48/status: (2.377751ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35430]
I0212 07:24:33.282951  124047 wrap.go:47] POST /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/events: (1.452751ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35438]
I0212 07:24:33.284404  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-48: (1.156795ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35430]
I0212 07:24:33.284792  124047 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 07:24:33.285092  124047 scheduling_queue.go:868] About to try and schedule pod preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-49
I0212 07:24:33.285113  124047 scheduler.go:453] Attempting to schedule pod: preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-49
I0212 07:24:33.285213  124047 factory.go:647] Unable to schedule preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-49: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 07:24:33.285262  124047 factory.go:742] Updating pod condition for preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-49 to (PodScheduled==False, Reason=Unschedulable)
I0212 07:24:33.288134  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-49: (2.08897ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35436]
I0212 07:24:33.288240  124047 wrap.go:47] PUT /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-49/status: (2.059514ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35430]
I0212 07:24:33.288980  124047 wrap.go:47] PATCH /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/events/ppod-49.15828d04bddfd24f: (2.399895ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35440]
I0212 07:24:33.290360  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-49: (1.407204ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35430]
I0212 07:24:33.290852  124047 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 07:24:33.291057  124047 scheduling_queue.go:868] About to try and schedule pod preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-48
I0212 07:24:33.291099  124047 scheduler.go:453] Attempting to schedule pod: preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-48
I0212 07:24:33.291222  124047 factory.go:647] Unable to schedule preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-48: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 07:24:33.291276  124047 factory.go:742] Updating pod condition for preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-48 to (PodScheduled==False, Reason=Unschedulable)
I0212 07:24:33.293766  124047 wrap.go:47] PUT /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-48/status: (1.821879ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35440]
I0212 07:24:33.295415  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-48: (1.255748ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35440]
I0212 07:24:33.295773  124047 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 07:24:33.295785  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-48: (3.858951ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35436]
I0212 07:24:33.296164  124047 scheduling_queue.go:868] About to try and schedule pod preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-47
I0212 07:24:33.296186  124047 scheduler.go:453] Attempting to schedule pod: preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-47
I0212 07:24:33.296271  124047 factory.go:647] Unable to schedule preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-47: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 07:24:33.296344  124047 factory.go:742] Updating pod condition for preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-47 to (PodScheduled==False, Reason=Unschedulable)
I0212 07:24:33.298013  124047 wrap.go:47] PATCH /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/events/ppod-48.15828d04be3e91d7: (5.590539ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35442]
I0212 07:24:33.298597  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-47: (1.733893ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35440]
I0212 07:24:33.299846  124047 wrap.go:47] PUT /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-47/status: (3.212344ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35436]
I0212 07:24:33.300646  124047 wrap.go:47] POST /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/events: (1.765083ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35442]
I0212 07:24:33.301222  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-47: (1.020474ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35436]
I0212 07:24:33.301538  124047 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 07:24:33.301738  124047 scheduling_queue.go:868] About to try and schedule pod preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-46
I0212 07:24:33.301797  124047 scheduler.go:453] Attempting to schedule pod: preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-46
I0212 07:24:33.301953  124047 factory.go:647] Unable to schedule preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-46: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 07:24:33.302012  124047 factory.go:742] Updating pod condition for preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-46 to (PodScheduled==False, Reason=Unschedulable)
I0212 07:24:33.303382  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-46: (1.121081ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35440]
I0212 07:24:33.304197  124047 wrap.go:47] POST /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/events: (1.577679ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35444]
I0212 07:24:33.304628  124047 wrap.go:47] PUT /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-46/status: (2.358772ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35442]
I0212 07:24:33.306264  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-46: (1.249556ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35444]
I0212 07:24:33.306566  124047 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 07:24:33.306767  124047 scheduling_queue.go:868] About to try and schedule pod preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-47
I0212 07:24:33.306800  124047 scheduler.go:453] Attempting to schedule pod: preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-47
I0212 07:24:33.306998  124047 factory.go:647] Unable to schedule preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-47: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 07:24:33.307074  124047 factory.go:742] Updating pod condition for preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-47 to (PodScheduled==False, Reason=Unschedulable)
I0212 07:24:33.310407  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-47: (2.224681ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35440]
I0212 07:24:33.310589  124047 wrap.go:47] PUT /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-47/status: (3.185025ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35444]
I0212 07:24:33.311674  124047 wrap.go:47] PATCH /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/events/ppod-47.15828d04bf374c90: (3.48133ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35446]
I0212 07:24:33.312146  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-47: (1.122264ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35444]
I0212 07:24:33.312674  124047 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 07:24:33.312873  124047 scheduling_queue.go:868] About to try and schedule pod preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-46
I0212 07:24:33.312891  124047 scheduler.go:453] Attempting to schedule pod: preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-46
I0212 07:24:33.313070  124047 factory.go:647] Unable to schedule preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-46: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 07:24:33.313130  124047 factory.go:742] Updating pod condition for preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-46 to (PodScheduled==False, Reason=Unschedulable)
I0212 07:24:33.314885  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-46: (1.348179ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35446]
I0212 07:24:33.315252  124047 wrap.go:47] PUT /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-46/status: (1.715976ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35440]
I0212 07:24:33.317446  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-46: (1.639436ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35440]
I0212 07:24:33.318146  124047 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 07:24:33.318264  124047 wrap.go:47] PATCH /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/events/ppod-46.15828d04bf8dc68b: (2.837889ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35446]
I0212 07:24:33.318364  124047 scheduling_queue.go:868] About to try and schedule pod preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-44
I0212 07:24:33.318374  124047 scheduler.go:453] Attempting to schedule pod: preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-44
I0212 07:24:33.318484  124047 factory.go:647] Unable to schedule preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-44: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 07:24:33.318553  124047 factory.go:742] Updating pod condition for preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-44 to (PodScheduled==False, Reason=Unschedulable)
I0212 07:24:33.319709  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-44: (999.07µs) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35440]
I0212 07:24:33.320543  124047 wrap.go:47] POST /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/events: (1.42711ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35450]
I0212 07:24:33.321344  124047 wrap.go:47] PUT /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-44/status: (2.515721ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35448]
I0212 07:24:33.322981  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-44: (1.173615ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35450]
I0212 07:24:33.323257  124047 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 07:24:33.323473  124047 scheduling_queue.go:868] About to try and schedule pod preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-41
I0212 07:24:33.323516  124047 scheduler.go:453] Attempting to schedule pod: preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-41
I0212 07:24:33.323643  124047 factory.go:647] Unable to schedule preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-41: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 07:24:33.323700  124047 factory.go:742] Updating pod condition for preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-41 to (PodScheduled==False, Reason=Unschedulable)
I0212 07:24:33.325577  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-41: (1.670178ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35450]
I0212 07:24:33.326114  124047 wrap.go:47] PUT /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-41/status: (2.211408ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35440]
I0212 07:24:33.327316  124047 wrap.go:47] PATCH /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/events/ppod-41.15828d04bc3d5801: (2.589006ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35452]
I0212 07:24:33.327460  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-41: (942.537µs) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35440]
I0212 07:24:33.327759  124047 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 07:24:33.327956  124047 scheduling_queue.go:868] About to try and schedule pod preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-44
I0212 07:24:33.327982  124047 scheduler.go:453] Attempting to schedule pod: preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-44
I0212 07:24:33.328140  124047 factory.go:647] Unable to schedule preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-44: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 07:24:33.328204  124047 factory.go:742] Updating pod condition for preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-44 to (PodScheduled==False, Reason=Unschedulable)
I0212 07:24:33.330123  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-44: (1.262031ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35452]
I0212 07:24:33.330980  124047 wrap.go:47] PUT /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-44/status: (2.163714ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35450]
I0212 07:24:33.332261  124047 wrap.go:47] PATCH /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/events/ppod-44.15828d04c08a314a: (2.727877ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35454]
I0212 07:24:33.332566  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-44: (1.183899ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35450]
I0212 07:24:33.332818  124047 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 07:24:33.332998  124047 scheduling_queue.go:868] About to try and schedule pod preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-42
I0212 07:24:33.333017  124047 scheduler.go:453] Attempting to schedule pod: preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-42
I0212 07:24:33.333140  124047 factory.go:647] Unable to schedule preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-42: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 07:24:33.333189  124047 factory.go:742] Updating pod condition for preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-42 to (PodScheduled==False, Reason=Unschedulable)
I0212 07:24:33.335213  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-42: (1.68069ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35452]
I0212 07:24:33.335526  124047 wrap.go:47] POST /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/events: (1.474205ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35456]
I0212 07:24:33.335879  124047 wrap.go:47] PUT /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-42/status: (2.142012ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35454]
I0212 07:24:33.337982  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-42: (1.584237ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35456]
I0212 07:24:33.338246  124047 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 07:24:33.338432  124047 scheduling_queue.go:868] About to try and schedule pod preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-39
I0212 07:24:33.338451  124047 scheduler.go:453] Attempting to schedule pod: preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-39
I0212 07:24:33.338599  124047 factory.go:647] Unable to schedule preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-39: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 07:24:33.338657  124047 factory.go:742] Updating pod condition for preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-39 to (PodScheduled==False, Reason=Unschedulable)
I0212 07:24:33.339907  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-39: (1.047406ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35456]
I0212 07:24:33.340553  124047 wrap.go:47] PUT /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-39/status: (1.696462ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35452]
I0212 07:24:33.342291  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-39: (1.371367ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35452]
I0212 07:24:33.342426  124047 wrap.go:47] PATCH /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/events/ppod-39.15828d04bbd0f4ad: (2.746336ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35458]
I0212 07:24:33.342608  124047 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 07:24:33.342864  124047 scheduling_queue.go:868] About to try and schedule pod preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-42
I0212 07:24:33.342890  124047 scheduler.go:453] Attempting to schedule pod: preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-42
I0212 07:24:33.343017  124047 factory.go:647] Unable to schedule preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-42: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 07:24:33.343090  124047 factory.go:742] Updating pod condition for preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-42 to (PodScheduled==False, Reason=Unschedulable)
I0212 07:24:33.344871  124047 wrap.go:47] PUT /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-42/status: (1.492651ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35456]
I0212 07:24:33.345251  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-42: (1.912619ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35452]
I0212 07:24:33.346635  124047 wrap.go:47] PATCH /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/events/ppod-42.15828d04c16984a4: (2.643514ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35460]
I0212 07:24:33.347137  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-42: (1.908021ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35456]
I0212 07:24:33.347535  124047 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 07:24:33.347714  124047 scheduling_queue.go:868] About to try and schedule pod preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-36
I0212 07:24:33.347733  124047 scheduler.go:453] Attempting to schedule pod: preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-36
I0212 07:24:33.347861  124047 factory.go:647] Unable to schedule preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-36: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 07:24:33.347918  124047 factory.go:742] Updating pod condition for preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-36 to (PodScheduled==False, Reason=Unschedulable)
I0212 07:24:33.349996  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-36: (1.459027ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35452]
I0212 07:24:33.350028  124047 wrap.go:47] PUT /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-36/status: (1.867951ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35460]
I0212 07:24:33.351663  124047 wrap.go:47] PATCH /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/events/ppod-36.15828d04bb8038e5: (2.368413ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35462]
I0212 07:24:33.352008  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-36: (1.425822ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35452]
I0212 07:24:33.352795  124047 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 07:24:33.353151  124047 scheduling_queue.go:868] About to try and schedule pod preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-40
I0212 07:24:33.353180  124047 scheduler.go:453] Attempting to schedule pod: preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-40
I0212 07:24:33.353335  124047 factory.go:647] Unable to schedule preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-40: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 07:24:33.353387  124047 factory.go:742] Updating pod condition for preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-40 to (PodScheduled==False, Reason=Unschedulable)
I0212 07:24:33.355669  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-40: (1.459472ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35460]
I0212 07:24:33.356671  124047 wrap.go:47] PUT /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-40/status: (2.932518ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35462]
I0212 07:24:33.357936  124047 wrap.go:47] POST /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/events: (1.650206ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35460]
I0212 07:24:33.359717  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-40: (1.301ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35460]
I0212 07:24:33.360294  124047 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 07:24:33.360576  124047 scheduling_queue.go:868] About to try and schedule pod preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-38
I0212 07:24:33.360633  124047 scheduler.go:453] Attempting to schedule pod: preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-38
I0212 07:24:33.360845  124047 factory.go:647] Unable to schedule preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-38: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 07:24:33.360929  124047 factory.go:742] Updating pod condition for preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-38 to (PodScheduled==False, Reason=Unschedulable)
I0212 07:24:33.365114  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-38: (3.73974ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35464]
I0212 07:24:33.365627  124047 wrap.go:47] PUT /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-38/status: (3.454488ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35462]
I0212 07:24:33.365911  124047 wrap.go:47] POST /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/events: (2.612882ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35466]
I0212 07:24:33.366465  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/preemptor-pod: (1.499531ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35468]
I0212 07:24:33.367096  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-38: (1.058071ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35462]
I0212 07:24:33.367445  124047 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 07:24:33.367655  124047 scheduling_queue.go:868] About to try and schedule pod preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-40
I0212 07:24:33.367674  124047 scheduler.go:453] Attempting to schedule pod: preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-40
I0212 07:24:33.367775  124047 factory.go:647] Unable to schedule preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-40: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 07:24:33.367835  124047 factory.go:742] Updating pod condition for preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-40 to (PodScheduled==False, Reason=Unschedulable)
I0212 07:24:33.369147  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-40: (1.031388ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35466]
I0212 07:24:33.370379  124047 wrap.go:47] PUT /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-40/status: (2.334485ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35464]
I0212 07:24:33.371597  124047 wrap.go:47] PATCH /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/events/ppod-40.15828d04c29db2b5: (2.72809ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35470]
I0212 07:24:33.372693  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-40: (1.154436ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35464]
I0212 07:24:33.373051  124047 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 07:24:33.373276  124047 scheduling_queue.go:868] About to try and schedule pod preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-38
I0212 07:24:33.373329  124047 scheduler.go:453] Attempting to schedule pod: preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-38
I0212 07:24:33.373483  124047 factory.go:647] Unable to schedule preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-38: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 07:24:33.373567  124047 factory.go:742] Updating pod condition for preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-38 to (PodScheduled==False, Reason=Unschedulable)
I0212 07:24:33.375892  124047 wrap.go:47] PUT /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-38/status: (2.066699ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35470]
I0212 07:24:33.376802  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-38: (2.398733ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35466]
I0212 07:24:33.377583  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-38: (1.241434ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35470]
I0212 07:24:33.378072  124047 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 07:24:33.378107  124047 wrap.go:47] PATCH /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/events/ppod-38.15828d04c310c540: (3.133033ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35472]
I0212 07:24:33.378220  124047 scheduling_queue.go:868] About to try and schedule pod preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-34
I0212 07:24:33.378264  124047 scheduler.go:453] Attempting to schedule pod: preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-34
I0212 07:24:33.378376  124047 factory.go:647] Unable to schedule preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-34: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 07:24:33.378439  124047 factory.go:742] Updating pod condition for preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-34 to (PodScheduled==False, Reason=Unschedulable)
I0212 07:24:33.379785  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-34: (1.066472ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35470]
I0212 07:24:33.380820  124047 wrap.go:47] PUT /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-34/status: (2.051642ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35466]
I0212 07:24:33.381850  124047 wrap.go:47] PATCH /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/events/ppod-34.15828d04baffb5c2: (2.229643ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35474]
I0212 07:24:33.382381  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-34: (1.095315ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35466]
I0212 07:24:33.382742  124047 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 07:24:33.382933  124047 scheduling_queue.go:868] About to try and schedule pod preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-37
I0212 07:24:33.382952  124047 scheduler.go:453] Attempting to schedule pod: preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-37
I0212 07:24:33.383076  124047 factory.go:647] Unable to schedule preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-37: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 07:24:33.383136  124047 factory.go:742] Updating pod condition for preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-37 to (PodScheduled==False, Reason=Unschedulable)
I0212 07:24:33.384331  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-37: (971.516µs) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35474]
I0212 07:24:33.385257  124047 wrap.go:47] PUT /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-37/status: (1.861952ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35470]
I0212 07:24:33.386116  124047 wrap.go:47] POST /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/events: (2.415777ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35476]
I0212 07:24:33.387225  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-37: (1.161507ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35470]
I0212 07:24:33.387601  124047 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 07:24:33.387909  124047 scheduling_queue.go:868] About to try and schedule pod preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-35
I0212 07:24:33.387933  124047 scheduler.go:453] Attempting to schedule pod: preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-35
I0212 07:24:33.388078  124047 factory.go:647] Unable to schedule preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-35: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 07:24:33.388177  124047 factory.go:742] Updating pod condition for preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-35 to (PodScheduled==False, Reason=Unschedulable)
I0212 07:24:33.390992  124047 wrap.go:47] POST /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/events: (1.946829ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35478]
I0212 07:24:33.391812  124047 wrap.go:47] PUT /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-35/status: (3.193988ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35476]
I0212 07:24:33.391863  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-35: (3.066798ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35474]
I0212 07:24:33.393476  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-35: (1.102401ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35476]
I0212 07:24:33.393783  124047 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 07:24:33.393947  124047 scheduling_queue.go:868] About to try and schedule pod preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-37
I0212 07:24:33.393969  124047 scheduler.go:453] Attempting to schedule pod: preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-37
I0212 07:24:33.394119  124047 factory.go:647] Unable to schedule preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-37: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 07:24:33.394182  124047 factory.go:742] Updating pod condition for preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-37 to (PodScheduled==False, Reason=Unschedulable)
I0212 07:24:33.396721  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-37: (1.849453ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35478]
I0212 07:24:33.396897  124047 wrap.go:47] PUT /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-37/status: (1.983435ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35476]
I0212 07:24:33.398580  124047 wrap.go:47] PATCH /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/events/ppod-37.15828d04c4639f09: (3.621731ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35480]
I0212 07:24:33.398615  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-37: (1.292644ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35476]
I0212 07:24:33.398869  124047 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 07:24:33.399093  124047 scheduling_queue.go:868] About to try and schedule pod preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-35
I0212 07:24:33.399114  124047 scheduler.go:453] Attempting to schedule pod: preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-35
I0212 07:24:33.399266  124047 factory.go:647] Unable to schedule preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-35: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 07:24:33.399339  124047 factory.go:742] Updating pod condition for preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-35 to (PodScheduled==False, Reason=Unschedulable)
I0212 07:24:33.400907  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-35: (1.31924ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35478]
I0212 07:24:33.401170  124047 wrap.go:47] PUT /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-35/status: (1.559109ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35480]
I0212 07:24:33.402576  124047 wrap.go:47] PATCH /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/events/ppod-35.15828d04c4b08b10: (2.355895ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35482]
I0212 07:24:33.403168  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-35: (1.455122ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35480]
I0212 07:24:33.403524  124047 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 07:24:33.403669  124047 scheduling_queue.go:868] About to try and schedule pod preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-31
I0212 07:24:33.403689  124047 scheduler.go:453] Attempting to schedule pod: preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-31
I0212 07:24:33.403801  124047 factory.go:647] Unable to schedule preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-31: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 07:24:33.403857  124047 factory.go:742] Updating pod condition for preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-31 to (PodScheduled==False, Reason=Unschedulable)
I0212 07:24:33.405944  124047 wrap.go:47] PUT /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-31/status: (1.714986ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35478]
I0212 07:24:33.406133  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-31: (1.906656ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35482]
I0212 07:24:33.407829  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-31: (1.272823ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35482]
I0212 07:24:33.408064  124047 wrap.go:47] PATCH /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/events/ppod-31.15828d04ba59e249: (3.411312ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35484]
I0212 07:24:33.408179  124047 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 07:24:33.408353  124047 scheduling_queue.go:868] About to try and schedule pod preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-33
I0212 07:24:33.408376  124047 scheduler.go:453] Attempting to schedule pod: preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-33
I0212 07:24:33.408469  124047 factory.go:647] Unable to schedule preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-33: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 07:24:33.408537  124047 factory.go:742] Updating pod condition for preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-33 to (PodScheduled==False, Reason=Unschedulable)
I0212 07:24:33.410911  124047 wrap.go:47] POST /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/events: (1.636454ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35486]
I0212 07:24:33.411238  124047 wrap.go:47] PUT /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-33/status: (1.87329ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35482]
I0212 07:24:33.411444  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-33: (2.34478ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35478]
I0212 07:24:33.412886  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-33: (1.165817ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35482]
I0212 07:24:33.413187  124047 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 07:24:33.413388  124047 scheduling_queue.go:868] About to try and schedule pod preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-32
I0212 07:24:33.413408  124047 scheduler.go:453] Attempting to schedule pod: preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-32
I0212 07:24:33.413568  124047 factory.go:647] Unable to schedule preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-32: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 07:24:33.413624  124047 factory.go:742] Updating pod condition for preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-32 to (PodScheduled==False, Reason=Unschedulable)
I0212 07:24:33.415441  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-32: (1.529339ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35486]
I0212 07:24:33.415968  124047 wrap.go:47] POST /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/events: (1.65561ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35488]
I0212 07:24:33.416181  124047 wrap.go:47] PUT /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-32/status: (2.322976ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35478]
I0212 07:24:33.417906  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-32: (1.298258ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35488]
I0212 07:24:33.418216  124047 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 07:24:33.418426  124047 scheduling_queue.go:868] About to try and schedule pod preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-33
I0212 07:24:33.418445  124047 scheduler.go:453] Attempting to schedule pod: preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-33
I0212 07:24:33.418663  124047 factory.go:647] Unable to schedule preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-33: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 07:24:33.418720  124047 factory.go:742] Updating pod condition for preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-33 to (PodScheduled==False, Reason=Unschedulable)
I0212 07:24:33.420136  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-33: (1.104218ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35486]
I0212 07:24:33.420781  124047 wrap.go:47] PUT /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-33/status: (1.850596ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35488]
I0212 07:24:33.422599  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-33: (1.082046ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35488]
I0212 07:24:33.422826  124047 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 07:24:33.423053  124047 scheduling_queue.go:868] About to try and schedule pod preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-32
I0212 07:24:33.423098  124047 scheduler.go:453] Attempting to schedule pod: preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-32
I0212 07:24:33.423127  124047 wrap.go:47] PATCH /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/events/ppod-33.15828d04c5e73f6a: (2.66772ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35486]
I0212 07:24:33.423362  124047 factory.go:647] Unable to schedule preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-32: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 07:24:33.423410  124047 factory.go:742] Updating pod condition for preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-32 to (PodScheduled==False, Reason=Unschedulable)
I0212 07:24:33.425104  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-32: (1.337854ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35490]
I0212 07:24:33.425443  124047 wrap.go:47] PUT /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-32/status: (1.643065ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35488]
I0212 07:24:33.426904  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-32: (1.079012ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35488]
I0212 07:24:33.427266  124047 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 07:24:33.427273  124047 wrap.go:47] PATCH /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/events/ppod-32.15828d04c634dd45: (3.028485ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35492]
I0212 07:24:33.427542  124047 scheduling_queue.go:868] About to try and schedule pod preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-30
I0212 07:24:33.427572  124047 scheduler.go:453] Attempting to schedule pod: preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-30
I0212 07:24:33.427839  124047 factory.go:647] Unable to schedule preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-30: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 07:24:33.427887  124047 factory.go:742] Updating pod condition for preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-30 to (PodScheduled==False, Reason=Unschedulable)
I0212 07:24:33.429197  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-30: (1.08563ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35488]
I0212 07:24:33.429653  124047 wrap.go:47] PUT /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-30/status: (1.522847ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35490]
I0212 07:24:33.430466  124047 wrap.go:47] POST /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/events: (1.661886ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35494]
I0212 07:24:33.431204  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-30: (1.20836ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35490]
I0212 07:24:33.431482  124047 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 07:24:33.431675  124047 scheduling_queue.go:868] About to try and schedule pod preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-20
I0212 07:24:33.431739  124047 scheduler.go:453] Attempting to schedule pod: preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-20
I0212 07:24:33.431878  124047 factory.go:647] Unable to schedule preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-20: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 07:24:33.431937  124047 factory.go:742] Updating pod condition for preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-20 to (PodScheduled==False, Reason=Unschedulable)
I0212 07:24:33.433987  124047 wrap.go:47] PUT /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-20/status: (1.803303ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35494]
I0212 07:24:33.434417  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-20: (2.217722ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35488]
I0212 07:24:33.435456  124047 wrap.go:47] PATCH /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/events/ppod-20.15828d04b94dcd94: (2.685222ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35496]
I0212 07:24:33.435862  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-20: (1.158228ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35494]
I0212 07:24:33.436200  124047 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 07:24:33.436406  124047 scheduling_queue.go:868] About to try and schedule pod preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-30
I0212 07:24:33.436445  124047 scheduler.go:453] Attempting to schedule pod: preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-30
I0212 07:24:33.436600  124047 factory.go:647] Unable to schedule preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-30: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 07:24:33.436802  124047 factory.go:742] Updating pod condition for preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-30 to (PodScheduled==False, Reason=Unschedulable)
I0212 07:24:33.438422  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-30: (1.412111ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35496]
I0212 07:24:33.439246  124047 wrap.go:47] PUT /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-30/status: (1.880444ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35488]
I0212 07:24:33.440634  124047 wrap.go:47] PATCH /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/events/ppod-30.15828d04c70e7e7c: (2.427993ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35498]
I0212 07:24:33.441663  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-30: (1.949726ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35488]
I0212 07:24:33.441944  124047 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 07:24:33.442141  124047 scheduling_queue.go:868] About to try and schedule pod preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-28
I0212 07:24:33.442161  124047 scheduler.go:453] Attempting to schedule pod: preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-28
I0212 07:24:33.442304  124047 factory.go:647] Unable to schedule preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-28: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 07:24:33.442376  124047 factory.go:742] Updating pod condition for preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-28 to (PodScheduled==False, Reason=Unschedulable)
I0212 07:24:33.443671  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-28: (1.064006ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35496]
I0212 07:24:33.444126  124047 wrap.go:47] PUT /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-28/status: (1.513722ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35498]
I0212 07:24:33.444816  124047 wrap.go:47] POST /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/events: (1.912006ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35500]
I0212 07:24:33.445834  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-28: (1.125328ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35498]
I0212 07:24:33.446126  124047 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 07:24:33.446327  124047 scheduling_queue.go:868] About to try and schedule pod preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-26
I0212 07:24:33.446347  124047 scheduler.go:453] Attempting to schedule pod: preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-26
I0212 07:24:33.446462  124047 factory.go:647] Unable to schedule preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-26: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 07:24:33.446538  124047 factory.go:742] Updating pod condition for preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-26 to (PodScheduled==False, Reason=Unschedulable)
I0212 07:24:33.447785  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-26: (995.582µs) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35496]
I0212 07:24:33.448487  124047 wrap.go:47] PUT /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-26/status: (1.711028ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35500]
I0212 07:24:33.449064  124047 wrap.go:47] POST /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/events: (1.979161ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35502]
I0212 07:24:33.450417  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-26: (1.158565ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35500]
I0212 07:24:33.450760  124047 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 07:24:33.450905  124047 scheduling_queue.go:868] About to try and schedule pod preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-28
I0212 07:24:33.450927  124047 scheduler.go:453] Attempting to schedule pod: preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-28
I0212 07:24:33.451057  124047 factory.go:647] Unable to schedule preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-28: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 07:24:33.451119  124047 factory.go:742] Updating pod condition for preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-28 to (PodScheduled==False, Reason=Unschedulable)
I0212 07:24:33.452738  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-28: (1.292883ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35496]
I0212 07:24:33.453184  124047 wrap.go:47] PUT /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-28/status: (1.816229ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35502]
I0212 07:24:33.454734  124047 wrap.go:47] PATCH /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/events/ppod-28.15828d04c7eb90ef: (2.663702ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35504]
I0212 07:24:33.454790  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-28: (1.086918ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35502]
I0212 07:24:33.455010  124047 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 07:24:33.455192  124047 scheduling_queue.go:868] About to try and schedule pod preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-26
I0212 07:24:33.455212  124047 scheduler.go:453] Attempting to schedule pod: preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-26
I0212 07:24:33.455321  124047 factory.go:647] Unable to schedule preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-26: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 07:24:33.455381  124047 factory.go:742] Updating pod condition for preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-26 to (PodScheduled==False, Reason=Unschedulable)
I0212 07:24:33.456814  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-26: (1.181799ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35504]
I0212 07:24:33.457741  124047 wrap.go:47] PUT /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-26/status: (2.109866ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35496]
I0212 07:24:33.458656  124047 wrap.go:47] PATCH /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/events/ppod-26.15828d04c82b0d2f: (2.3641ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35506]
I0212 07:24:33.459244  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-26: (1.141528ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35496]
I0212 07:24:33.459562  124047 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 07:24:33.459791  124047 scheduling_queue.go:868] About to try and schedule pod preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-25
I0212 07:24:33.459812  124047 scheduler.go:453] Attempting to schedule pod: preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-25
I0212 07:24:33.459976  124047 factory.go:647] Unable to schedule preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-25: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 07:24:33.460053  124047 factory.go:742] Updating pod condition for preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-25 to (PodScheduled==False, Reason=Unschedulable)
I0212 07:24:33.462007  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-25: (1.681671ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35504]
I0212 07:24:33.462802  124047 wrap.go:47] POST /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/events: (2.165578ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35508]
I0212 07:24:33.462915  124047 wrap.go:47] PUT /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-25/status: (2.613809ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35506]
I0212 07:24:33.464566  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-25: (1.126137ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35508]
I0212 07:24:33.464850  124047 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 07:24:33.465059  124047 scheduling_queue.go:868] About to try and schedule pod preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-24
I0212 07:24:33.465080  124047 scheduler.go:453] Attempting to schedule pod: preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-24
I0212 07:24:33.465203  124047 factory.go:647] Unable to schedule preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-24: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 07:24:33.465265  124047 factory.go:742] Updating pod condition for preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-24 to (PodScheduled==False, Reason=Unschedulable)
I0212 07:24:33.466679  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-24: (1.088141ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35504]
I0212 07:24:33.467720  124047 wrap.go:47] POST /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/events: (1.819991ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35510]
I0212 07:24:33.468255  124047 wrap.go:47] PUT /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-24/status: (2.688564ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35508]
I0212 07:24:33.468878  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/preemptor-pod: (1.202499ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35504]
I0212 07:24:33.469836  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-24: (1.005291ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35512]
I0212 07:24:33.470228  124047 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 07:24:33.470416  124047 scheduling_queue.go:868] About to try and schedule pod preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-25
I0212 07:24:33.470435  124047 scheduler.go:453] Attempting to schedule pod: preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-25
I0212 07:24:33.470571  124047 factory.go:647] Unable to schedule preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-25: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 07:24:33.470627  124047 factory.go:742] Updating pod condition for preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-25 to (PodScheduled==False, Reason=Unschedulable)
I0212 07:24:33.472064  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-25: (1.104823ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35510]
I0212 07:24:33.473388  124047 wrap.go:47] PUT /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-25/status: (2.336288ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35504]
I0212 07:24:33.475020  124047 wrap.go:47] PATCH /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/events/ppod-25.15828d04c8f91d17: (3.210617ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35514]
I0212 07:24:33.475122  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-25: (1.262581ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35504]
I0212 07:24:33.475338  124047 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 07:24:33.475568  124047 scheduling_queue.go:868] About to try and schedule pod preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-24
I0212 07:24:33.475588  124047 scheduler.go:453] Attempting to schedule pod: preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-24
I0212 07:24:33.475791  124047 factory.go:647] Unable to schedule preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-24: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 07:24:33.475898  124047 factory.go:742] Updating pod condition for preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-24 to (PodScheduled==False, Reason=Unschedulable)
I0212 07:24:33.478139  124047 wrap.go:47] PUT /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-24/status: (1.89079ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35514]
I0212 07:24:33.478354  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-24: (2.191181ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35510]
I0212 07:24:33.479006  124047 wrap.go:47] PATCH /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/events/ppod-24.15828d04c948c509: (2.386383ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35516]
I0212 07:24:33.479888  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-24: (1.111976ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35510]
I0212 07:24:33.480183  124047 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 07:24:33.480444  124047 scheduling_queue.go:868] About to try and schedule pod preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-23
I0212 07:24:33.480464  124047 scheduler.go:453] Attempting to schedule pod: preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-23
I0212 07:24:33.480576  124047 factory.go:647] Unable to schedule preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-23: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 07:24:33.480625  124047 factory.go:742] Updating pod condition for preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-23 to (PodScheduled==False, Reason=Unschedulable)
I0212 07:24:33.482574  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-23: (1.335448ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35514]
I0212 07:24:33.483141  124047 wrap.go:47] PUT /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-23/status: (2.292232ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35516]
I0212 07:24:33.483472  124047 wrap.go:47] POST /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/events: (2.152109ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35518]
I0212 07:24:33.484840  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-23: (1.176434ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35516]
I0212 07:24:33.485189  124047 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 07:24:33.485378  124047 scheduling_queue.go:868] About to try and schedule pod preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-22
I0212 07:24:33.485400  124047 scheduler.go:453] Attempting to schedule pod: preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-22
I0212 07:24:33.485512  124047 factory.go:647] Unable to schedule preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-22: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 07:24:33.485575  124047 factory.go:742] Updating pod condition for preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-22 to (PodScheduled==False, Reason=Unschedulable)
I0212 07:24:33.488191  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-22: (1.900363ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35514]
I0212 07:24:33.488260  124047 wrap.go:47] PUT /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-22/status: (2.379796ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35518]
I0212 07:24:33.488281  124047 wrap.go:47] POST /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/events: (1.971905ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35520]
I0212 07:24:33.489877  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-22: (1.133623ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35518]
I0212 07:24:33.490197  124047 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 07:24:33.490427  124047 scheduling_queue.go:868] About to try and schedule pod preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-23
I0212 07:24:33.490450  124047 scheduler.go:453] Attempting to schedule pod: preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-23
I0212 07:24:33.490729  124047 factory.go:647] Unable to schedule preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-23: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 07:24:33.490787  124047 factory.go:742] Updating pod condition for preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-23 to (PodScheduled==False, Reason=Unschedulable)
I0212 07:24:33.492114  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-23: (1.125646ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35520]
I0212 07:24:33.494148  124047 wrap.go:47] PUT /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-23/status: (3.025531ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35514]
I0212 07:24:33.494156  124047 wrap.go:47] PATCH /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/events/ppod-23.15828d04ca333b64: (2.23005ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35522]
I0212 07:24:33.495992  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-23: (1.202994ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35522]
I0212 07:24:33.496345  124047 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 07:24:33.496555  124047 scheduling_queue.go:868] About to try and schedule pod preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-22
I0212 07:24:33.496576  124047 scheduler.go:453] Attempting to schedule pod: preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-22
I0212 07:24:33.496690  124047 factory.go:647] Unable to schedule preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-22: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 07:24:33.496747  124047 factory.go:742] Updating pod condition for preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-22 to (PodScheduled==False, Reason=Unschedulable)
I0212 07:24:33.498742  124047 wrap.go:47] PUT /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-22/status: (1.748639ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35522]
I0212 07:24:33.498772  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-22: (1.238635ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35520]
I0212 07:24:33.499894  124047 wrap.go:47] PATCH /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/events/ppod-22.15828d04ca7e9d7b: (2.242363ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35524]
I0212 07:24:33.500394  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-22: (1.150498ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35522]
I0212 07:24:33.500656  124047 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 07:24:33.500874  124047 scheduling_queue.go:868] About to try and schedule pod preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-18
I0212 07:24:33.500896  124047 scheduler.go:453] Attempting to schedule pod: preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-18
I0212 07:24:33.501027  124047 factory.go:647] Unable to schedule preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-18: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 07:24:33.501096  124047 factory.go:742] Updating pod condition for preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-18 to (PodScheduled==False, Reason=Unschedulable)
I0212 07:24:33.503258  124047 wrap.go:47] PUT /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-18/status: (1.90482ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35524]
I0212 07:24:33.503689  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-18: (1.785784ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35520]
I0212 07:24:33.504568  124047 wrap.go:47] PATCH /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/events/ppod-18.15828d04b827b814: (2.54811ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35526]
I0212 07:24:33.504995  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-18: (1.112186ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35524]
I0212 07:24:33.505288  124047 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 07:24:33.505446  124047 scheduling_queue.go:868] About to try and schedule pod preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-21
I0212 07:24:33.505456  124047 scheduler.go:453] Attempting to schedule pod: preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-21
I0212 07:24:33.505567  124047 factory.go:647] Unable to schedule preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-21: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 07:24:33.505606  124047 factory.go:742] Updating pod condition for preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-21 to (PodScheduled==False, Reason=Unschedulable)
I0212 07:24:33.507861  124047 wrap.go:47] POST /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/events: (1.58881ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35528]
I0212 07:24:33.508546  124047 wrap.go:47] PUT /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-21/status: (2.711837ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35526]
I0212 07:24:33.508888  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-21: (2.650096ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35520]
I0212 07:24:33.509968  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-21: (1.093891ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35526]
I0212 07:24:33.510237  124047 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 07:24:33.514741  124047 scheduling_queue.go:868] About to try and schedule pod preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-16
I0212 07:24:33.514826  124047 scheduler.go:453] Attempting to schedule pod: preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-16
I0212 07:24:33.515166  124047 factory.go:647] Unable to schedule preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-16: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 07:24:33.515290  124047 factory.go:742] Updating pod condition for preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-16 to (PodScheduled==False, Reason=Unschedulable)
I0212 07:24:33.527982  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-16: (2.099558ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35520]
I0212 07:24:33.528718  124047 wrap.go:47] PUT /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-16/status: (2.844645ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35528]
I0212 07:24:33.529836  124047 wrap.go:47] PATCH /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/events/ppod-16.15828d04b7dcd78f: (2.944703ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35530]
I0212 07:24:33.530313  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-16: (1.047188ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35528]
I0212 07:24:33.530619  124047 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 07:24:33.530810  124047 scheduling_queue.go:868] About to try and schedule pod preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-21
I0212 07:24:33.530835  124047 scheduler.go:453] Attempting to schedule pod: preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-21
I0212 07:24:33.530902  124047 factory.go:647] Unable to schedule preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-21: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 07:24:33.530945  124047 factory.go:742] Updating pod condition for preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-21 to (PodScheduled==False, Reason=Unschedulable)
I0212 07:24:33.532915  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-21: (1.27073ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35520]
I0212 07:24:33.532923  124047 wrap.go:47] PUT /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-21/status: (1.753892ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35530]
I0212 07:24:33.534821  124047 wrap.go:47] PATCH /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/events/ppod-21.15828d04cbb06527: (3.019115ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35532]
I0212 07:24:33.535335  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-21: (1.282472ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35530]
I0212 07:24:33.535624  124047 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 07:24:33.535797  124047 scheduling_queue.go:868] About to try and schedule pod preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-19
I0212 07:24:33.535823  124047 scheduler.go:453] Attempting to schedule pod: preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-19
I0212 07:24:33.535931  124047 factory.go:647] Unable to schedule preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-19: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 07:24:33.535989  124047 factory.go:742] Updating pod condition for preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-19 to (PodScheduled==False, Reason=Unschedulable)
I0212 07:24:33.537311  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-19: (1.0794ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35520]
I0212 07:24:33.538098  124047 wrap.go:47] POST /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/events: (1.454953ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35534]
I0212 07:24:33.538720  124047 wrap.go:47] PUT /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-19/status: (2.459327ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35532]
I0212 07:24:33.540330  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-19: (1.162257ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35534]
I0212 07:24:33.540601  124047 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 07:24:33.540814  124047 scheduling_queue.go:868] About to try and schedule pod preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-17
I0212 07:24:33.540871  124047 scheduler.go:453] Attempting to schedule pod: preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-17
I0212 07:24:33.541029  124047 factory.go:647] Unable to schedule preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-17: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 07:24:33.541107  124047 factory.go:742] Updating pod condition for preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-17 to (PodScheduled==False, Reason=Unschedulable)
I0212 07:24:33.542367  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-17: (1.04312ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35534]
I0212 07:24:33.542856  124047 wrap.go:47] POST /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/events: (1.403832ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35520]
I0212 07:24:33.543741  124047 wrap.go:47] PUT /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-17/status: (2.038053ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35536]
I0212 07:24:33.545243  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-17: (1.031421ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35520]
I0212 07:24:33.545557  124047 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 07:24:33.545758  124047 scheduling_queue.go:868] About to try and schedule pod preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-19
I0212 07:24:33.545780  124047 scheduler.go:453] Attempting to schedule pod: preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-19
I0212 07:24:33.545940  124047 factory.go:647] Unable to schedule preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-19: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 07:24:33.546006  124047 factory.go:742] Updating pod condition for preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-19 to (PodScheduled==False, Reason=Unschedulable)
I0212 07:24:33.548008  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-19: (1.685273ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35534]
I0212 07:24:33.548132  124047 wrap.go:47] PUT /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-19/status: (1.876924ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35520]
I0212 07:24:33.549273  124047 wrap.go:47] PATCH /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/events/ppod-19.15828d04cd7ff76c: (2.513894ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35538]
I0212 07:24:33.549772  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-19: (1.18813ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35520]
I0212 07:24:33.550155  124047 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 07:24:33.550358  124047 scheduling_queue.go:868] About to try and schedule pod preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-17
I0212 07:24:33.550379  124047 scheduler.go:453] Attempting to schedule pod: preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-17
I0212 07:24:33.550508  124047 factory.go:647] Unable to schedule preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-17: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 07:24:33.550570  124047 factory.go:742] Updating pod condition for preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-17 to (PodScheduled==False, Reason=Unschedulable)
I0212 07:24:33.552147  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-17: (1.282053ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35534]
I0212 07:24:33.552640  124047 wrap.go:47] PUT /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-17/status: (1.75992ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35538]
I0212 07:24:33.553687  124047 wrap.go:47] PATCH /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/events/ppod-17.15828d04cdce1388: (2.218877ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35540]
I0212 07:24:33.554124  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-17: (1.028702ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35538]
I0212 07:24:33.554422  124047 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 07:24:33.554626  124047 scheduling_queue.go:868] About to try and schedule pod preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-15
I0212 07:24:33.554642  124047 scheduler.go:453] Attempting to schedule pod: preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-15
I0212 07:24:33.554745  124047 factory.go:647] Unable to schedule preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-15: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 07:24:33.554793  124047 factory.go:742] Updating pod condition for preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-15 to (PodScheduled==False, Reason=Unschedulable)
I0212 07:24:33.556223  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-15: (1.200577ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35540]
I0212 07:24:33.556858  124047 wrap.go:47] PUT /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-15/status: (1.747189ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35534]
I0212 07:24:33.557298  124047 wrap.go:47] POST /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/events: (1.766688ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35542]
I0212 07:24:33.558541  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-15: (1.255369ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35534]
I0212 07:24:33.558820  124047 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 07:24:33.559008  124047 scheduling_queue.go:868] About to try and schedule pod preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-13
I0212 07:24:33.559028  124047 scheduler.go:453] Attempting to schedule pod: preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-13
I0212 07:24:33.559165  124047 factory.go:647] Unable to schedule preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-13: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 07:24:33.559212  124047 factory.go:742] Updating pod condition for preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-13 to (PodScheduled==False, Reason=Unschedulable)
I0212 07:24:33.560948  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-13: (1.218396ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35540]
I0212 07:24:33.561581  124047 wrap.go:47] PUT /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-13/status: (2.11799ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35542]
I0212 07:24:33.561832  124047 wrap.go:47] POST /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/events: (1.929244ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35544]
I0212 07:24:33.563197  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-13: (1.213258ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35542]
I0212 07:24:33.563576  124047 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 07:24:33.563776  124047 scheduling_queue.go:868] About to try and schedule pod preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-15
I0212 07:24:33.563798  124047 scheduler.go:453] Attempting to schedule pod: preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-15
I0212 07:24:33.563952  124047 factory.go:647] Unable to schedule preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-15: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 07:24:33.564009  124047 factory.go:742] Updating pod condition for preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-15 to (PodScheduled==False, Reason=Unschedulable)
I0212 07:24:33.565513  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-15: (1.055622ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35540]
I0212 07:24:33.566397  124047 wrap.go:47] PUT /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-15/status: (2.128988ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35544]
I0212 07:24:33.567421  124047 wrap.go:47] PATCH /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/events/ppod-15.15828d04ce9ef66a: (2.447805ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35546]
I0212 07:24:33.568230  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-15: (1.386146ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35544]
I0212 07:24:33.568372  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/preemptor-pod: (1.30883ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35540]
I0212 07:24:33.568521  124047 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 07:24:33.568681  124047 scheduling_queue.go:868] About to try and schedule pod preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-13
I0212 07:24:33.568701  124047 scheduler.go:453] Attempting to schedule pod: preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-13
I0212 07:24:33.568854  124047 factory.go:647] Unable to schedule preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-13: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 07:24:33.568908  124047 factory.go:742] Updating pod condition for preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-13 to (PodScheduled==False, Reason=Unschedulable)
I0212 07:24:33.570272  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-13: (1.064639ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35546]
I0212 07:24:33.571245  124047 wrap.go:47] PUT /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-13/status: (2.052814ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35544]
I0212 07:24:33.572351  124047 wrap.go:47] PATCH /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/events/ppod-13.15828d04cee25afe: (2.455964ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35548]
I0212 07:24:33.572865  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-13: (1.140091ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35544]
I0212 07:24:33.573128  124047 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 07:24:33.573396  124047 scheduling_queue.go:868] About to try and schedule pod preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-10
I0212 07:24:33.573422  124047 scheduler.go:453] Attempting to schedule pod: preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-10
I0212 07:24:33.573586  124047 factory.go:647] Unable to schedule preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-10: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 07:24:33.573646  124047 factory.go:742] Updating pod condition for preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-10 to (PodScheduled==False, Reason=Unschedulable)
I0212 07:24:33.575178  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-10: (1.261857ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35546]
I0212 07:24:33.576064  124047 wrap.go:47] PUT /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-10/status: (2.117897ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35548]
I0212 07:24:33.577678  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-10: (1.224503ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35548]
I0212 07:24:33.577716  124047 wrap.go:47] PATCH /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/events/ppod-10.15828d04b6c5faed: (3.137ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35550]
I0212 07:24:33.577946  124047 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 07:24:33.578138  124047 scheduling_queue.go:868] About to try and schedule pod preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-11
I0212 07:24:33.578159  124047 scheduler.go:453] Attempting to schedule pod: preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-11
I0212 07:24:33.578292  124047 factory.go:647] Unable to schedule preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-11: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 07:24:33.578350  124047 factory.go:742] Updating pod condition for preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-11 to (PodScheduled==False, Reason=Unschedulable)
I0212 07:24:33.579702  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-11: (1.089332ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35546]
I0212 07:24:33.580147  124047 wrap.go:47] PUT /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-11/status: (1.535528ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35548]
I0212 07:24:33.580790  124047 wrap.go:47] POST /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/events: (1.880218ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35552]
I0212 07:24:33.581463  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-11: (990.414µs) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35548]
I0212 07:24:33.581772  124047 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 07:24:33.581942  124047 scheduling_queue.go:868] About to try and schedule pod preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-8
I0212 07:24:33.581964  124047 scheduler.go:453] Attempting to schedule pod: preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-8
I0212 07:24:33.582111  124047 factory.go:647] Unable to schedule preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-8: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 07:24:33.582172  124047 factory.go:742] Updating pod condition for preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-8 to (PodScheduled==False, Reason=Unschedulable)
I0212 07:24:33.583980  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-8: (1.208339ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35546]
I0212 07:24:33.584610  124047 wrap.go:47] PUT /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-8/status: (1.825453ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35552]
I0212 07:24:33.585270  124047 wrap.go:47] PATCH /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/events/ppod-8.15828d04b6095c79: (2.331319ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35554]
I0212 07:24:33.586074  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-8: (1.07513ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35552]
I0212 07:24:33.586390  124047 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 07:24:33.586634  124047 scheduling_queue.go:868] About to try and schedule pod preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-11
I0212 07:24:33.586656  124047 scheduler.go:453] Attempting to schedule pod: preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-11
I0212 07:24:33.586744  124047 factory.go:647] Unable to schedule preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-11: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 07:24:33.586793  124047 factory.go:742] Updating pod condition for preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-11 to (PodScheduled==False, Reason=Unschedulable)
I0212 07:24:33.588646  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-11: (1.063871ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35546]
I0212 07:24:33.589338  124047 wrap.go:47] PUT /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-11/status: (2.307357ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35554]
I0212 07:24:33.590484  124047 wrap.go:47] PATCH /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/events/ppod-11.15828d04d0066d68: (2.253842ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35556]
I0212 07:24:33.590871  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-11: (1.100851ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35554]
I0212 07:24:33.591189  124047 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 07:24:33.591436  124047 scheduling_queue.go:868] About to try and schedule pod preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-9
I0212 07:24:33.591474  124047 scheduler.go:453] Attempting to schedule pod: preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-9
I0212 07:24:33.591601  124047 factory.go:647] Unable to schedule preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-9: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 07:24:33.591660  124047 factory.go:742] Updating pod condition for preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-9 to (PodScheduled==False, Reason=Unschedulable)
I0212 07:24:33.593476  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-9: (1.503275ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35546]
I0212 07:24:33.594074  124047 wrap.go:47] PUT /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-9/status: (2.195059ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35554]
I0212 07:24:33.594521  124047 wrap.go:47] POST /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/events: (2.261337ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35558]
I0212 07:24:33.595825  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-9: (1.170956ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35546]
I0212 07:24:33.596064  124047 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 07:24:33.596240  124047 scheduling_queue.go:868] About to try and schedule pod preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-5
I0212 07:24:33.596264  124047 scheduler.go:453] Attempting to schedule pod: preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-5
I0212 07:24:33.596417  124047 factory.go:647] Unable to schedule preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-5: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 07:24:33.596582  124047 factory.go:742] Updating pod condition for preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-5 to (PodScheduled==False, Reason=Unschedulable)
I0212 07:24:33.598054  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-5: (1.108974ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35558]
I0212 07:24:33.598353  124047 wrap.go:47] PUT /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-5/status: (1.559916ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35554]
I0212 07:24:33.599528  124047 wrap.go:47] PATCH /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/events/ppod-5.15828d04b5b2eecf: (2.333454ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35560]
I0212 07:24:33.600072  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-5: (976.927µs) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35554]
I0212 07:24:33.600392  124047 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 07:24:33.600578  124047 scheduling_queue.go:868] About to try and schedule pod preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-9
I0212 07:24:33.600598  124047 scheduler.go:453] Attempting to schedule pod: preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-9
I0212 07:24:33.600696  124047 factory.go:647] Unable to schedule preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-9: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 07:24:33.600731  124047 factory.go:742] Updating pod condition for preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-9 to (PodScheduled==False, Reason=Unschedulable)
I0212 07:24:33.601932  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-9: (960.369µs) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35558]
I0212 07:24:33.602622  124047 wrap.go:47] PUT /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-9/status: (1.616179ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35560]
I0212 07:24:33.603850  124047 wrap.go:47] PATCH /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/events/ppod-9.15828d04d0d1748d: (2.223259ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35562]
I0212 07:24:33.604025  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-9: (1.024109ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35560]
I0212 07:24:33.604300  124047 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 07:24:33.604469  124047 scheduling_queue.go:868] About to try and schedule pod preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-7
I0212 07:24:33.604510  124047 scheduler.go:453] Attempting to schedule pod: preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-7
I0212 07:24:33.604615  124047 factory.go:647] Unable to schedule preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-7: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 07:24:33.604669  124047 factory.go:742] Updating pod condition for preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-7 to (PodScheduled==False, Reason=Unschedulable)
I0212 07:24:33.605924  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-7: (997.926µs) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35558]
I0212 07:24:33.606477  124047 wrap.go:47] PUT /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-7/status: (1.561946ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35562]
I0212 07:24:33.607335  124047 wrap.go:47] POST /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/events: (2.137169ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35564]
I0212 07:24:33.608460  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-7: (1.21939ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35562]
I0212 07:24:33.608731  124047 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 07:24:33.608884  124047 scheduling_queue.go:868] About to try and schedule pod preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-6
I0212 07:24:33.608898  124047 scheduler.go:453] Attempting to schedule pod: preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-6
I0212 07:24:33.608988  124047 factory.go:647] Unable to schedule preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-6: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 07:24:33.609034  124047 factory.go:742] Updating pod condition for preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-6 to (PodScheduled==False, Reason=Unschedulable)
I0212 07:24:33.610267  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-6: (1.013024ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35558]
I0212 07:24:33.610986  124047 wrap.go:47] PUT /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-6/status: (1.751934ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35564]
I0212 07:24:33.611457  124047 wrap.go:47] POST /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/events: (1.633533ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35566]
I0212 07:24:33.612549  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-6: (1.055162ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35564]
I0212 07:24:33.612809  124047 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 07:24:33.612998  124047 scheduling_queue.go:868] About to try and schedule pod preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-7
I0212 07:24:33.613018  124047 scheduler.go:453] Attempting to schedule pod: preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-7
I0212 07:24:33.613119  124047 factory.go:647] Unable to schedule preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-7: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 07:24:33.613168  124047 factory.go:742] Updating pod condition for preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-7 to (PodScheduled==False, Reason=Unschedulable)
I0212 07:24:33.614934  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-7: (962.975µs) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35558]
I0212 07:24:33.615018  124047 wrap.go:47] PUT /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-7/status: (1.575787ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35566]
I0212 07:24:33.616212  124047 wrap.go:47] PATCH /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/events/ppod-7.15828d04d197fabb: (2.160569ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35568]
I0212 07:24:33.616531  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-7: (1.141089ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35566]
I0212 07:24:33.616793  124047 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 07:24:33.617006  124047 scheduling_queue.go:868] About to try and schedule pod preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-6
I0212 07:24:33.617025  124047 scheduler.go:453] Attempting to schedule pod: preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-6
I0212 07:24:33.617138  124047 factory.go:647] Unable to schedule preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-6: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 07:24:33.617186  124047 factory.go:742] Updating pod condition for preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-6 to (PodScheduled==False, Reason=Unschedulable)
I0212 07:24:33.618601  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-6: (1.169094ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35568]
I0212 07:24:33.619250  124047 wrap.go:47] PUT /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-6/status: (1.875848ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35558]
I0212 07:24:33.620553  124047 wrap.go:47] PATCH /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/events/ppod-6.15828d04d1daa2e9: (2.565112ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35570]
I0212 07:24:33.620832  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-6: (1.144538ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35558]
I0212 07:24:33.621081  124047 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 07:24:33.621271  124047 scheduling_queue.go:868] About to try and schedule pod preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-0
I0212 07:24:33.621289  124047 scheduler.go:453] Attempting to schedule pod: preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-0
I0212 07:24:33.621393  124047 factory.go:647] Unable to schedule preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-0: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 07:24:33.621447  124047 factory.go:742] Updating pod condition for preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-0 to (PodScheduled==False, Reason=Unschedulable)
I0212 07:24:33.622774  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-0: (1.02914ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35570]
I0212 07:24:33.623575  124047 wrap.go:47] PUT /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-0/status: (1.821441ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35568]
I0212 07:24:33.624695  124047 wrap.go:47] PATCH /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/events/ppod-0.15828d04b439fc05: (2.017186ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35572]
I0212 07:24:33.625035  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-0: (1.075733ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35568]
I0212 07:24:33.625302  124047 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 07:24:33.625550  124047 scheduling_queue.go:868] About to try and schedule pod preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-4
I0212 07:24:33.625571  124047 scheduler.go:453] Attempting to schedule pod: preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-4
I0212 07:24:33.625680  124047 factory.go:647] Unable to schedule preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-4: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 07:24:33.625734  124047 factory.go:742] Updating pod condition for preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-4 to (PodScheduled==False, Reason=Unschedulable)
I0212 07:24:33.627169  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-4: (1.207816ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35572]
I0212 07:24:33.627628  124047 wrap.go:47] PUT /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-4/status: (1.647147ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35570]
I0212 07:24:33.628188  124047 wrap.go:47] POST /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/events: (1.908768ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35574]
I0212 07:24:33.629283  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-4: (1.237096ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35570]
I0212 07:24:33.629597  124047 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 07:24:33.629810  124047 scheduling_queue.go:868] About to try and schedule pod preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-3
I0212 07:24:33.629839  124047 scheduler.go:453] Attempting to schedule pod: preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-3
I0212 07:24:33.629972  124047 factory.go:647] Unable to schedule preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-3: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 07:24:33.630028  124047 factory.go:742] Updating pod condition for preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-3 to (PodScheduled==False, Reason=Unschedulable)
I0212 07:24:33.631906  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-3: (1.679947ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35574]
I0212 07:24:33.632254  124047 wrap.go:47] PUT /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-3/status: (1.956198ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35572]
I0212 07:24:33.632224  124047 wrap.go:47] POST /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/events: (1.679327ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35576]
I0212 07:24:33.633935  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-3: (1.299507ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35576]
I0212 07:24:33.634341  124047 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 07:24:33.634550  124047 scheduling_queue.go:868] About to try and schedule pod preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-4
I0212 07:24:33.634574  124047 scheduler.go:453] Attempting to schedule pod: preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-4
I0212 07:24:33.634714  124047 factory.go:647] Unable to schedule preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-4: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 07:24:33.634770  124047 factory.go:742] Updating pod condition for preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-4 to (PodScheduled==False, Reason=Unschedulable)
I0212 07:24:33.636485  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-4: (1.465942ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35576]
I0212 07:24:33.636889  124047 wrap.go:47] PUT /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-4/status: (1.499849ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35572]
I0212 07:24:33.638249  124047 wrap.go:47] PATCH /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/events/ppod-4.15828d04d2d96399: (2.426586ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35578]
I0212 07:24:33.638675  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-4: (1.40983ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35572]
I0212 07:24:33.639071  124047 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 07:24:33.639350  124047 scheduling_queue.go:868] About to try and schedule pod preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-3
I0212 07:24:33.640251  124047 scheduler.go:453] Attempting to schedule pod: preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-3
I0212 07:24:33.641571  124047 factory.go:647] Unable to schedule preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-3: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 07:24:33.641692  124047 factory.go:742] Updating pod condition for preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-3 to (PodScheduled==False, Reason=Unschedulable)
I0212 07:24:33.643976  124047 wrap.go:47] PUT /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-3/status: (1.967136ms) 409 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35578]
I0212 07:24:33.644068  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-3: (1.163379ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35576]
I0212 07:24:33.645827  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-3: (1.020495ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35578]
I0212 07:24:33.646035  124047 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 07:24:33.646187  124047 wrap.go:47] PATCH /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/events/ppod-3.15828d04d31aeadd: (2.565179ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35580]
I0212 07:24:33.646201  124047 scheduling_queue.go:868] About to try and schedule pod preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-2
I0212 07:24:33.646212  124047 scheduler.go:453] Attempting to schedule pod: preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-2
I0212 07:24:33.646291  124047 factory.go:647] Unable to schedule preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-2: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 07:24:33.646347  124047 factory.go:742] Updating pod condition for preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-2 to (PodScheduled==False, Reason=Unschedulable)
I0212 07:24:33.647825  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-2: (1.09901ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35576]
I0212 07:24:33.648777  124047 wrap.go:47] PUT /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-2/status: (2.039987ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35578]
I0212 07:24:33.648887  124047 wrap.go:47] POST /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/events: (1.881554ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35582]
I0212 07:24:33.650242  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-2: (1.096773ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35578]
I0212 07:24:33.650672  124047 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 07:24:33.650936  124047 scheduling_queue.go:868] About to try and schedule pod preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-1
I0212 07:24:33.650957  124047 scheduler.go:453] Attempting to schedule pod: preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-1
I0212 07:24:33.651095  124047 factory.go:647] Unable to schedule preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-1: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 07:24:33.651151  124047 factory.go:742] Updating pod condition for preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-1 to (PodScheduled==False, Reason=Unschedulable)
I0212 07:24:33.652754  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-1: (1.344586ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35576]
I0212 07:24:33.653160  124047 wrap.go:47] PUT /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-1/status: (1.796157ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35578]
I0212 07:24:33.653354  124047 wrap.go:47] POST /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/events: (1.449411ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35584]
I0212 07:24:33.654926  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-1: (1.201432ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35578]
I0212 07:24:33.655152  124047 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 07:24:33.655354  124047 scheduling_queue.go:868] About to try and schedule pod preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-2
I0212 07:24:33.655375  124047 scheduler.go:453] Attempting to schedule pod: preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-2
I0212 07:24:33.655481  124047 factory.go:647] Unable to schedule preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-2: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 07:24:33.655570  124047 factory.go:742] Updating pod condition for preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-2 to (PodScheduled==False, Reason=Unschedulable)
I0212 07:24:33.657383  124047 wrap.go:47] PUT /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-2/status: (1.563465ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35576]
I0212 07:24:33.658135  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-2: (2.347567ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35578]
I0212 07:24:33.658237  124047 wrap.go:47] PATCH /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/events/ppod-2.15828d04d413c109: (2.027047ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35586]
I0212 07:24:33.659959  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-2: (1.826804ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35576]
I0212 07:24:33.660229  124047 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 07:24:33.660434  124047 scheduling_queue.go:868] About to try and schedule pod preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-1
I0212 07:24:33.660453  124047 scheduler.go:453] Attempting to schedule pod: preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-1
I0212 07:24:33.660574  124047 factory.go:647] Unable to schedule preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-1: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 07:24:33.660624  124047 factory.go:742] Updating pod condition for preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-1 to (PodScheduled==False, Reason=Unschedulable)
I0212 07:24:33.662020  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-1: (1.110176ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35578]
I0212 07:24:33.662902  124047 wrap.go:47] PUT /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-1/status: (2.009653ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35586]
I0212 07:24:33.664141  124047 wrap.go:47] PATCH /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/events/ppod-1.15828d04d45d3670: (2.567219ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35588]
I0212 07:24:33.664458  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-1: (1.186646ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35586]
I0212 07:24:33.664797  124047 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 07:24:33.668580  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/preemptor-pod: (1.336389ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35588]
I0212 07:24:33.724884  124047 reflector.go:248] k8s.io/client-go/informers/factory.go:132: forcing resync
I0212 07:24:33.725304  124047 reflector.go:248] k8s.io/client-go/informers/factory.go:132: forcing resync
I0212 07:24:33.730090  124047 reflector.go:248] k8s.io/client-go/informers/factory.go:132: forcing resync
I0212 07:24:33.730113  124047 reflector.go:248] k8s.io/client-go/informers/factory.go:132: forcing resync
I0212 07:24:33.730118  124047 reflector.go:248] k8s.io/client-go/informers/factory.go:132: forcing resync
I0212 07:24:33.769652  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/preemptor-pod: (2.217685ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35588]
I0212 07:24:33.869349  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/preemptor-pod: (1.992853ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35588]
I0212 07:24:33.969257  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/preemptor-pod: (1.96796ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35588]
I0212 07:24:34.069379  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/preemptor-pod: (1.97896ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35588]
I0212 07:24:34.169413  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/preemptor-pod: (2.037385ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35588]
I0212 07:24:34.269697  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/preemptor-pod: (2.332166ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35588]
I0212 07:24:34.369033  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/preemptor-pod: (1.762496ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35588]
I0212 07:24:34.474890  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/preemptor-pod: (7.554287ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35588]
I0212 07:24:34.569398  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/preemptor-pod: (1.970374ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35588]
I0212 07:24:34.626752  124047 scheduling_queue.go:868] About to try and schedule pod preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/preemptor-pod
I0212 07:24:34.626793  124047 scheduler.go:453] Attempting to schedule pod: preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/preemptor-pod
I0212 07:24:34.626967  124047 scheduler_binder.go:269] AssumePodVolumes for pod "preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/preemptor-pod", node "node1"
I0212 07:24:34.626993  124047 scheduler_binder.go:279] AssumePodVolumes for pod "preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/preemptor-pod", node "node1": all PVCs bound and nothing to do
I0212 07:24:34.627061  124047 factory.go:733] Attempting to bind preemptor-pod to node1
I0212 07:24:34.627122  124047 scheduling_queue.go:868] About to try and schedule pod preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-0
I0212 07:24:34.627151  124047 scheduler.go:453] Attempting to schedule pod: preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-0
I0212 07:24:34.627299  124047 factory.go:647] Unable to schedule preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-0: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 07:24:34.627357  124047 factory.go:742] Updating pod condition for preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-0 to (PodScheduled==False, Reason=Unschedulable)
I0212 07:24:34.629432  124047 wrap.go:47] POST /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/preemptor-pod/binding: (1.975337ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35588]
I0212 07:24:34.629656  124047 scheduler.go:571] pod preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/preemptor-pod is bound successfully on node node1, 1 nodes evaluated, 1 nodes were found feasible
I0212 07:24:34.629687  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-0: (2.122751ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35578]
I0212 07:24:34.629903  124047 wrap.go:47] PUT /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-0/status: (1.766282ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35766]
I0212 07:24:34.629928  124047 backoff_utils.go:79] Backing off 2s
I0212 07:24:34.630810  124047 wrap.go:47] PATCH /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/events/ppod-0.15828d04b439fc05: (2.571455ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35768]
I0212 07:24:34.631699  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-0: (1.341791ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35578]
I0212 07:24:34.631972  124047 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 07:24:34.632197  124047 scheduling_queue.go:868] About to try and schedule pod preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-6
I0212 07:24:34.632216  124047 scheduler.go:453] Attempting to schedule pod: preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-6
I0212 07:24:34.632304  124047 factory.go:647] Unable to schedule preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-6: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 07:24:34.632360  124047 factory.go:742] Updating pod condition for preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-6 to (PodScheduled==False, Reason=Unschedulable)
I0212 07:24:34.632620  124047 wrap.go:47] POST /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/events: (1.313351ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35768]
I0212 07:24:34.633872  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-6: (1.262483ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35588]
I0212 07:24:34.634156  124047 backoff_utils.go:79] Backing off 2s
I0212 07:24:34.634797  124047 wrap.go:47] PUT /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-6/status: (2.208329ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35578]
I0212 07:24:34.635651  124047 wrap.go:47] PATCH /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/events/ppod-6.15828d04d1daa2e9: (2.455987ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35768]
I0212 07:24:34.636775  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-6: (1.133716ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35578]
I0212 07:24:34.637022  124047 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 07:24:34.637227  124047 scheduling_queue.go:868] About to try and schedule pod preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-7
I0212 07:24:34.637243  124047 scheduler.go:453] Attempting to schedule pod: preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-7
I0212 07:24:34.637438  124047 factory.go:647] Unable to schedule preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-7: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 07:24:34.637531  124047 factory.go:742] Updating pod condition for preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-7 to (PodScheduled==False, Reason=Unschedulable)
I0212 07:24:34.639679  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-7: (1.893229ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35588]
I0212 07:24:34.639752  124047 wrap.go:47] PUT /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-7/status: (1.998727ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35768]
I0212 07:24:34.641899  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-7: (1.476411ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35768]
I0212 07:24:34.642154  124047 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 07:24:34.642385  124047 scheduling_queue.go:868] About to try and schedule pod preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-9
I0212 07:24:34.642409  124047 scheduler.go:453] Attempting to schedule pod: preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-9
I0212 07:24:34.642569  124047 factory.go:647] Unable to schedule preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-9: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 07:24:34.642652  124047 factory.go:742] Updating pod condition for preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-9 to (PodScheduled==False, Reason=Unschedulable)
I0212 07:24:34.642979  124047 wrap.go:47] PATCH /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/events/ppod-7.15828d04d197fabb: (4.743172ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35770]
I0212 07:24:34.644618  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-9: (1.623345ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35588]
I0212 07:24:34.644859  124047 backoff_utils.go:79] Backing off 2s
I0212 07:24:34.644922  124047 wrap.go:47] PUT /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-9/status: (1.923102ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35768]
I0212 07:24:34.646164  124047 wrap.go:47] PATCH /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/events/ppod-9.15828d04d0d1748d: (2.398742ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35770]
I0212 07:24:34.646374  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-9: (1.041727ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35768]
I0212 07:24:34.646707  124047 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 07:24:34.646911  124047 scheduling_queue.go:868] About to try and schedule pod preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-12
I0212 07:24:34.646931  124047 scheduler.go:453] Attempting to schedule pod: preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-12
I0212 07:24:34.647070  124047 factory.go:647] Unable to schedule preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-12: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 07:24:34.647126  124047 factory.go:742] Updating pod condition for preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-12 to (PodScheduled==False, Reason=Unschedulable)
I0212 07:24:34.648301  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-12: (932.91µs) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35770]
I0212 07:24:34.648684  124047 backoff_utils.go:79] Backing off 2s
I0212 07:24:34.649021  124047 wrap.go:47] PUT /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-12/status: (1.628074ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35588]
I0212 07:24:34.649805  124047 wrap.go:47] PATCH /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/events/ppod-12.15828d04b7456628: (2.122206ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35772]
I0212 07:24:34.650525  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-12: (1.068654ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35588]
I0212 07:24:34.650822  124047 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 07:24:34.651025  124047 scheduling_queue.go:868] About to try and schedule pod preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-5
I0212 07:24:34.651055  124047 scheduler.go:453] Attempting to schedule pod: preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-5
I0212 07:24:34.651137  124047 factory.go:647] Unable to schedule preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-5: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 07:24:34.651184  124047 factory.go:742] Updating pod condition for preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-5 to (PodScheduled==False, Reason=Unschedulable)
I0212 07:24:34.652602  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-5: (1.177528ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35772]
I0212 07:24:34.652869  124047 backoff_utils.go:79] Backing off 2s
I0212 07:24:34.653161  124047 wrap.go:47] PUT /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-5/status: (1.743046ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35770]
I0212 07:24:34.654204  124047 wrap.go:47] PATCH /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/events/ppod-5.15828d04b5b2eecf: (2.28129ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35774]
I0212 07:24:34.654510  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-5: (942.035µs) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35770]
I0212 07:24:34.654801  124047 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 07:24:34.654979  124047 scheduling_queue.go:868] About to try and schedule pod preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-11
I0212 07:24:34.655000  124047 scheduler.go:453] Attempting to schedule pod: preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-11
I0212 07:24:34.655127  124047 factory.go:647] Unable to schedule preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-11: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 07:24:34.655171  124047 factory.go:742] Updating pod condition for preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-11 to (PodScheduled==False, Reason=Unschedulable)
I0212 07:24:34.656420  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-11: (1.054ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35774]
I0212 07:24:34.656754  124047 backoff_utils.go:79] Backing off 2s
I0212 07:24:34.657058  124047 wrap.go:47] PUT /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-11/status: (1.657152ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35772]
I0212 07:24:34.658347  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-11: (881.515µs) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35772]
I0212 07:24:34.658616  124047 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 07:24:34.658793  124047 scheduling_queue.go:868] About to try and schedule pod preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-8
I0212 07:24:34.658839  124047 wrap.go:47] PATCH /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/events/ppod-11.15828d04d0066d68: (2.920942ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35776]
I0212 07:24:34.658846  124047 scheduler.go:453] Attempting to schedule pod: preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-8
I0212 07:24:34.658968  124047 factory.go:647] Unable to schedule preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-8: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 07:24:34.659013  124047 factory.go:742] Updating pod condition for preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-8 to (PodScheduled==False, Reason=Unschedulable)
I0212 07:24:34.660216  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-8: (979.177µs) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35772]
I0212 07:24:34.660434  124047 backoff_utils.go:79] Backing off 2s
I0212 07:24:34.661619  124047 wrap.go:47] PUT /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-8/status: (2.350916ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35774]
I0212 07:24:34.661915  124047 wrap.go:47] PATCH /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/events/ppod-8.15828d04b6095c79: (2.179339ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35778]
I0212 07:24:34.663140  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-8: (1.123295ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35774]
I0212 07:24:34.663450  124047 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 07:24:34.663658  124047 scheduling_queue.go:868] About to try and schedule pod preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-10
I0212 07:24:34.663681  124047 scheduler.go:453] Attempting to schedule pod: preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-10
I0212 07:24:34.663809  124047 factory.go:647] Unable to schedule preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-10: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 07:24:34.663869  124047 factory.go:742] Updating pod condition for preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-10 to (PodScheduled==False, Reason=Unschedulable)
I0212 07:24:34.665362  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-10: (1.214381ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35772]
I0212 07:24:34.665910  124047 wrap.go:47] PUT /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-10/status: (1.79846ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35778]
I0212 07:24:34.666776  124047 wrap.go:47] PATCH /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/events/ppod-10.15828d04b6c5faed: (2.13012ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35780]
I0212 07:24:34.667537  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-10: (1.147328ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35778]
I0212 07:24:34.667743  124047 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 07:24:34.667906  124047 scheduling_queue.go:868] About to try and schedule pod preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-13
I0212 07:24:34.667927  124047 scheduler.go:453] Attempting to schedule pod: preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-13
I0212 07:24:34.668015  124047 factory.go:647] Unable to schedule preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-13: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 07:24:34.668077  124047 factory.go:742] Updating pod condition for preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-13 to (PodScheduled==False, Reason=Unschedulable)
I0212 07:24:34.668408  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/preemptor-pod: (1.162378ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35772]
I0212 07:24:34.668694  124047 preemption_test.go:583] Check unschedulable pods still exists and were never scheduled...
I0212 07:24:34.669581  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-13: (981.085µs) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35780]
I0212 07:24:34.669870  124047 backoff_utils.go:79] Backing off 2s
I0212 07:24:34.670076  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-0: (1.185705ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35782]
I0212 07:24:34.670429  124047 wrap.go:47] PUT /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-13/status: (1.81459ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35778]
I0212 07:24:34.671088  124047 wrap.go:47] PATCH /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/events/ppod-13.15828d04cee25afe: (2.39077ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35772]
I0212 07:24:34.671949  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-1: (1.390468ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35782]
I0212 07:24:34.672467  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-13: (968.96µs) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35778]
I0212 07:24:34.672699  124047 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 07:24:34.672945  124047 scheduling_queue.go:868] About to try and schedule pod preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-27
I0212 07:24:34.672964  124047 scheduler.go:453] Attempting to schedule pod: preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-27
I0212 07:24:34.673092  124047 factory.go:647] Unable to schedule preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-27: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 07:24:34.673140  124047 factory.go:742] Updating pod condition for preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-27 to (PodScheduled==False, Reason=Unschedulable)
I0212 07:24:34.673533  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-2: (1.123164ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35772]
I0212 07:24:34.674665  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-27: (1.23361ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35778]
I0212 07:24:34.675359  124047 backoff_utils.go:79] Backing off 2s
I0212 07:24:34.675588  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-3: (1.675519ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35784]
I0212 07:24:34.675698  124047 wrap.go:47] PUT /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-27/status: (2.232754ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35780]
I0212 07:24:34.676997  124047 wrap.go:47] PATCH /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/events/ppod-27.15828d04b9b33965: (3.173676ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35772]
I0212 07:24:34.677025  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-4: (1.09622ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35784]
I0212 07:24:34.677315  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-27: (1.169093ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35778]
I0212 07:24:34.677551  124047 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 07:24:34.677682  124047 scheduling_queue.go:868] About to try and schedule pod preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-15
I0212 07:24:34.677704  124047 scheduler.go:453] Attempting to schedule pod: preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-15
I0212 07:24:34.677794  124047 factory.go:647] Unable to schedule preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-15: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 07:24:34.677853  124047 factory.go:742] Updating pod condition for preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-15 to (PodScheduled==False, Reason=Unschedulable)
I0212 07:24:34.678595  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-5: (1.184849ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35784]
I0212 07:24:34.680026  124047 wrap.go:47] PUT /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-15/status: (1.883985ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35772]
I0212 07:24:34.680171  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-6: (1.077768ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35784]
I0212 07:24:34.680356  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-15: (2.350313ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35778]
I0212 07:24:34.681617  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-15: (1.135481ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35784]
I0212 07:24:34.681903  124047 wrap.go:47] PATCH /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/events/ppod-15.15828d04ce9ef66a: (3.417288ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35786]
I0212 07:24:34.682016  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-7: (1.219342ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35778]
I0212 07:24:34.682091  124047 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 07:24:34.682257  124047 scheduling_queue.go:868] About to try and schedule pod preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-17
I0212 07:24:34.682280  124047 scheduler.go:453] Attempting to schedule pod: preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-17
I0212 07:24:34.682380  124047 factory.go:647] Unable to schedule preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-17: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 07:24:34.682429  124047 factory.go:742] Updating pod condition for preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-17 to (PodScheduled==False, Reason=Unschedulable)
I0212 07:24:34.683645  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-8: (1.22599ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35784]
I0212 07:24:34.683916  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-17: (1.051354ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35788]
I0212 07:24:34.684176  124047 backoff_utils.go:79] Backing off 2s
I0212 07:24:34.684530  124047 wrap.go:47] PUT /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-17/status: (1.8595ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35772]
I0212 07:24:34.685112  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-9: (1.087951ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35784]
I0212 07:24:34.685210  124047 wrap.go:47] PATCH /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/events/ppod-17.15828d04cdce1388: (2.053246ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35790]
I0212 07:24:34.685986  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-17: (1.039811ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35772]
I0212 07:24:34.686275  124047 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 07:24:34.686486  124047 scheduling_queue.go:868] About to try and schedule pod preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-49
I0212 07:24:34.686538  124047 scheduler.go:453] Attempting to schedule pod: preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-49
I0212 07:24:34.686601  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-10: (1.01257ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35784]
I0212 07:24:34.686616  124047 factory.go:647] Unable to schedule preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-49: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 07:24:34.686665  124047 factory.go:742] Updating pod condition for preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-49 to (PodScheduled==False, Reason=Unschedulable)
I0212 07:24:34.688107  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-49: (1.265739ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35772]
I0212 07:24:34.688249  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-11: (1.101749ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35792]
I0212 07:24:34.689468  124047 wrap.go:47] PATCH /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/events/ppod-49.15828d04bddfd24f: (2.273137ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35794]
I0212 07:24:34.690159  124047 wrap.go:47] PUT /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-49/status: (3.250771ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35788]
I0212 07:24:34.690295  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-12: (1.51178ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35772]
I0212 07:24:34.691770  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-49: (1.100836ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35794]
I0212 07:24:34.692000  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-13: (984.905µs) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35772]
I0212 07:24:34.692059  124047 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 07:24:34.692233  124047 scheduling_queue.go:868] About to try and schedule pod preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-19
I0212 07:24:34.692256  124047 scheduler.go:453] Attempting to schedule pod: preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-19
I0212 07:24:34.692394  124047 factory.go:647] Unable to schedule preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-19: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 07:24:34.692447  124047 factory.go:742] Updating pod condition for preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-19 to (PodScheduled==False, Reason=Unschedulable)
I0212 07:24:34.693630  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-14: (1.227329ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35794]
I0212 07:24:34.693872  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-19: (1.210275ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35792]
I0212 07:24:34.694136  124047 backoff_utils.go:79] Backing off 2s
I0212 07:24:34.694575  124047 wrap.go:47] PUT /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-19/status: (1.573794ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35796]
I0212 07:24:34.695215  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-15: (1.127387ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35794]
I0212 07:24:34.695238  124047 wrap.go:47] PATCH /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/events/ppod-19.15828d04cd7ff76c: (2.19479ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35798]
I0212 07:24:34.696310  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-19: (1.278137ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35796]
I0212 07:24:34.696603  124047 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 07:24:34.696764  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-16: (1.074813ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35794]
I0212 07:24:34.696834  124047 scheduling_queue.go:868] About to try and schedule pod preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-36
I0212 07:24:34.696863  124047 scheduler.go:453] Attempting to schedule pod: preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-36
I0212 07:24:34.696966  124047 factory.go:647] Unable to schedule preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-36: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 07:24:34.697030  124047 factory.go:742] Updating pod condition for preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-36 to (PodScheduled==False, Reason=Unschedulable)
I0212 07:24:34.698611  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-36: (1.406308ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35796]
I0212 07:24:34.698611  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-17: (1.126614ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35800]
I0212 07:24:34.698874  124047 backoff_utils.go:79] Backing off 2s
I0212 07:24:34.699138  124047 wrap.go:47] PUT /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-36/status: (1.898938ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35792]
I0212 07:24:34.700260  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-18: (1.142705ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35800]
I0212 07:24:34.700614  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-36: (1.055734ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35792]
I0212 07:24:34.701034  124047 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 07:24:34.701299  124047 scheduling_queue.go:868] About to try and schedule pod preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-21
I0212 07:24:34.701355  124047 wrap.go:47] PATCH /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/events/ppod-36.15828d04bb8038e5: (3.176828ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35802]
I0212 07:24:34.701356  124047 scheduler.go:453] Attempting to schedule pod: preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-21
I0212 07:24:34.701662  124047 factory.go:647] Unable to schedule preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-21: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 07:24:34.701714  124047 factory.go:742] Updating pod condition for preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-21 to (PodScheduled==False, Reason=Unschedulable)
I0212 07:24:34.701849  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-19: (1.168103ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35800]
I0212 07:24:34.703128  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-21: (1.142165ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35796]
I0212 07:24:34.703428  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-20: (893.031µs) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35804]
I0212 07:24:34.703994  124047 wrap.go:47] PUT /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-21/status: (2.065031ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35792]
I0212 07:24:34.704329  124047 backoff_utils.go:79] Backing off 2s
I0212 07:24:34.704801  124047 wrap.go:47] PATCH /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/events/ppod-21.15828d04cbb06527: (2.513791ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35800]
I0212 07:24:34.704837  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-21: (982.688µs) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35804]
I0212 07:24:34.705405  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-21: (1.056029ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35792]
I0212 07:24:34.705704  124047 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 07:24:34.705919  124047 scheduling_queue.go:868] About to try and schedule pod preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-22
I0212 07:24:34.705941  124047 scheduler.go:453] Attempting to schedule pod: preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-22
I0212 07:24:34.706078  124047 factory.go:647] Unable to schedule preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-22: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 07:24:34.706141  124047 factory.go:742] Updating pod condition for preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-22 to (PodScheduled==False, Reason=Unschedulable)
I0212 07:24:34.707189  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-22: (1.994726ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35804]
I0212 07:24:34.707912  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-22: (1.150478ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35796]
I0212 07:24:34.708185  124047 backoff_utils.go:79] Backing off 2s
I0212 07:24:34.708607  124047 wrap.go:47] PUT /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-22/status: (1.8428ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35792]
I0212 07:24:34.708758  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-23: (1.2116ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35804]
I0212 07:24:34.709414  124047 wrap.go:47] PATCH /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/events/ppod-22.15828d04ca7e9d7b: (2.5605ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35806]
I0212 07:24:34.710169  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-22: (1.074101ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35792]
I0212 07:24:34.710377  124047 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 07:24:34.710467  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-24: (1.316626ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35796]
I0212 07:24:34.710567  124047 scheduling_queue.go:868] About to try and schedule pod preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-18
I0212 07:24:34.710591  124047 scheduler.go:453] Attempting to schedule pod: preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-18
I0212 07:24:34.710682  124047 factory.go:647] Unable to schedule preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-18: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 07:24:34.710733  124047 factory.go:742] Updating pod condition for preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-18 to (PodScheduled==False, Reason=Unschedulable)
I0212 07:24:34.712168  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-18: (1.269906ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35792]
I0212 07:24:34.712290  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-25: (1.32293ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35806]
I0212 07:24:34.712431  124047 backoff_utils.go:79] Backing off 2s
I0212 07:24:34.713721  124047 wrap.go:47] PUT /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-18/status: (2.386708ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35808]
I0212 07:24:34.714295  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-26: (1.295551ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35806]
I0212 07:24:34.714394  124047 wrap.go:47] PATCH /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/events/ppod-18.15828d04b827b814: (2.473413ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35810]
I0212 07:24:34.715224  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-18: (1.106489ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35808]
I0212 07:24:34.715520  124047 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 07:24:34.715727  124047 scheduling_queue.go:868] About to try and schedule pod preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-40
I0212 07:24:34.715749  124047 scheduler.go:453] Attempting to schedule pod: preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-40
I0212 07:24:34.715775  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-27: (1.060054ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35806]
I0212 07:24:34.715898  124047 factory.go:647] Unable to schedule preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-40: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 07:24:34.715949  124047 factory.go:742] Updating pod condition for preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-40 to (PodScheduled==False, Reason=Unschedulable)
I0212 07:24:34.717415  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-40: (1.269814ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35810]
I0212 07:24:34.717430  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-28: (1.119514ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35812]
I0212 07:24:34.717693  124047 backoff_utils.go:79] Backing off 2s
I0212 07:24:34.718153  124047 wrap.go:47] PUT /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-40/status: (2.006369ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35792]
I0212 07:24:34.718745  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-29: (941.547µs) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35812]
I0212 07:24:34.719463  124047 wrap.go:47] PATCH /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/events/ppod-40.15828d04c29db2b5: (2.484183ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35814]
I0212 07:24:34.719826  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-40: (1.120699ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35792]
I0212 07:24:34.720096  124047 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 07:24:34.720238  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-30: (1.132197ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35812]
I0212 07:24:34.720286  124047 scheduling_queue.go:868] About to try and schedule pod preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-23
I0212 07:24:34.720296  124047 scheduler.go:453] Attempting to schedule pod: preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-23
I0212 07:24:34.720383  124047 factory.go:647] Unable to schedule preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-23: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 07:24:34.720429  124047 factory.go:742] Updating pod condition for preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-23 to (PodScheduled==False, Reason=Unschedulable)
I0212 07:24:34.722033  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-31: (1.197877ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35816]
I0212 07:24:34.722134  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-23: (1.454337ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35810]
I0212 07:24:34.722450  124047 backoff_utils.go:79] Backing off 2s
I0212 07:24:34.722565  124047 wrap.go:47] PUT /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-23/status: (1.781619ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35814]
I0212 07:24:34.723936  124047 wrap.go:47] PATCH /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/events/ppod-23.15828d04ca333b64: (2.549541ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35818]
I0212 07:24:34.723950  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-32: (1.244047ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35816]
I0212 07:24:34.724127  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-23: (1.190722ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35814]
I0212 07:24:34.724364  124047 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 07:24:34.724565  124047 scheduling_queue.go:868] About to try and schedule pod preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-24
I0212 07:24:34.724584  124047 scheduler.go:453] Attempting to schedule pod: preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-24
I0212 07:24:34.724676  124047 factory.go:647] Unable to schedule preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-24: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 07:24:34.724735  124047 factory.go:742] Updating pod condition for preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-24 to (PodScheduled==False, Reason=Unschedulable)
I0212 07:24:34.725083  124047 reflector.go:248] k8s.io/client-go/informers/factory.go:132: forcing resync
I0212 07:24:34.725531  124047 reflector.go:248] k8s.io/client-go/informers/factory.go:132: forcing resync
I0212 07:24:34.726747  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-24: (1.468888ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35820]
I0212 07:24:34.726796  124047 wrap.go:47] PUT /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-24/status: (1.768916ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35810]
I0212 07:24:34.726772  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-33: (2.219929ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35814]
I0212 07:24:34.728037  124047 wrap.go:47] PATCH /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/events/ppod-24.15828d04c948c509: (2.322355ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35822]
I0212 07:24:34.728173  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-24: (933.515µs) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35820]
I0212 07:24:34.728402  124047 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 07:24:34.728555  124047 scheduling_queue.go:868] About to try and schedule pod preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-25
I0212 07:24:34.728576  124047 scheduler.go:453] Attempting to schedule pod: preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-25
I0212 07:24:34.728657  124047 factory.go:647] Unable to schedule preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-25: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 07:24:34.728684  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-34: (1.414638ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35814]
I0212 07:24:34.728727  124047 factory.go:742] Updating pod condition for preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-25 to (PodScheduled==False, Reason=Unschedulable)
I0212 07:24:34.730020  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-25: (1.093537ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35820]
I0212 07:24:34.730359  124047 backoff_utils.go:79] Backing off 2s
I0212 07:24:34.730417  124047 reflector.go:248] k8s.io/client-go/informers/factory.go:132: forcing resync
I0212 07:24:34.730421  124047 reflector.go:248] k8s.io/client-go/informers/factory.go:132: forcing resync
I0212 07:24:34.730478  124047 reflector.go:248] k8s.io/client-go/informers/factory.go:132: forcing resync
I0212 07:24:34.730905  124047 wrap.go:47] PUT /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-25/status: (1.920164ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35822]
I0212 07:24:34.731269  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-35: (1.976613ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35824]
I0212 07:24:34.732332  124047 wrap.go:47] PATCH /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/events/ppod-25.15828d04c8f91d17: (2.937101ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35826]
I0212 07:24:34.732885  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-36: (1.137465ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35824]
I0212 07:24:34.732916  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-25: (1.630878ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35822]
I0212 07:24:34.733152  124047 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 07:24:34.733291  124047 scheduling_queue.go:868] About to try and schedule pod preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-16
I0212 07:24:34.733312  124047 scheduler.go:453] Attempting to schedule pod: preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-16
I0212 07:24:34.733405  124047 factory.go:647] Unable to schedule preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-16: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 07:24:34.733452  124047 factory.go:742] Updating pod condition for preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-16 to (PodScheduled==False, Reason=Unschedulable)
I0212 07:24:34.734343  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-37: (1.117913ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35826]
I0212 07:24:34.735299  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-16: (1.378504ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35820]
I0212 07:24:34.735579  124047 backoff_utils.go:79] Backing off 2s
I0212 07:24:34.735904  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-38: (1.112839ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35826]
I0212 07:24:34.736206  124047 wrap.go:47] PATCH /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/events/ppod-16.15828d04b7dcd78f: (2.045488ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35830]
I0212 07:24:34.736762  124047 wrap.go:47] PUT /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-16/status: (2.649682ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35828]
I0212 07:24:34.737431  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-39: (1.148617ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35826]
I0212 07:24:34.738644  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-16: (1.388422ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35830]
I0212 07:24:34.739381  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-40: (1.528392ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35826]
I0212 07:24:34.739628  124047 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 07:24:34.739818  124047 scheduling_queue.go:868] About to try and schedule pod preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-26
I0212 07:24:34.739857  124047 scheduler.go:453] Attempting to schedule pod: preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-26
I0212 07:24:34.739981  124047 factory.go:647] Unable to schedule preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-26: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 07:24:34.740076  124047 factory.go:742] Updating pod condition for preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-26 to (PodScheduled==False, Reason=Unschedulable)
I0212 07:24:34.741380  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-26: (1.054947ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35820]
I0212 07:24:34.741476  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-41: (1.49593ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35830]
I0212 07:24:34.742131  124047 backoff_utils.go:79] Backing off 2s
I0212 07:24:34.742468  124047 wrap.go:47] PUT /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-26/status: (1.974281ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35832]
I0212 07:24:34.743633  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-42: (1.274554ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35830]
I0212 07:24:34.743728  124047 wrap.go:47] PATCH /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/events/ppod-26.15828d04c82b0d2f: (2.894927ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35834]
I0212 07:24:34.744165  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-26: (1.136521ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35832]
I0212 07:24:34.744480  124047 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 07:24:34.744680  124047 scheduling_queue.go:868] About to try and schedule pod preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-28
I0212 07:24:34.744704  124047 scheduler.go:453] Attempting to schedule pod: preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-28
I0212 07:24:34.744847  124047 factory.go:647] Unable to schedule preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-28: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 07:24:34.744912  124047 factory.go:742] Updating pod condition for preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-28 to (PodScheduled==False, Reason=Unschedulable)
I0212 07:24:34.745337  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-43: (1.166422ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35834]
I0212 07:24:34.746280  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-28: (1.112757ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35832]
I0212 07:24:34.746604  124047 backoff_utils.go:79] Backing off 2s
I0212 07:24:34.747483  124047 wrap.go:47] PUT /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-28/status: (2.032717ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35820]
I0212 07:24:34.747865  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-44: (1.142411ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35832]
I0212 07:24:34.748190  124047 wrap.go:47] PATCH /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/events/ppod-28.15828d04c7eb90ef: (2.292993ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35834]
I0212 07:24:34.749029  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-28: (1.133578ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35820]
I0212 07:24:34.749246  124047 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 07:24:34.749452  124047 scheduling_queue.go:868] About to try and schedule pod preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-30
I0212 07:24:34.749465  124047 scheduler.go:453] Attempting to schedule pod: preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-30
I0212 07:24:34.749592  124047 factory.go:647] Unable to schedule preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-30: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 07:24:34.749654  124047 factory.go:742] Updating pod condition for preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-30 to (PodScheduled==False, Reason=Unschedulable)
I0212 07:24:34.749738  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-45: (1.077211ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35832]
I0212 07:24:34.751130  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-30: (1.174072ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35836]
I0212 07:24:34.751379  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-46: (1.261481ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35832]
I0212 07:24:34.751426  124047 backoff_utils.go:79] Backing off 2s
I0212 07:24:34.752830  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-47: (1.06936ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35832]
I0212 07:24:34.753036  124047 wrap.go:47] PATCH /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/events/ppod-30.15828d04c70e7e7c: (2.666344ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35838]
I0212 07:24:34.753386  124047 wrap.go:47] PUT /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-30/status: (3.534965ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35820]
I0212 07:24:34.754458  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-48: (1.143273ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35832]
I0212 07:24:34.754966  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-30: (1.151038ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35838]
I0212 07:24:34.755206  124047 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 07:24:34.755379  124047 scheduling_queue.go:868] About to try and schedule pod preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-20
I0212 07:24:34.755400  124047 scheduler.go:453] Attempting to schedule pod: preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-20
I0212 07:24:34.755518  124047 factory.go:647] Unable to schedule preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-20: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 07:24:34.755588  124047 factory.go:742] Updating pod condition for preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-20 to (PodScheduled==False, Reason=Unschedulable)
I0212 07:24:34.755975  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-49: (1.094482ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35832]
I0212 07:24:34.756202  124047 preemption_test.go:598] Cleaning up all pods...
I0212 07:24:34.757122  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-20: (1.132059ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35836]
I0212 07:24:34.757666  124047 backoff_utils.go:79] Backing off 2s
I0212 07:24:34.758310  124047 wrap.go:47] PUT /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-20/status: (2.295543ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35838]
I0212 07:24:34.758895  124047 wrap.go:47] PATCH /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/events/ppod-20.15828d04b94dcd94: (2.464577ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35840]
I0212 07:24:34.759956  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-20: (1.206356ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35838]
I0212 07:24:34.760271  124047 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 07:24:34.760522  124047 scheduling_queue.go:868] About to try and schedule pod preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-32
I0212 07:24:34.760543  124047 scheduler.go:453] Attempting to schedule pod: preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-32
I0212 07:24:34.760623  124047 factory.go:647] Unable to schedule preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-32: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 07:24:34.760681  124047 factory.go:742] Updating pod condition for preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-32 to (PodScheduled==False, Reason=Unschedulable)
I0212 07:24:34.761085  124047 wrap.go:47] DELETE /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-0: (4.720555ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35832]
I0212 07:24:34.762094  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-32: (1.240865ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35840]
I0212 07:24:34.762425  124047 backoff_utils.go:79] Backing off 2s
I0212 07:24:34.762756  124047 wrap.go:47] PUT /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-32/status: (1.84188ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35836]
I0212 07:24:34.763821  124047 wrap.go:47] PATCH /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/events/ppod-32.15828d04c634dd45: (2.329776ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35832]
I0212 07:24:34.764673  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-32: (1.421491ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35840]
I0212 07:24:34.764950  124047 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 07:24:34.765195  124047 scheduling_queue.go:868] About to try and schedule pod preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-48
I0212 07:24:34.765220  124047 scheduler.go:453] Attempting to schedule pod: preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-48
I0212 07:24:34.765325  124047 wrap.go:47] DELETE /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-1: (3.782716ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35842]
I0212 07:24:34.765343  124047 factory.go:647] Unable to schedule preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-48: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 07:24:34.765399  124047 factory.go:742] Updating pod condition for preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-48 to (PodScheduled==False, Reason=Unschedulable)
I0212 07:24:34.766769  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-48: (1.129627ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35844]
I0212 07:24:34.767009  124047 backoff_utils.go:79] Backing off 2s
I0212 07:24:34.767337  124047 wrap.go:47] PUT /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-48/status: (1.696012ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35832]
I0212 07:24:34.768621  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-48: (859.059µs) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35832]
I0212 07:24:34.768903  124047 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 07:24:34.769069  124047 wrap.go:47] PATCH /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/events/ppod-48.15828d04be3e91d7: (2.350516ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35848]
I0212 07:24:34.769106  124047 scheduling_queue.go:868] About to try and schedule pod preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-31
I0212 07:24:34.769117  124047 scheduler.go:453] Attempting to schedule pod: preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-31
I0212 07:24:34.769186  124047 factory.go:647] Unable to schedule preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-31: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 07:24:34.769219  124047 factory.go:742] Updating pod condition for preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-31 to (PodScheduled==False, Reason=Unschedulable)
I0212 07:24:34.770193  124047 wrap.go:47] DELETE /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-2: (4.310507ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35846]
I0212 07:24:34.770677  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-31: (1.213035ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35832]
I0212 07:24:34.770937  124047 backoff_utils.go:79] Backing off 2s
I0212 07:24:34.771529  124047 wrap.go:47] PUT /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-31/status: (2.037327ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35844]
I0212 07:24:34.772573  124047 wrap.go:47] PATCH /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/events/ppod-31.15828d04ba59e249: (2.605298ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35850]
I0212 07:24:34.772859  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-31: (910.406µs) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35844]
I0212 07:24:34.773161  124047 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 07:24:34.773305  124047 scheduling_queue.go:868] About to try and schedule pod preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-35
I0212 07:24:34.773333  124047 scheduler.go:453] Attempting to schedule pod: preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-35
I0212 07:24:34.773448  124047 factory.go:647] Unable to schedule preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-35: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 07:24:34.773524  124047 factory.go:742] Updating pod condition for preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-35 to (PodScheduled==False, Reason=Unschedulable)
I0212 07:24:34.774605  124047 wrap.go:47] DELETE /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-3: (3.900254ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35846]
I0212 07:24:34.774834  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-35: (1.08548ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35850]
I0212 07:24:34.775091  124047 backoff_utils.go:79] Backing off 2s
I0212 07:24:34.775464  124047 wrap.go:47] PUT /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-35/status: (1.635731ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35832]
I0212 07:24:34.777099  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-35: (1.235606ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35832]
I0212 07:24:34.777212  124047 wrap.go:47] PATCH /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/events/ppod-35.15828d04c4b08b10: (2.947998ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35852]
I0212 07:24:34.777423  124047 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 07:24:34.777591  124047 scheduling_queue.go:868] About to try and schedule pod preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-37
I0212 07:24:34.777611  124047 scheduler.go:453] Attempting to schedule pod: preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-37
I0212 07:24:34.777700  124047 factory.go:647] Unable to schedule preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-37: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 07:24:34.777747  124047 factory.go:742] Updating pod condition for preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-37 to (PodScheduled==False, Reason=Unschedulable)
I0212 07:24:34.779792  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-37: (1.768291ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35850]
I0212 07:24:34.779872  124047 wrap.go:47] PUT /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-37/status: (1.900983ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35832]
I0212 07:24:34.780142  124047 wrap.go:47] DELETE /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-4: (5.20147ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35846]
I0212 07:24:34.780906  124047 wrap.go:47] PATCH /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/events/ppod-37.15828d04c4639f09: (2.224309ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35854]
I0212 07:24:34.781728  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-37: (1.39227ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35850]
I0212 07:24:34.782015  124047 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 07:24:34.782161  124047 scheduling_queue.go:868] About to try and schedule pod preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-38
I0212 07:24:34.782183  124047 scheduler.go:453] Attempting to schedule pod: preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-38
I0212 07:24:34.782261  124047 factory.go:647] Unable to schedule preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-38: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 07:24:34.782327  124047 factory.go:742] Updating pod condition for preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-38 to (PodScheduled==False, Reason=Unschedulable)
I0212 07:24:34.783965  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-38: (1.423162ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35854]
I0212 07:24:34.784346  124047 backoff_utils.go:79] Backing off 2s
I0212 07:24:34.784666  124047 wrap.go:47] PUT /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-38/status: (2.10357ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35832]
I0212 07:24:34.785599  124047 wrap.go:47] PATCH /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/events/ppod-38.15828d04c310c540: (2.437899ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35856]
I0212 07:24:34.785901  124047 wrap.go:47] DELETE /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-5: (5.420384ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35846]
I0212 07:24:34.786017  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-38: (1.020098ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35832]
I0212 07:24:34.786422  124047 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 07:24:34.786631  124047 scheduling_queue.go:868] About to try and schedule pod preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-34
I0212 07:24:34.786652  124047 scheduler.go:453] Attempting to schedule pod: preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-34
I0212 07:24:34.786732  124047 factory.go:647] Unable to schedule preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-34: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 07:24:34.786789  124047 factory.go:742] Updating pod condition for preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-34 to (PodScheduled==False, Reason=Unschedulable)
I0212 07:24:34.788029  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-34: (1.056183ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35854]
I0212 07:24:34.788320  124047 backoff_utils.go:79] Backing off 2s
I0212 07:24:34.789306  124047 wrap.go:47] PUT /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-34/status: (2.128726ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35858]
I0212 07:24:34.790374  124047 wrap.go:47] PATCH /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/events/ppod-34.15828d04baffb5c2: (2.367588ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35860]
I0212 07:24:34.790992  124047 wrap.go:47] DELETE /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-6: (4.718284ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35856]
I0212 07:24:34.791063  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-34: (1.308185ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35858]
I0212 07:24:34.791446  124047 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 07:24:34.791652  124047 scheduling_queue.go:868] About to try and schedule pod preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-33
I0212 07:24:34.791676  124047 scheduler.go:453] Attempting to schedule pod: preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-33
I0212 07:24:34.791766  124047 factory.go:647] Unable to schedule preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-33: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 07:24:34.791853  124047 factory.go:742] Updating pod condition for preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-33 to (PodScheduled==False, Reason=Unschedulable)
I0212 07:24:34.793416  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-33: (1.167959ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35862]
I0212 07:24:34.793699  124047 backoff_utils.go:79] Backing off 2s
I0212 07:24:34.793955  124047 wrap.go:47] PUT /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-33/status: (1.871288ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35854]
I0212 07:24:34.795166  124047 wrap.go:47] PATCH /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/events/ppod-33.15828d04c5e73f6a: (2.523961ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35864]
I0212 07:24:34.795600  124047 wrap.go:47] DELETE /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-7: (4.221762ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35860]
I0212 07:24:34.795946  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-33: (1.404865ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35854]
I0212 07:24:34.796213  124047 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 07:24:34.796395  124047 scheduling_queue.go:868] About to try and schedule pod preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-29
I0212 07:24:34.796417  124047 scheduler.go:453] Attempting to schedule pod: preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-29
I0212 07:24:34.796580  124047 factory.go:647] Unable to schedule preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-29: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 07:24:34.796635  124047 factory.go:742] Updating pod condition for preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-29 to (PodScheduled==False, Reason=Unschedulable)
I0212 07:24:34.798097  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-29: (1.176562ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35862]
I0212 07:24:34.798368  124047 wrap.go:47] PUT /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-29/status: (1.504603ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35854]
I0212 07:24:34.798733  124047 backoff_utils.go:79] Backing off 2s
I0212 07:24:34.799953  124047 wrap.go:47] PATCH /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/events/ppod-29.15828d04ba055c5c: (2.366184ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35866]
I0212 07:24:34.800887  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-29: (2.061846ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35862]
I0212 07:24:34.800895  124047 wrap.go:47] DELETE /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-8: (4.926181ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35864]
I0212 07:24:34.801281  124047 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 07:24:34.801464  124047 scheduling_queue.go:868] About to try and schedule pod preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-42
I0212 07:24:34.801522  124047 scheduler.go:453] Attempting to schedule pod: preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-42
I0212 07:24:34.801660  124047 factory.go:647] Unable to schedule preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-42: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 07:24:34.801713  124047 factory.go:742] Updating pod condition for preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-42 to (PodScheduled==False, Reason=Unschedulable)
I0212 07:24:34.803607  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-42: (1.384641ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35868]
I0212 07:24:34.803974  124047 backoff_utils.go:79] Backing off 2s
I0212 07:24:34.803975  124047 wrap.go:47] PUT /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-42/status: (1.981703ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35854]
I0212 07:24:34.805774  124047 wrap.go:47] DELETE /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-9: (4.408706ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35866]
I0212 07:24:34.805777  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-42: (1.221473ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35854]
I0212 07:24:34.806099  124047 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 07:24:34.806264  124047 scheduling_queue.go:868] About to try and schedule pod preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-47
I0212 07:24:34.806319  124047 scheduler.go:453] Attempting to schedule pod: preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-47
I0212 07:24:34.806464  124047 factory.go:647] Unable to schedule preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-47: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 07:24:34.806534  124047 factory.go:742] Updating pod condition for preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-47 to (PodScheduled==False, Reason=Unschedulable)
I0212 07:24:34.806579  124047 wrap.go:47] PATCH /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/events/ppod-42.15828d04c16984a4: (3.284533ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35870]
I0212 07:24:34.808029  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-47: (1.070184ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35870]
I0212 07:24:34.808316  124047 backoff_utils.go:79] Backing off 2s
I0212 07:24:34.808670  124047 wrap.go:47] PUT /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-47/status: (1.911682ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35868]
I0212 07:24:34.810178  124047 wrap.go:47] DELETE /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-10: (3.903954ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35854]
I0212 07:24:34.810195  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-47: (1.216266ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35868]
I0212 07:24:34.810566  124047 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 07:24:34.810616  124047 wrap.go:47] PATCH /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/events/ppod-47.15828d04bf374c90: (2.315868ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35870]
I0212 07:24:34.810802  124047 scheduling_queue.go:868] About to try and schedule pod preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-44
I0212 07:24:34.810834  124047 scheduler.go:453] Attempting to schedule pod: preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-44
I0212 07:24:34.811005  124047 factory.go:647] Unable to schedule preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-44: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 07:24:34.811132  124047 factory.go:742] Updating pod condition for preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-44 to (PodScheduled==False, Reason=Unschedulable)
I0212 07:24:34.812418  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-44: (1.031558ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35870]
I0212 07:24:34.812780  124047 backoff_utils.go:79] Backing off 2s
I0212 07:24:34.813792  124047 wrap.go:47] PUT /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-44/status: (1.86082ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35872]
I0212 07:24:34.814388  124047 wrap.go:47] PATCH /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/events/ppod-44.15828d04c08a314a: (2.34089ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35874]
I0212 07:24:34.814890  124047 wrap.go:47] DELETE /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-11: (4.414994ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35854]
I0212 07:24:34.815346  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-44: (1.158034ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35872]
I0212 07:24:34.815647  124047 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 07:24:34.815826  124047 scheduling_queue.go:868] About to try and schedule pod preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-46
I0212 07:24:34.815850  124047 scheduler.go:453] Attempting to schedule pod: preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-46
I0212 07:24:34.816063  124047 factory.go:647] Unable to schedule preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-46: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 07:24:34.816210  124047 factory.go:742] Updating pod condition for preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-46 to (PodScheduled==False, Reason=Unschedulable)
I0212 07:24:34.817478  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-46: (1.09833ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35872]
I0212 07:24:34.817975  124047 backoff_utils.go:79] Backing off 2s
I0212 07:24:34.818409  124047 wrap.go:47] PUT /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-46/status: (1.923275ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35870]
I0212 07:24:34.819195  124047 wrap.go:47] PATCH /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/events/ppod-46.15828d04bf8dc68b: (2.185492ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35876]
I0212 07:24:34.819460  124047 wrap.go:47] DELETE /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-12: (4.227903ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35874]
I0212 07:24:34.820272  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-46: (1.10179ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35870]
I0212 07:24:34.820550  124047 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 07:24:34.820712  124047 scheduling_queue.go:868] About to try and schedule pod preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-41
I0212 07:24:34.820728  124047 scheduler.go:453] Attempting to schedule pod: preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-41
I0212 07:24:34.820805  124047 factory.go:647] Unable to schedule preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-41: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 07:24:34.820954  124047 factory.go:742] Updating pod condition for preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-41 to (PodScheduled==False, Reason=Unschedulable)
I0212 07:24:34.822440  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-41: (1.007231ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35872]
I0212 07:24:34.822921  124047 backoff_utils.go:79] Backing off 2s
I0212 07:24:34.822934  124047 wrap.go:47] PUT /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-41/status: (1.717723ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35870]
I0212 07:24:34.824285  124047 wrap.go:47] DELETE /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-13: (4.370179ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35876]
I0212 07:24:34.824293  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-41: (914.643µs) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35870]
I0212 07:24:34.824651  124047 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 07:24:34.824833  124047 scheduling_queue.go:868] About to try and schedule pod preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-39
I0212 07:24:34.824852  124047 scheduler.go:453] Attempting to schedule pod: preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-39
I0212 07:24:34.824929  124047 factory.go:647] Unable to schedule preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-39: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 07:24:34.824970  124047 factory.go:742] Updating pod condition for preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-39 to (PodScheduled==False, Reason=Unschedulable)
I0212 07:24:34.825271  124047 wrap.go:47] PATCH /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/events/ppod-41.15828d04bc3d5801: (2.175601ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35878]
I0212 07:24:34.826989  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-39: (1.461421ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35880]
I0212 07:24:34.827215  124047 wrap.go:47] PUT /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-39/status: (2.031885ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35872]
I0212 07:24:34.828693  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-39: (1.086423ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35872]
I0212 07:24:34.828932  124047 wrap.go:47] PATCH /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/events/ppod-39.15828d04bbd0f4ad: (2.969925ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35878]
I0212 07:24:34.828943  124047 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 07:24:34.829108  124047 wrap.go:47] DELETE /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-14: (4.485744ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35876]
I0212 07:24:34.829166  124047 scheduling_queue.go:868] About to try and schedule pod preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-45
I0212 07:24:34.829184  124047 scheduler.go:453] Attempting to schedule pod: preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-45
I0212 07:24:34.829422  124047 factory.go:647] Unable to schedule preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-45: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 07:24:34.829473  124047 factory.go:742] Updating pod condition for preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-45 to (PodScheduled==False, Reason=Unschedulable)
I0212 07:24:34.831149  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-45: (1.247374ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35882]
I0212 07:24:34.831400  124047 wrap.go:47] PUT /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-45/status: (1.68538ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35880]
I0212 07:24:34.831408  124047 backoff_utils.go:79] Backing off 2s
I0212 07:24:34.832827  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-45: (1.07619ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35880]
I0212 07:24:34.833099  124047 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 07:24:34.833271  124047 scheduling_queue.go:868] About to try and schedule pod preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-43
I0212 07:24:34.833290  124047 scheduler.go:453] Attempting to schedule pod: preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-43
I0212 07:24:34.833367  124047 factory.go:647] Unable to schedule preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-43: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 07:24:34.833421  124047 factory.go:742] Updating pod condition for preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-43 to (PodScheduled==False, Reason=Unschedulable)
I0212 07:24:34.833712  124047 wrap.go:47] PATCH /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/events/ppod-45.15828d04bd0c2c58: (2.701904ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35884]
I0212 07:24:34.833715  124047 wrap.go:47] DELETE /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-15: (4.290492ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35872]
I0212 07:24:34.834679  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-43: (1.050231ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35880]
I0212 07:24:34.834975  124047 backoff_utils.go:79] Backing off 2s
I0212 07:24:34.836244  124047 wrap.go:47] PUT /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-43/status: (2.561601ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35882]
I0212 07:24:34.836742  124047 wrap.go:47] PATCH /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/events/ppod-43.15828d04bcbad9d1: (2.524859ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35884]
I0212 07:24:34.837682  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-43: (1.025193ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35882]
I0212 07:24:34.838011  124047 wrap.go:47] DELETE /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-16: (3.937806ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35872]
I0212 07:24:34.838020  124047 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 07:24:34.838220  124047 scheduling_queue.go:868] About to try and schedule pod preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-37
I0212 07:24:34.838272  124047 scheduler.go:453] Attempting to schedule pod: preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-37
I0212 07:24:34.838420  124047 factory.go:647] Unable to schedule preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-37: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 07:24:34.838477  124047 factory.go:742] Updating pod condition for preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-37 to (PodScheduled==False, Reason=Unschedulable)
I0212 07:24:34.840743  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-37: (2.079968ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35880]
I0212 07:24:34.841038  124047 backoff_utils.go:79] Backing off 2s
I0212 07:24:34.842178  124047 wrap.go:47] PATCH /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/events/ppod-37.15828d04c4639f09: (2.445616ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35886]
I0212 07:24:34.842606  124047 wrap.go:47] PUT /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-37/status: (1.448822ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35880]
I0212 07:24:34.843253  124047 wrap.go:47] DELETE /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-17: (4.69532ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35884]
I0212 07:24:34.844914  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-37: (1.692543ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35880]
I0212 07:24:34.845181  124047 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 07:24:34.845337  124047 scheduling_queue.go:868] About to try and schedule pod preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-49
I0212 07:24:34.845358  124047 scheduler.go:453] Attempting to schedule pod: preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-49
I0212 07:24:34.845427  124047 factory.go:647] Unable to schedule preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-49: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 07:24:34.845478  124047 factory.go:742] Updating pod condition for preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-49 to (PodScheduled==False, Reason=Unschedulable)
I0212 07:24:34.847807  124047 wrap.go:47] DELETE /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-18: (4.198491ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35884]
I0212 07:24:34.847810  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-49: (2.003345ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35886]
I0212 07:24:34.848192  124047 backoff_utils.go:79] Backing off 2s
I0212 07:24:34.848653  124047 wrap.go:47] PATCH /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/events/ppod-49.15828d04bddfd24f: (2.357471ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35892]
I0212 07:24:34.848881  124047 wrap.go:47] PUT /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-49/status: (3.167886ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35880]
I0212 07:24:34.850316  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-49: (1.061134ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35892]
I0212 07:24:34.850703  124047 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 07:24:34.850904  124047 scheduling_queue.go:868] About to try and schedule pod preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-39
I0212 07:24:34.850943  124047 scheduler.go:453] Attempting to schedule pod: preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-39
I0212 07:24:34.851035  124047 factory.go:647] Unable to schedule preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-39: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 07:24:34.851119  124047 factory.go:742] Updating pod condition for preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-39 to (PodScheduled==False, Reason=Unschedulable)
I0212 07:24:34.852553  124047 wrap.go:47] DELETE /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-19: (4.374785ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35884]
I0212 07:24:34.852873  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-39: (1.543556ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35892]
I0212 07:24:34.853151  124047 backoff_utils.go:79] Backing off 2s
I0212 07:24:34.853260  124047 wrap.go:47] PUT /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-39/status: (1.589775ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35888]
I0212 07:24:34.854596  124047 wrap.go:47] PATCH /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/events/ppod-39.15828d04bbd0f4ad: (2.695378ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35894]
I0212 07:24:34.854821  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-39: (1.047292ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35888]
I0212 07:24:34.855170  124047 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 07:24:34.855333  124047 scheduling_queue.go:868] About to try and schedule pod preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-24
I0212 07:24:34.855355  124047 scheduler.go:453] Attempting to schedule pod: preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-24
I0212 07:24:34.855446  124047 factory.go:647] Unable to schedule preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-24: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 07:24:34.855563  124047 factory.go:742] Updating pod condition for preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-24 to (PodScheduled==False, Reason=Unschedulable)
I0212 07:24:34.857395  124047 wrap.go:47] DELETE /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-20: (4.398682ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35884]
I0212 07:24:34.857980  124047 wrap.go:47] PUT /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-24/status: (1.836551ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35892]
I0212 07:24:34.858706  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-24: (2.931679ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35894]
I0212 07:24:34.858713  124047 wrap.go:47] PATCH /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/events/ppod-24.15828d04c948c509: (2.245624ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35896]
I0212 07:24:34.859083  124047 backoff_utils.go:79] Backing off 2s
I0212 07:24:34.859458  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-24: (995.395µs) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35892]
I0212 07:24:34.859777  124047 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 07:24:34.860535  124047 scheduling_queue.go:868] About to try and schedule pod preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-21
I0212 07:24:34.860584  124047 scheduler.go:449] Skip schedule deleting pod: preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-21
I0212 07:24:34.861864  124047 wrap.go:47] DELETE /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-21: (4.073332ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35884]
I0212 07:24:34.862269  124047 wrap.go:47] POST /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/events: (1.43688ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35896]
I0212 07:24:34.864719  124047 scheduling_queue.go:868] About to try and schedule pod preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-22
I0212 07:24:34.864758  124047 scheduler.go:449] Skip schedule deleting pod: preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-22
I0212 07:24:34.866256  124047 wrap.go:47] DELETE /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-22: (4.023546ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35884]
I0212 07:24:34.866380  124047 wrap.go:47] POST /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/events: (1.247498ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35896]
I0212 07:24:34.869067  124047 scheduling_queue.go:868] About to try and schedule pod preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-23
I0212 07:24:34.869163  124047 scheduler.go:449] Skip schedule deleting pod: preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-23
I0212 07:24:34.870379  124047 wrap.go:47] DELETE /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-23: (3.770434ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35884]
I0212 07:24:34.870922  124047 wrap.go:47] POST /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/events: (1.437559ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35896]
I0212 07:24:34.873234  124047 scheduling_queue.go:868] About to try and schedule pod preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-24
I0212 07:24:34.873311  124047 scheduler.go:449] Skip schedule deleting pod: preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-24
I0212 07:24:34.874663  124047 wrap.go:47] DELETE /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-24: (3.924421ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35884]
I0212 07:24:34.875025  124047 wrap.go:47] POST /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/events: (1.361973ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35896]
I0212 07:24:34.877531  124047 scheduling_queue.go:868] About to try and schedule pod preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-25
I0212 07:24:34.877607  124047 scheduler.go:449] Skip schedule deleting pod: preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-25
I0212 07:24:34.878726  124047 wrap.go:47] DELETE /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-25: (3.635328ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35884]
I0212 07:24:34.879648  124047 wrap.go:47] POST /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/events: (1.783174ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35896]
I0212 07:24:34.881757  124047 scheduling_queue.go:868] About to try and schedule pod preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-26
I0212 07:24:34.881800  124047 scheduler.go:449] Skip schedule deleting pod: preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-26
I0212 07:24:34.882831  124047 wrap.go:47] DELETE /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-26: (3.643816ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35884]
I0212 07:24:34.883445  124047 wrap.go:47] POST /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/events: (1.398942ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35896]
I0212 07:24:34.885782  124047 scheduling_queue.go:868] About to try and schedule pod preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-27
I0212 07:24:34.885817  124047 scheduler.go:449] Skip schedule deleting pod: preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-27
I0212 07:24:34.887096  124047 wrap.go:47] DELETE /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-27: (3.897275ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35884]
I0212 07:24:34.887448  124047 wrap.go:47] POST /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/events: (1.380875ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35896]
I0212 07:24:34.889805  124047 scheduling_queue.go:868] About to try and schedule pod preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-28
I0212 07:24:34.889836  124047 scheduler.go:449] Skip schedule deleting pod: preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-28
I0212 07:24:34.891226  124047 wrap.go:47] DELETE /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-28: (3.813665ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35884]
I0212 07:24:34.891684  124047 wrap.go:47] POST /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/events: (1.561701ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35896]
I0212 07:24:34.894119  124047 scheduling_queue.go:868] About to try and schedule pod preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-29
I0212 07:24:34.894154  124047 scheduler.go:449] Skip schedule deleting pod: preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-29
I0212 07:24:34.895277  124047 wrap.go:47] DELETE /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-29: (3.718538ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35884]
I0212 07:24:34.895635  124047 wrap.go:47] POST /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/events: (1.246998ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35896]
I0212 07:24:34.898671  124047 scheduling_queue.go:868] About to try and schedule pod preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-30
I0212 07:24:34.898721  124047 scheduler.go:449] Skip schedule deleting pod: preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-30
I0212 07:24:34.899946  124047 wrap.go:47] DELETE /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-30: (4.303075ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35884]
I0212 07:24:34.900642  124047 wrap.go:47] POST /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/events: (1.54745ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35896]
I0212 07:24:34.902839  124047 scheduling_queue.go:868] About to try and schedule pod preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-31
I0212 07:24:34.902878  124047 scheduler.go:449] Skip schedule deleting pod: preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-31
I0212 07:24:34.904141  124047 wrap.go:47] DELETE /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-31: (3.822485ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35884]
I0212 07:24:34.904588  124047 wrap.go:47] POST /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/events: (1.436389ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35896]
I0212 07:24:34.906921  124047 scheduling_queue.go:868] About to try and schedule pod preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-32
I0212 07:24:34.906963  124047 scheduler.go:449] Skip schedule deleting pod: preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-32
I0212 07:24:34.908201  124047 wrap.go:47] DELETE /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-32: (3.726585ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35884]
I0212 07:24:34.908707  124047 wrap.go:47] POST /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/events: (1.464565ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35896]
I0212 07:24:34.911140  124047 scheduling_queue.go:868] About to try and schedule pod preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-33
I0212 07:24:34.911181  124047 scheduler.go:449] Skip schedule deleting pod: preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-33
I0212 07:24:34.912660  124047 wrap.go:47] DELETE /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-33: (4.035673ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35884]
I0212 07:24:34.912950  124047 wrap.go:47] POST /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/events: (1.493616ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35896]
I0212 07:24:34.915975  124047 scheduling_queue.go:868] About to try and schedule pod preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-34
I0212 07:24:34.916023  124047 scheduler.go:449] Skip schedule deleting pod: preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-34
I0212 07:24:34.917071  124047 wrap.go:47] DELETE /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-34: (3.92779ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35884]
I0212 07:24:34.917761  124047 wrap.go:47] POST /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/events: (1.442557ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35896]
I0212 07:24:34.920219  124047 scheduling_queue.go:868] About to try and schedule pod preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-35
I0212 07:24:34.920278  124047 scheduler.go:449] Skip schedule deleting pod: preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-35
I0212 07:24:34.921709  124047 wrap.go:47] DELETE /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-35: (4.214625ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35884]
I0212 07:24:34.922086  124047 wrap.go:47] POST /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/events: (1.448325ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35896]
I0212 07:24:34.924635  124047 scheduling_queue.go:868] About to try and schedule pod preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-36
I0212 07:24:34.924689  124047 scheduler.go:449] Skip schedule deleting pod: preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-36
I0212 07:24:34.925882  124047 wrap.go:47] DELETE /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-36: (3.787041ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35884]
I0212 07:24:34.926532  124047 wrap.go:47] POST /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/events: (1.490966ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35896]
I0212 07:24:34.929163  124047 scheduling_queue.go:868] About to try and schedule pod preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-37
I0212 07:24:34.929208  124047 scheduler.go:449] Skip schedule deleting pod: preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-37
I0212 07:24:34.930665  124047 wrap.go:47] DELETE /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-37: (4.264511ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35884]
I0212 07:24:34.930925  124047 wrap.go:47] POST /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/events: (1.470257ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35896]
I0212 07:24:34.933671  124047 scheduling_queue.go:868] About to try and schedule pod preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-38
I0212 07:24:34.933713  124047 scheduler.go:449] Skip schedule deleting pod: preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-38
I0212 07:24:34.935580  124047 wrap.go:47] POST /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/events: (1.587212ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35896]
I0212 07:24:34.935753  124047 wrap.go:47] DELETE /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-38: (4.597196ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35884]
I0212 07:24:34.938659  124047 scheduling_queue.go:868] About to try and schedule pod preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-39
I0212 07:24:34.938700  124047 scheduler.go:449] Skip schedule deleting pod: preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-39
I0212 07:24:34.940267  124047 wrap.go:47] DELETE /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-39: (4.128687ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35884]
I0212 07:24:34.940575  124047 wrap.go:47] POST /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/events: (1.469325ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35896]
I0212 07:24:34.943244  124047 scheduling_queue.go:868] About to try and schedule pod preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-40
I0212 07:24:34.943289  124047 scheduler.go:449] Skip schedule deleting pod: preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-40
I0212 07:24:34.944547  124047 wrap.go:47] DELETE /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-40: (3.924429ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35884]
I0212 07:24:34.945248  124047 wrap.go:47] POST /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/events: (1.616219ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35896]
I0212 07:24:34.947860  124047 scheduling_queue.go:868] About to try and schedule pod preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-41
I0212 07:24:34.947904  124047 scheduler.go:449] Skip schedule deleting pod: preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-41
I0212 07:24:34.949485  124047 wrap.go:47] DELETE /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-41: (4.658016ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35884]
I0212 07:24:34.949776  124047 wrap.go:47] POST /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/events: (1.502437ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35896]
I0212 07:24:34.952630  124047 scheduling_queue.go:868] About to try and schedule pod preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-42
I0212 07:24:34.952712  124047 scheduler.go:449] Skip schedule deleting pod: preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-42
I0212 07:24:34.953959  124047 wrap.go:47] DELETE /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-42: (4.072507ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35884]
I0212 07:24:34.954390  124047 wrap.go:47] POST /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/events: (1.406831ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35896]
I0212 07:24:34.956894  124047 scheduling_queue.go:868] About to try and schedule pod preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-43
I0212 07:24:34.956933  124047 scheduler.go:449] Skip schedule deleting pod: preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-43
I0212 07:24:34.958851  124047 wrap.go:47] DELETE /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-43: (4.487415ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35884]
I0212 07:24:34.958851  124047 wrap.go:47] POST /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/events: (1.549495ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35896]
I0212 07:24:34.961645  124047 scheduling_queue.go:868] About to try and schedule pod preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-44
I0212 07:24:34.961695  124047 scheduler.go:449] Skip schedule deleting pod: preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-44
I0212 07:24:34.963336  124047 wrap.go:47] POST /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/events: (1.413807ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35894]
I0212 07:24:34.963530  124047 wrap.go:47] DELETE /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-44: (4.28977ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35884]
I0212 07:24:34.966648  124047 scheduling_queue.go:868] About to try and schedule pod preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-45
I0212 07:24:34.966701  124047 scheduler.go:449] Skip schedule deleting pod: preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-45
I0212 07:24:34.968103  124047 wrap.go:47] DELETE /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-45: (4.201569ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35884]
I0212 07:24:34.968468  124047 wrap.go:47] POST /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/events: (1.453002ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35894]
I0212 07:24:34.970921  124047 scheduling_queue.go:868] About to try and schedule pod preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-46
I0212 07:24:34.970970  124047 scheduler.go:449] Skip schedule deleting pod: preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-46
I0212 07:24:34.972345  124047 wrap.go:47] DELETE /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-46: (3.915616ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35884]
I0212 07:24:34.972692  124047 wrap.go:47] POST /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/events: (1.43609ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35894]
I0212 07:24:34.975374  124047 scheduling_queue.go:868] About to try and schedule pod preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-47
I0212 07:24:34.975413  124047 scheduler.go:449] Skip schedule deleting pod: preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-47
I0212 07:24:34.976741  124047 wrap.go:47] DELETE /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-47: (4.024275ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35884]
I0212 07:24:34.977218  124047 wrap.go:47] POST /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/events: (1.544271ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35894]
I0212 07:24:34.979913  124047 scheduling_queue.go:868] About to try and schedule pod preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-48
I0212 07:24:34.979950  124047 scheduler.go:449] Skip schedule deleting pod: preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-48
I0212 07:24:34.981263  124047 wrap.go:47] DELETE /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-48: (4.163285ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35884]
I0212 07:24:34.982250  124047 wrap.go:47] POST /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/events: (2.038915ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35894]
I0212 07:24:34.984235  124047 scheduling_queue.go:868] About to try and schedule pod preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-49
I0212 07:24:34.984275  124047 scheduler.go:449] Skip schedule deleting pod: preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-49
I0212 07:24:34.985548  124047 wrap.go:47] DELETE /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-49: (3.975931ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35884]
I0212 07:24:34.986079  124047 wrap.go:47] POST /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/events: (1.431936ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35894]
I0212 07:24:34.989573  124047 wrap.go:47] DELETE /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/rpod-0: (3.750586ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35884]
I0212 07:24:34.991021  124047 wrap.go:47] DELETE /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/rpod-1: (1.09697ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35884]
I0212 07:24:34.995638  124047 wrap.go:47] DELETE /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/preemptor-pod: (4.080741ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35884]
I0212 07:24:34.998309  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-0: (1.103493ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35884]
I0212 07:24:35.001193  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-1: (1.167772ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35884]
I0212 07:24:35.003686  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-2: (977.874µs) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35884]
I0212 07:24:35.006276  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-3: (1.043874ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35884]
I0212 07:24:35.008849  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-4: (974.268µs) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35884]
I0212 07:24:35.011410  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-5: (1.002925ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35884]
I0212 07:24:35.014114  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-6: (1.074779ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35884]
I0212 07:24:35.016677  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-7: (997.41µs) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35884]
I0212 07:24:35.019209  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-8: (938.292µs) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35884]
I0212 07:24:35.021734  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-9: (946.075µs) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35884]
I0212 07:24:35.024351  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-10: (1.066087ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35884]
I0212 07:24:35.026873  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-11: (859.727µs) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35884]
I0212 07:24:35.029633  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-12: (1.066178ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35884]
I0212 07:24:35.032095  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-13: (897.2µs) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35884]
I0212 07:24:35.034795  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-14: (1.028544ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35884]
I0212 07:24:35.037284  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-15: (888.265µs) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35884]
I0212 07:24:35.041339  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-16: (2.168449ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35884]
I0212 07:24:35.043966  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-17: (988.199µs) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35884]
I0212 07:24:35.046468  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-18: (830.739µs) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35884]
I0212 07:24:35.049469  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-19: (1.298934ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35884]
I0212 07:24:35.052160  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-20: (994.628µs) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35884]
I0212 07:24:35.054709  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-21: (988.845µs) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35884]
I0212 07:24:35.057836  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-22: (1.500586ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35884]
I0212 07:24:35.060387  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-23: (974.936µs) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35884]
I0212 07:24:35.063179  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-24: (1.182417ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35884]
I0212 07:24:35.066409  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-25: (1.619191ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35884]
I0212 07:24:35.069004  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-26: (984.597µs) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35884]
I0212 07:24:35.071767  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-27: (1.128669ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35884]
I0212 07:24:35.074461  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-28: (1.051478ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35884]
I0212 07:24:35.077002  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-29: (973.051µs) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35884]
I0212 07:24:35.079683  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-30: (994.681µs) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35884]
I0212 07:24:35.082281  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-31: (974.583µs) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35884]
I0212 07:24:35.084980  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-32: (1.012774ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35884]
I0212 07:24:35.087689  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-33: (1.087499ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35884]
I0212 07:24:35.090156  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-34: (889.934µs) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35884]
I0212 07:24:35.092706  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-35: (966.995µs) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35884]
I0212 07:24:35.095171  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-36: (908.274µs) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35884]
I0212 07:24:35.097707  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-37: (931.877µs) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35884]
I0212 07:24:35.100438  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-38: (1.055193ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35884]
I0212 07:24:35.103097  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-39: (1.027687ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35884]
I0212 07:24:35.105599  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-40: (1.031004ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35884]
I0212 07:24:35.108330  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-41: (1.045294ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35884]
I0212 07:24:35.111073  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-42: (1.086036ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35884]
I0212 07:24:35.113533  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-43: (921.12µs) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35884]
I0212 07:24:35.116156  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-44: (1.006128ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35884]
I0212 07:24:35.118795  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-45: (1.015324ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35884]
I0212 07:24:35.121738  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-46: (1.337097ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35884]
I0212 07:24:35.124669  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-47: (1.16947ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35884]
I0212 07:24:35.127243  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-48: (993.222µs) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35884]
I0212 07:24:35.129866  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-49: (1.097758ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35884]
I0212 07:24:35.132568  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/rpod-0: (1.004779ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35884]
I0212 07:24:35.135275  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/rpod-1: (1.054002ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35884]
I0212 07:24:35.139854  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/preemptor-pod: (1.609272ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35884]
I0212 07:24:35.142784  124047 wrap.go:47] POST /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods: (2.351922ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35884]
I0212 07:24:35.143551  124047 scheduling_queue.go:868] About to try and schedule pod preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/rpod-0
I0212 07:24:35.143576  124047 scheduler.go:453] Attempting to schedule pod: preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/rpod-0
I0212 07:24:35.143701  124047 scheduler_binder.go:269] AssumePodVolumes for pod "preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/rpod-0", node "node1"
I0212 07:24:35.143722  124047 scheduler_binder.go:279] AssumePodVolumes for pod "preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/rpod-0", node "node1": all PVCs bound and nothing to do
I0212 07:24:35.143772  124047 factory.go:733] Attempting to bind rpod-0 to node1
I0212 07:24:35.145487  124047 wrap.go:47] POST /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/rpod-0/binding: (1.47048ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35894]
I0212 07:24:35.145598  124047 wrap.go:47] POST /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods: (1.798706ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35884]
I0212 07:24:35.145683  124047 scheduler.go:571] pod preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/rpod-0 is bound successfully on node node1, 1 nodes evaluated, 1 nodes were found feasible
I0212 07:24:35.146402  124047 scheduling_queue.go:868] About to try and schedule pod preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/rpod-1
I0212 07:24:35.146425  124047 scheduler.go:453] Attempting to schedule pod: preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/rpod-1
I0212 07:24:35.146592  124047 scheduler_binder.go:269] AssumePodVolumes for pod "preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/rpod-1", node "node1"
I0212 07:24:35.146615  124047 scheduler_binder.go:279] AssumePodVolumes for pod "preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/rpod-1", node "node1": all PVCs bound and nothing to do
I0212 07:24:35.146673  124047 factory.go:733] Attempting to bind rpod-1 to node1
I0212 07:24:35.147531  124047 wrap.go:47] POST /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/events: (1.548419ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35884]
I0212 07:24:35.148410  124047 wrap.go:47] POST /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/rpod-1/binding: (1.504715ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35894]
I0212 07:24:35.148964  124047 scheduler.go:571] pod preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/rpod-1 is bound successfully on node node1, 1 nodes evaluated, 1 nodes were found feasible
I0212 07:24:35.150866  124047 wrap.go:47] POST /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/events: (1.578453ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35894]
I0212 07:24:35.248194  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/rpod-0: (1.821979ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35894]
I0212 07:24:35.351016  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/rpod-1: (1.776161ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35894]
I0212 07:24:35.351350  124047 preemption_test.go:561] Creating the preemptor pod...
I0212 07:24:35.353760  124047 wrap.go:47] POST /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods: (2.04606ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35894]
I0212 07:24:35.353960  124047 scheduling_queue.go:868] About to try and schedule pod preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/preemptor-pod
I0212 07:24:35.354000  124047 scheduler.go:453] Attempting to schedule pod: preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/preemptor-pod
I0212 07:24:35.354069  124047 preemption_test.go:567] Creating additional pods...
I0212 07:24:35.354324  124047 factory.go:647] Unable to schedule preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/preemptor-pod: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 07:24:35.354390  124047 factory.go:742] Updating pod condition for preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/preemptor-pod to (PodScheduled==False, Reason=Unschedulable)
I0212 07:24:35.356247  124047 wrap.go:47] POST /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods: (1.84615ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35894]
I0212 07:24:35.356621  124047 wrap.go:47] POST /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/events: (1.431068ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35902]
I0212 07:24:35.356732  124047 wrap.go:47] PUT /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/preemptor-pod/status: (2.041843ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35884]
I0212 07:24:35.356774  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/preemptor-pod: (1.772214ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35900]
I0212 07:24:35.358194  124047 wrap.go:47] POST /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods: (1.533763ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35894]
I0212 07:24:35.358357  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/preemptor-pod: (1.207654ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35884]
I0212 07:24:35.358642  124047 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 07:24:35.360208  124047 wrap.go:47] POST /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods: (1.613347ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35894]
I0212 07:24:35.360697  124047 wrap.go:47] PUT /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/preemptor-pod/status: (1.685081ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35884]
I0212 07:24:35.362175  124047 wrap.go:47] POST /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods: (1.543934ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35894]
I0212 07:24:35.363850  124047 wrap.go:47] POST /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods: (1.318519ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35894]
I0212 07:24:35.365181  124047 wrap.go:47] DELETE /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/rpod-1: (4.114271ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35884]
I0212 07:24:35.365409  124047 scheduling_queue.go:868] About to try and schedule pod preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-0
I0212 07:24:35.365432  124047 scheduler.go:453] Attempting to schedule pod: preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-0
I0212 07:24:35.365583  124047 factory.go:647] Unable to schedule preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-0: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 07:24:35.365627  124047 factory.go:742] Updating pod condition for preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-0 to (PodScheduled==False, Reason=Unschedulable)
I0212 07:24:35.365792  124047 wrap.go:47] POST /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods: (1.563098ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35894]
I0212 07:24:35.366835  124047 wrap.go:47] POST /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/events: (1.251988ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35884]
I0212 07:24:35.367079  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-0: (1.036176ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35894]
I0212 07:24:35.367354  124047 wrap.go:47] PUT /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-0/status: (1.526264ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35902]
I0212 07:24:35.367596  124047 wrap.go:47] POST /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods: (1.470424ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35904]
I0212 07:24:35.368794  124047 wrap.go:47] POST /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/events: (1.487573ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35894]
I0212 07:24:35.369028  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-0: (1.323429ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35902]
I0212 07:24:35.369317  124047 wrap.go:47] POST /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods: (1.380583ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35904]
I0212 07:24:35.369347  124047 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 07:24:35.369543  124047 scheduling_queue.go:868] About to try and schedule pod preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-4
I0212 07:24:35.369562  124047 scheduler.go:453] Attempting to schedule pod: preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-4
I0212 07:24:35.369656  124047 factory.go:647] Unable to schedule preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-4: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 07:24:35.369722  124047 factory.go:742] Updating pod condition for preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-4 to (PodScheduled==False, Reason=Unschedulable)
I0212 07:24:35.370782  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-4: (940.282µs) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35884]
I0212 07:24:35.371283  124047 wrap.go:47] POST /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods: (1.50525ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35894]
I0212 07:24:35.371905  124047 wrap.go:47] POST /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/events: (1.601807ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35908]
I0212 07:24:35.372600  124047 wrap.go:47] PUT /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-4/status: (2.43078ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35906]
I0212 07:24:35.373115  124047 wrap.go:47] POST /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods: (1.397176ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35894]
I0212 07:24:35.374170  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-4: (1.121149ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35908]
I0212 07:24:35.374484  124047 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 07:24:35.374668  124047 scheduling_queue.go:868] About to try and schedule pod preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-6
I0212 07:24:35.374689  124047 scheduler.go:453] Attempting to schedule pod: preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-6
I0212 07:24:35.374773  124047 factory.go:647] Unable to schedule preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-6: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 07:24:35.374809  124047 factory.go:742] Updating pod condition for preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-6 to (PodScheduled==False, Reason=Unschedulable)
I0212 07:24:35.374944  124047 wrap.go:47] POST /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods: (1.380097ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35894]
I0212 07:24:35.376108  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-6: (1.010203ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35884]
I0212 07:24:35.376781  124047 wrap.go:47] POST /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/events: (1.56629ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35894]
I0212 07:24:35.376944  124047 wrap.go:47] PUT /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-6/status: (1.927346ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35908]
I0212 07:24:35.376963  124047 wrap.go:47] POST /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods: (1.425286ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35910]
I0212 07:24:35.378462  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-6: (1.096997ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35884]
I0212 07:24:35.378650  124047 wrap.go:47] POST /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods: (1.286279ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35894]
I0212 07:24:35.378708  124047 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 07:24:35.378913  124047 scheduling_queue.go:868] About to try and schedule pod preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-9
I0212 07:24:35.378933  124047 scheduler.go:453] Attempting to schedule pod: preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-9
I0212 07:24:35.379005  124047 factory.go:647] Unable to schedule preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-9: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 07:24:35.379059  124047 factory.go:742] Updating pod condition for preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-9 to (PodScheduled==False, Reason=Unschedulable)
I0212 07:24:35.380827  124047 wrap.go:47] POST /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods: (1.752334ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35894]
I0212 07:24:35.381210  124047 wrap.go:47] POST /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/events: (1.656267ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35914]
I0212 07:24:35.381290  124047 wrap.go:47] PUT /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-9/status: (2.019613ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35884]
I0212 07:24:35.381560  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-9: (2.108486ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35912]
I0212 07:24:35.382886  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-9: (1.216688ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35884]
I0212 07:24:35.382973  124047 wrap.go:47] POST /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods: (1.679155ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35894]
I0212 07:24:35.383156  124047 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 07:24:35.383299  124047 scheduling_queue.go:868] About to try and schedule pod preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-12
I0212 07:24:35.383317  124047 scheduler.go:453] Attempting to schedule pod: preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-12
I0212 07:24:35.383388  124047 factory.go:647] Unable to schedule preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-12: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 07:24:35.383432  124047 factory.go:742] Updating pod condition for preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-12 to (PodScheduled==False, Reason=Unschedulable)
I0212 07:24:35.384861  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-12: (1.174208ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35914]
I0212 07:24:35.385429  124047 wrap.go:47] POST /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/events: (1.435868ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35918]
I0212 07:24:35.385723  124047 wrap.go:47] POST /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods: (2.156811ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35912]
I0212 07:24:35.386402  124047 wrap.go:47] PUT /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-12/status: (1.743526ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35916]
I0212 07:24:35.387845  124047 wrap.go:47] POST /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods: (1.431239ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35918]
I0212 07:24:35.387992  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-12: (1.225465ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35916]
I0212 07:24:35.388273  124047 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 07:24:35.388478  124047 scheduling_queue.go:868] About to try and schedule pod preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-14
I0212 07:24:35.388528  124047 scheduler.go:453] Attempting to schedule pod: preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-14
I0212 07:24:35.388623  124047 factory.go:647] Unable to schedule preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-14: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 07:24:35.388681  124047 factory.go:742] Updating pod condition for preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-14 to (PodScheduled==False, Reason=Unschedulable)
I0212 07:24:35.389718  124047 wrap.go:47] POST /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods: (1.462483ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35918]
I0212 07:24:35.390658  124047 wrap.go:47] POST /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/events: (1.274584ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35922]
I0212 07:24:35.391001  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-14: (1.823039ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35920]
I0212 07:24:35.391093  124047 wrap.go:47] PUT /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-14/status: (2.203995ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35914]
I0212 07:24:35.392622  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-14: (1.158594ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35920]
I0212 07:24:35.392724  124047 wrap.go:47] POST /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods: (2.325951ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35918]
I0212 07:24:35.392884  124047 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 07:24:35.393106  124047 scheduling_queue.go:868] About to try and schedule pod preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-16
I0212 07:24:35.393128  124047 scheduler.go:453] Attempting to schedule pod: preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-16
I0212 07:24:35.393299  124047 factory.go:647] Unable to schedule preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-16: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 07:24:35.393362  124047 factory.go:742] Updating pod condition for preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-16 to (PodScheduled==False, Reason=Unschedulable)
I0212 07:24:35.394963  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-16: (1.105215ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35922]
I0212 07:24:35.395361  124047 wrap.go:47] POST /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods: (2.039249ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35920]
I0212 07:24:35.395396  124047 wrap.go:47] POST /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/events: (1.490605ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35926]
I0212 07:24:35.396075  124047 wrap.go:47] PUT /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-16/status: (2.152997ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35924]
I0212 07:24:35.397320  124047 wrap.go:47] POST /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods: (1.539939ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35920]
I0212 07:24:35.397766  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-16: (1.266688ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35924]
I0212 07:24:35.398086  124047 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 07:24:35.398295  124047 scheduling_queue.go:868] About to try and schedule pod preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-18
I0212 07:24:35.398330  124047 scheduler.go:453] Attempting to schedule pod: preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-18
I0212 07:24:35.398451  124047 factory.go:647] Unable to schedule preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-18: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 07:24:35.398535  124047 factory.go:742] Updating pod condition for preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-18 to (PodScheduled==False, Reason=Unschedulable)
I0212 07:24:35.399440  124047 wrap.go:47] POST /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods: (1.690436ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35920]
I0212 07:24:35.400471  124047 wrap.go:47] PUT /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-18/status: (1.690218ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35924]
I0212 07:24:35.401107  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-18: (1.938982ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35922]
I0212 07:24:35.401185  124047 wrap.go:47] POST /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/events: (1.440451ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35920]
I0212 07:24:35.401917  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-18: (1.039175ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35924]
I0212 07:24:35.402217  124047 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 07:24:35.402245  124047 wrap.go:47] POST /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods: (1.876108ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35928]
I0212 07:24:35.402377  124047 scheduling_queue.go:868] About to try and schedule pod preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-20
I0212 07:24:35.402390  124047 scheduler.go:453] Attempting to schedule pod: preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-20
I0212 07:24:35.402472  124047 factory.go:647] Unable to schedule preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-20: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 07:24:35.402552  124047 factory.go:742] Updating pod condition for preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-20 to (PodScheduled==False, Reason=Unschedulable)
I0212 07:24:35.405006  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-20: (1.001828ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35930]
I0212 07:24:35.405011  124047 wrap.go:47] PUT /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-20/status: (2.229853ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35920]
I0212 07:24:35.405117  124047 wrap.go:47] POST /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/events: (1.734537ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35932]
I0212 07:24:35.405517  124047 wrap.go:47] POST /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods: (2.254293ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35922]
I0212 07:24:35.406680  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-20: (1.127458ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35920]
I0212 07:24:35.406992  124047 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 07:24:35.407177  124047 scheduling_queue.go:868] About to try and schedule pod preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-22
I0212 07:24:35.407193  124047 scheduler.go:453] Attempting to schedule pod: preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-22
I0212 07:24:35.407306  124047 factory.go:647] Unable to schedule preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-22: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 07:24:35.407340  124047 wrap.go:47] POST /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods: (1.434258ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35932]
I0212 07:24:35.407364  124047 factory.go:742] Updating pod condition for preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-22 to (PodScheduled==False, Reason=Unschedulable)
I0212 07:24:35.408884  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-22: (1.225366ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35930]
I0212 07:24:35.409430  124047 wrap.go:47] POST /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods: (1.553405ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35936]
I0212 07:24:35.409462  124047 wrap.go:47] PUT /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-22/status: (1.876041ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35920]
I0212 07:24:35.409540  124047 wrap.go:47] POST /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/events: (1.619773ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35934]
I0212 07:24:35.410904  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-22: (1.112484ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35920]
I0212 07:24:35.411219  124047 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 07:24:35.411458  124047 scheduling_queue.go:868] About to try and schedule pod preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-23
I0212 07:24:35.411481  124047 scheduler.go:453] Attempting to schedule pod: preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-23
I0212 07:24:35.411549  124047 wrap.go:47] POST /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods: (1.648671ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35930]
I0212 07:24:35.411605  124047 factory.go:647] Unable to schedule preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-23: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 07:24:35.411659  124047 factory.go:742] Updating pod condition for preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-23 to (PodScheduled==False, Reason=Unschedulable)
I0212 07:24:35.413484  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-23: (1.334787ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35930]
I0212 07:24:35.413602  124047 wrap.go:47] PUT /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-23/status: (1.755023ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35920]
I0212 07:24:35.413668  124047 wrap.go:47] POST /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods: (1.48595ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35938]
I0212 07:24:35.413846  124047 wrap.go:47] POST /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/events: (1.671684ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35940]
I0212 07:24:35.415264  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-23: (1.306788ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35930]
I0212 07:24:35.415535  124047 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 07:24:35.415566  124047 wrap.go:47] POST /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods: (1.472176ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35940]
I0212 07:24:35.415710  124047 scheduling_queue.go:868] About to try and schedule pod preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-25
I0212 07:24:35.415756  124047 scheduler.go:453] Attempting to schedule pod: preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-25
I0212 07:24:35.415867  124047 factory.go:647] Unable to schedule preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-25: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 07:24:35.415925  124047 factory.go:742] Updating pod condition for preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-25 to (PodScheduled==False, Reason=Unschedulable)
I0212 07:24:35.417633  124047 wrap.go:47] POST /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods: (1.588389ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35930]
I0212 07:24:35.418211  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-25: (1.826189ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35942]
I0212 07:24:35.418127  124047 wrap.go:47] PUT /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-25/status: (1.977614ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35920]
I0212 07:24:35.418861  124047 wrap.go:47] POST /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/events: (2.466986ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35944]
I0212 07:24:35.419616  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-25: (1.061152ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35920]
I0212 07:24:35.419912  124047 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 07:24:35.420005  124047 wrap.go:47] POST /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods: (1.440253ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35930]
I0212 07:24:35.420214  124047 scheduling_queue.go:868] About to try and schedule pod preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-27
I0212 07:24:35.420235  124047 scheduler.go:453] Attempting to schedule pod: preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-27
I0212 07:24:35.420347  124047 factory.go:647] Unable to schedule preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-27: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 07:24:35.420399  124047 factory.go:742] Updating pod condition for preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-27 to (PodScheduled==False, Reason=Unschedulable)
I0212 07:24:35.421736  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-27: (1.028193ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35944]
I0212 07:24:35.422399  124047 wrap.go:47] POST /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/events: (1.544325ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35948]
I0212 07:24:35.422627  124047 wrap.go:47] PUT /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-27/status: (1.882111ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35946]
I0212 07:24:35.422938  124047 wrap.go:47] POST /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods: (2.527251ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35920]
I0212 07:24:35.424203  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-27: (1.117025ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35948]
I0212 07:24:35.424538  124047 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 07:24:35.424919  124047 scheduling_queue.go:868] About to try and schedule pod preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-30
I0212 07:24:35.424937  124047 scheduler.go:453] Attempting to schedule pod: preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-30
I0212 07:24:35.424940  124047 wrap.go:47] POST /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods: (1.529169ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35920]
I0212 07:24:35.425085  124047 factory.go:647] Unable to schedule preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-30: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 07:24:35.425127  124047 factory.go:742] Updating pod condition for preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-30 to (PodScheduled==False, Reason=Unschedulable)
I0212 07:24:35.426484  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-30: (1.15619ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35944]
I0212 07:24:35.426958  124047 wrap.go:47] PUT /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-30/status: (1.583575ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35948]
I0212 07:24:35.427004  124047 wrap.go:47] POST /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/events: (1.379225ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35952]
I0212 07:24:35.427944  124047 wrap.go:47] POST /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods: (2.415761ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35950]
I0212 07:24:35.428409  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-30: (1.029202ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35948]
I0212 07:24:35.428648  124047 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 07:24:35.428866  124047 scheduling_queue.go:868] About to try and schedule pod preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-31
I0212 07:24:35.428887  124047 scheduler.go:453] Attempting to schedule pod: preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-31
I0212 07:24:35.428978  124047 factory.go:647] Unable to schedule preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-31: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 07:24:35.429037  124047 factory.go:742] Updating pod condition for preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-31 to (PodScheduled==False, Reason=Unschedulable)
I0212 07:24:35.430305  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-31: (977.942µs) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35944]
I0212 07:24:35.430892  124047 wrap.go:47] PUT /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-31/status: (1.63432ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35948]
I0212 07:24:35.431099  124047 wrap.go:47] POST /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/events: (1.496938ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35954]
I0212 07:24:35.431118  124047 wrap.go:47] POST /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods: (2.764685ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35950]
I0212 07:24:35.432423  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-31: (1.116028ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35948]
I0212 07:24:35.432709  124047 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 07:24:35.432904  124047 scheduling_queue.go:868] About to try and schedule pod preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-33
I0212 07:24:35.432926  124047 scheduler.go:453] Attempting to schedule pod: preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-33
I0212 07:24:35.433110  124047 factory.go:647] Unable to schedule preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-33: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 07:24:35.433193  124047 factory.go:742] Updating pod condition for preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-33 to (PodScheduled==False, Reason=Unschedulable)
I0212 07:24:35.433252  124047 wrap.go:47] POST /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods: (1.585422ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35954]
I0212 07:24:35.434546  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-33: (1.085654ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35944]
I0212 07:24:35.435297  124047 wrap.go:47] POST /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/events: (1.657787ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35954]
I0212 07:24:35.435310  124047 wrap.go:47] POST /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods: (1.443551ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35956]
I0212 07:24:35.435357  124047 wrap.go:47] PUT /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-33/status: (1.886063ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35948]
I0212 07:24:35.437030  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-33: (1.141631ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35954]
I0212 07:24:35.437304  124047 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 07:24:35.437408  124047 wrap.go:47] POST /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods: (1.417148ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35944]
I0212 07:24:35.437519  124047 scheduling_queue.go:868] About to try and schedule pod preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-31
I0212 07:24:35.437539  124047 scheduler.go:453] Attempting to schedule pod: preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-31
I0212 07:24:35.437632  124047 factory.go:647] Unable to schedule preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-31: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 07:24:35.437671  124047 factory.go:742] Updating pod condition for preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-31 to (PodScheduled==False, Reason=Unschedulable)
I0212 07:24:35.439473  124047 wrap.go:47] POST /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods: (1.640296ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35944]
I0212 07:24:35.439982  124047 wrap.go:47] PUT /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-31/status: (2.096744ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35954]
I0212 07:24:35.440889  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-31: (2.799162ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35958]
I0212 07:24:35.441466  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-31: (1.052914ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35954]
I0212 07:24:35.441812  124047 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 07:24:35.442086  124047 scheduling_queue.go:868] About to try and schedule pod preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-36
I0212 07:24:35.442104  124047 scheduler.go:453] Attempting to schedule pod: preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-36
I0212 07:24:35.442307  124047 factory.go:647] Unable to schedule preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-36: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 07:24:35.442375  124047 wrap.go:47] PATCH /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/events/ppod-31.15828d053e558a30: (4.039845ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35960]
I0212 07:24:35.442385  124047 factory.go:742] Updating pod condition for preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-36 to (PodScheduled==False, Reason=Unschedulable)
I0212 07:24:35.442636  124047 wrap.go:47] POST /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods: (2.160993ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35944]
I0212 07:24:35.443965  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-36: (1.305082ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35958]
I0212 07:24:35.444735  124047 wrap.go:47] POST /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/events: (1.819646ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35944]
I0212 07:24:35.444862  124047 wrap.go:47] POST /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods: (1.774464ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35962]
I0212 07:24:35.444937  124047 wrap.go:47] PUT /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-36/status: (2.306083ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35954]
I0212 07:24:35.446598  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-36: (1.249729ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35944]
I0212 07:24:35.446808  124047 wrap.go:47] POST /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods: (1.442956ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35958]
I0212 07:24:35.446826  124047 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 07:24:35.446980  124047 scheduling_queue.go:868] About to try and schedule pod preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-38
I0212 07:24:35.446997  124047 scheduler.go:453] Attempting to schedule pod: preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-38
I0212 07:24:35.447138  124047 factory.go:647] Unable to schedule preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-38: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 07:24:35.447196  124047 factory.go:742] Updating pod condition for preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-38 to (PodScheduled==False, Reason=Unschedulable)
I0212 07:24:35.448615  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-38: (1.18879ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35944]
I0212 07:24:35.448669  124047 wrap.go:47] POST /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods: (1.460791ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35958]
I0212 07:24:35.448979  124047 wrap.go:47] POST /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/events: (1.287872ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35966]
I0212 07:24:35.449193  124047 wrap.go:47] PUT /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-38/status: (1.566557ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35964]
I0212 07:24:35.450668  124047 wrap.go:47] POST /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods: (1.450888ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35958]
I0212 07:24:35.450714  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-38: (1.151002ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35966]
I0212 07:24:35.450995  124047 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 07:24:35.451161  124047 scheduling_queue.go:868] About to try and schedule pod preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-41
I0212 07:24:35.451196  124047 scheduler.go:453] Attempting to schedule pod: preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-41
I0212 07:24:35.451272  124047 factory.go:647] Unable to schedule preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-41: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 07:24:35.451318  124047 factory.go:742] Updating pod condition for preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-41 to (PodScheduled==False, Reason=Unschedulable)
I0212 07:24:35.452688  124047 wrap.go:47] POST /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods: (1.582996ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35958]
I0212 07:24:35.453269  124047 wrap.go:47] PUT /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-41/status: (1.708793ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35944]
I0212 07:24:35.453442  124047 wrap.go:47] POST /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/events: (1.394237ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35970]
I0212 07:24:35.454562  124047 wrap.go:47] POST /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods: (1.546728ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35958]
I0212 07:24:35.454695  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-41: (1.06006ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35944]
I0212 07:24:35.455035  124047 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 07:24:35.455120  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-41: (3.286103ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35968]
I0212 07:24:35.455239  124047 scheduling_queue.go:868] About to try and schedule pod preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-43
I0212 07:24:35.455260  124047 scheduler.go:453] Attempting to schedule pod: preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-43
I0212 07:24:35.455424  124047 factory.go:647] Unable to schedule preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-43: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 07:24:35.455471  124047 factory.go:742] Updating pod condition for preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-43 to (PodScheduled==False, Reason=Unschedulable)
I0212 07:24:35.461002  124047 wrap.go:47] PUT /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-43/status: (5.210503ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35970]
I0212 07:24:35.461553  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-43: (5.82658ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35968]
I0212 07:24:35.461903  124047 wrap.go:47] POST /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods: (6.909145ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35958]
I0212 07:24:35.462957  124047 wrap.go:47] POST /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/events: (6.959663ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35972]
I0212 07:24:35.465522  124047 wrap.go:47] POST /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods: (3.249262ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35958]
I0212 07:24:35.466420  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-43: (4.550549ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35970]
I0212 07:24:35.466678  124047 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 07:24:35.466957  124047 scheduling_queue.go:868] About to try and schedule pod preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-45
I0212 07:24:35.466980  124047 scheduler.go:453] Attempting to schedule pod: preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-45
I0212 07:24:35.467111  124047 factory.go:647] Unable to schedule preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-45: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 07:24:35.467162  124047 factory.go:742] Updating pod condition for preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-45 to (PodScheduled==False, Reason=Unschedulable)
I0212 07:24:35.469331  124047 wrap.go:47] POST /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods: (2.861798ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35972]
I0212 07:24:35.469369  124047 wrap.go:47] POST /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/events: (1.593815ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35980]
I0212 07:24:35.469605  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-45: (2.043025ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35968]
I0212 07:24:35.470213  124047 wrap.go:47] PUT /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-45/status: (2.718997ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35970]
I0212 07:24:35.471564  124047 wrap.go:47] POST /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods: (1.7539ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35980]
I0212 07:24:35.472428  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-45: (1.482034ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35968]
I0212 07:24:35.472702  124047 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 07:24:35.472895  124047 scheduling_queue.go:868] About to try and schedule pod preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-47
I0212 07:24:35.472914  124047 scheduler.go:453] Attempting to schedule pod: preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-47
I0212 07:24:35.473002  124047 factory.go:647] Unable to schedule preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-47: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 07:24:35.473062  124047 factory.go:742] Updating pod condition for preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-47 to (PodScheduled==False, Reason=Unschedulable)
I0212 07:24:35.474960  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-47: (1.211328ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35972]
I0212 07:24:35.475393  124047 wrap.go:47] POST /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/events: (1.637615ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35982]
I0212 07:24:35.475947  124047 wrap.go:47] PUT /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-47/status: (2.643835ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35980]
I0212 07:24:35.477744  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-47: (1.32887ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35982]
I0212 07:24:35.478056  124047 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 07:24:35.478241  124047 scheduling_queue.go:868] About to try and schedule pod preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-49
I0212 07:24:35.478263  124047 scheduler.go:453] Attempting to schedule pod: preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-49
I0212 07:24:35.478355  124047 factory.go:647] Unable to schedule preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-49: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 07:24:35.478402  124047 factory.go:742] Updating pod condition for preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-49 to (PodScheduled==False, Reason=Unschedulable)
I0212 07:24:35.480287  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-49: (1.359833ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35972]
I0212 07:24:35.480807  124047 wrap.go:47] POST /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/events: (1.725319ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35984]
I0212 07:24:35.480987  124047 wrap.go:47] PUT /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-49/status: (2.053573ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35982]
I0212 07:24:35.482745  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-49: (1.290405ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35984]
I0212 07:24:35.483216  124047 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 07:24:35.483455  124047 scheduling_queue.go:868] About to try and schedule pod preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-47
I0212 07:24:35.483516  124047 scheduler.go:453] Attempting to schedule pod: preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-47
I0212 07:24:35.483650  124047 factory.go:647] Unable to schedule preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-47: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 07:24:35.483697  124047 factory.go:742] Updating pod condition for preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-47 to (PodScheduled==False, Reason=Unschedulable)
I0212 07:24:35.485964  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-47: (1.504662ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35972]
I0212 07:24:35.486612  124047 wrap.go:47] PUT /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-47/status: (2.598933ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35984]
I0212 07:24:35.488097  124047 wrap.go:47] PATCH /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/events/ppod-47.15828d0540f5336d: (2.992301ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35986]
I0212 07:24:35.488753  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-47: (1.39124ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35984]
I0212 07:24:35.489146  124047 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 07:24:35.489443  124047 scheduling_queue.go:868] About to try and schedule pod preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-49
I0212 07:24:35.489465  124047 scheduler.go:453] Attempting to schedule pod: preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-49
I0212 07:24:35.489602  124047 factory.go:647] Unable to schedule preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-49: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 07:24:35.489659  124047 factory.go:742] Updating pod condition for preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-49 to (PodScheduled==False, Reason=Unschedulable)
I0212 07:24:35.491231  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-49: (1.182279ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35972]
I0212 07:24:35.491574  124047 wrap.go:47] PUT /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-49/status: (1.658953ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35986]
I0212 07:24:35.493651  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-49: (1.663805ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35972]
I0212 07:24:35.493939  124047 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 07:24:35.494202  124047 scheduling_queue.go:868] About to try and schedule pod preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-45
I0212 07:24:35.494227  124047 scheduler.go:453] Attempting to schedule pod: preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-45
I0212 07:24:35.494416  124047 factory.go:647] Unable to schedule preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-45: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 07:24:35.494564  124047 factory.go:742] Updating pod condition for preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-45 to (PodScheduled==False, Reason=Unschedulable)
I0212 07:24:35.494733  124047 wrap.go:47] PATCH /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/events/ppod-49.15828d054146dd39: (2.986687ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35988]
I0212 07:24:35.495861  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-45: (993.887µs) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35972]
I0212 07:24:35.496366  124047 wrap.go:47] PUT /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-45/status: (1.454996ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35986]
I0212 07:24:35.497915  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-45: (1.080735ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35986]
I0212 07:24:35.498206  124047 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 07:24:35.498367  124047 wrap.go:47] PATCH /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/events/ppod-45.15828d05409b5f30: (2.213608ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35972]
I0212 07:24:35.498421  124047 scheduling_queue.go:868] About to try and schedule pod preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-48
I0212 07:24:35.498435  124047 scheduler.go:453] Attempting to schedule pod: preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-48
I0212 07:24:35.498626  124047 factory.go:647] Unable to schedule preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-48: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 07:24:35.498689  124047 factory.go:742] Updating pod condition for preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-48 to (PodScheduled==False, Reason=Unschedulable)
I0212 07:24:35.500148  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-48: (1.117197ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35988]
I0212 07:24:35.500867  124047 wrap.go:47] PUT /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-48/status: (1.75395ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35986]
I0212 07:24:35.502527  124047 wrap.go:47] POST /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/events: (1.895ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35990]
I0212 07:24:35.502972  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-48: (1.69444ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35986]
I0212 07:24:35.503285  124047 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 07:24:35.503458  124047 scheduling_queue.go:868] About to try and schedule pod preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-46
I0212 07:24:35.503530  124047 scheduler.go:453] Attempting to schedule pod: preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-46
I0212 07:24:35.503641  124047 factory.go:647] Unable to schedule preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-46: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 07:24:35.503713  124047 factory.go:742] Updating pod condition for preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-46 to (PodScheduled==False, Reason=Unschedulable)
I0212 07:24:35.505465  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-46: (1.203627ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35988]
I0212 07:24:35.506080  124047 wrap.go:47] POST /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/events: (1.666731ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35992]
I0212 07:24:35.507107  124047 wrap.go:47] PUT /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-46/status: (2.753899ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35990]
I0212 07:24:35.508803  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-46: (1.161103ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35992]
I0212 07:24:35.509096  124047 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 07:24:35.509339  124047 scheduling_queue.go:868] About to try and schedule pod preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-48
I0212 07:24:35.509374  124047 scheduler.go:453] Attempting to schedule pod: preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-48
I0212 07:24:35.509550  124047 factory.go:647] Unable to schedule preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-48: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 07:24:35.509608  124047 factory.go:742] Updating pod condition for preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-48 to (PodScheduled==False, Reason=Unschedulable)
I0212 07:24:35.511004  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-48: (1.087302ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35988]
I0212 07:24:35.512142  124047 wrap.go:47] PUT /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-48/status: (2.266638ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35992]
I0212 07:24:35.513108  124047 wrap.go:47] PATCH /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/events/ppod-48.15828d05427c6aa8: (2.356329ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35994]
I0212 07:24:35.513870  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-48: (1.154182ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35992]
I0212 07:24:35.514144  124047 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 07:24:35.514358  124047 scheduling_queue.go:868] About to try and schedule pod preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-46
I0212 07:24:35.514378  124047 scheduler.go:453] Attempting to schedule pod: preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-46
I0212 07:24:35.514532  124047 factory.go:647] Unable to schedule preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-46: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 07:24:35.514587  124047 factory.go:742] Updating pod condition for preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-46 to (PodScheduled==False, Reason=Unschedulable)
I0212 07:24:35.516478  124047 wrap.go:47] PUT /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-46/status: (1.640825ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35994]
I0212 07:24:35.516674  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-46: (1.135868ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35988]
I0212 07:24:35.518087  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-46: (1.170819ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35988]
I0212 07:24:35.518315  124047 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 07:24:35.518575  124047 scheduling_queue.go:868] About to try and schedule pod preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-43
I0212 07:24:35.518600  124047 scheduler.go:453] Attempting to schedule pod: preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-43
I0212 07:24:35.518700  124047 wrap.go:47] PATCH /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/events/ppod-46.15828d0542c90e65: (3.204418ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35996]
I0212 07:24:35.519078  124047 factory.go:647] Unable to schedule preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-43: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 07:24:35.519149  124047 factory.go:742] Updating pod condition for preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-43 to (PodScheduled==False, Reason=Unschedulable)
I0212 07:24:35.521195  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-43: (1.580488ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35994]
I0212 07:24:35.521960  124047 wrap.go:47] PUT /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-43/status: (2.31793ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35988]
I0212 07:24:35.523105  124047 wrap.go:47] PATCH /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/events/ppod-43.15828d053fe8fe9a: (3.161799ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35998]
I0212 07:24:35.523797  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-43: (1.464685ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35988]
I0212 07:24:35.524128  124047 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 07:24:35.524416  124047 scheduling_queue.go:868] About to try and schedule pod preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-41
I0212 07:24:35.524441  124047 scheduler.go:453] Attempting to schedule pod: preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-41
I0212 07:24:35.524636  124047 factory.go:647] Unable to schedule preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-41: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 07:24:35.524692  124047 factory.go:742] Updating pod condition for preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-41 to (PodScheduled==False, Reason=Unschedulable)
I0212 07:24:35.526292  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-41: (1.240521ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35994]
I0212 07:24:35.527211  124047 wrap.go:47] PUT /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-41/status: (2.128773ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35998]
I0212 07:24:35.528587  124047 wrap.go:47] PATCH /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/events/ppod-41.15828d053fa99a92: (2.86849ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:36000]
I0212 07:24:35.528907  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-41: (1.1729ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35998]
I0212 07:24:35.529247  124047 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 07:24:35.529423  124047 scheduling_queue.go:868] About to try and schedule pod preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-44
I0212 07:24:35.529445  124047 scheduler.go:453] Attempting to schedule pod: preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-44
I0212 07:24:35.529569  124047 factory.go:647] Unable to schedule preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-44: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 07:24:35.529618  124047 factory.go:742] Updating pod condition for preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-44 to (PodScheduled==False, Reason=Unschedulable)
I0212 07:24:35.531762  124047 wrap.go:47] POST /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/events: (1.585491ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:36002]
I0212 07:24:35.532079  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-44: (1.881343ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35994]
I0212 07:24:35.532210  124047 wrap.go:47] PUT /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-44/status: (2.307302ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:36000]
I0212 07:24:35.534022  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-44: (1.137643ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35994]
I0212 07:24:35.534319  124047 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 07:24:35.534531  124047 scheduling_queue.go:868] About to try and schedule pod preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-42
I0212 07:24:35.534548  124047 scheduler.go:453] Attempting to schedule pod: preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-42
I0212 07:24:35.534636  124047 factory.go:647] Unable to schedule preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-42: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 07:24:35.534684  124047 factory.go:742] Updating pod condition for preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-42 to (PodScheduled==False, Reason=Unschedulable)
I0212 07:24:35.536470  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-42: (1.075685ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:36002]
I0212 07:24:35.536808  124047 wrap.go:47] POST /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/events: (1.410432ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:36004]
I0212 07:24:35.536938  124047 wrap.go:47] PUT /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-42/status: (1.979655ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:35994]
I0212 07:24:35.538731  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-42: (1.311661ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:36004]
I0212 07:24:35.539001  124047 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 07:24:35.539219  124047 scheduling_queue.go:868] About to try and schedule pod preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-44
I0212 07:24:35.539240  124047 scheduler.go:453] Attempting to schedule pod: preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-44
I0212 07:24:35.539376  124047 factory.go:647] Unable to schedule preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-44: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 07:24:35.539437  124047 factory.go:742] Updating pod condition for preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-44 to (PodScheduled==False, Reason=Unschedulable)
I0212 07:24:35.541314  124047 wrap.go:47] PUT /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-44/status: (1.604596ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:36004]
I0212 07:24:35.542300  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-44: (1.736194ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:36002]
I0212 07:24:35.542562  124047 wrap.go:47] PATCH /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/events/ppod-44.15828d0544546136: (2.212901ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:36006]
I0212 07:24:35.543195  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-44: (1.463596ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:36004]
I0212 07:24:35.543481  124047 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 07:24:35.543731  124047 scheduling_queue.go:868] About to try and schedule pod preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-42
I0212 07:24:35.543757  124047 scheduler.go:453] Attempting to schedule pod: preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-42
I0212 07:24:35.543894  124047 factory.go:647] Unable to schedule preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-42: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 07:24:35.544020  124047 factory.go:742] Updating pod condition for preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-42 to (PodScheduled==False, Reason=Unschedulable)
I0212 07:24:35.545339  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-42: (1.073197ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:36002]
I0212 07:24:35.546471  124047 wrap.go:47] PUT /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-42/status: (2.219242ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:36006]
I0212 07:24:35.547224  124047 wrap.go:47] PATCH /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/events/ppod-42.15828d0544a1af92: (2.346503ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:36008]
I0212 07:24:35.548025  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-42: (999.544µs) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:36006]
I0212 07:24:35.548325  124047 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 07:24:35.548549  124047 scheduling_queue.go:868] About to try and schedule pod preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-38
I0212 07:24:35.548573  124047 scheduler.go:453] Attempting to schedule pod: preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-38
I0212 07:24:35.548726  124047 factory.go:647] Unable to schedule preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-38: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 07:24:35.548777  124047 factory.go:742] Updating pod condition for preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-38 to (PodScheduled==False, Reason=Unschedulable)
I0212 07:24:35.550022  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-38: (1.012724ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:36002]
I0212 07:24:35.550639  124047 wrap.go:47] PUT /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-38/status: (1.637031ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:36008]
I0212 07:24:35.552584  124047 wrap.go:47] PATCH /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/events/ppod-38.15828d053f6ab4ce: (2.989797ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:36010]
I0212 07:24:35.553327  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-38: (1.067024ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:36008]
I0212 07:24:35.553762  124047 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 07:24:35.554172  124047 scheduling_queue.go:868] About to try and schedule pod preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-40
I0212 07:24:35.554229  124047 scheduler.go:453] Attempting to schedule pod: preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-40
I0212 07:24:35.554463  124047 factory.go:647] Unable to schedule preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-40: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 07:24:35.554574  124047 factory.go:742] Updating pod condition for preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-40 to (PodScheduled==False, Reason=Unschedulable)
I0212 07:24:35.556022  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-40: (1.044355ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:36002]
I0212 07:24:35.556926  124047 wrap.go:47] POST /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/events: (1.64734ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:36012]
I0212 07:24:35.557485  124047 wrap.go:47] PUT /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-40/status: (2.641005ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:36010]
I0212 07:24:35.559391  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-40: (1.508574ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:36012]
I0212 07:24:35.559674  124047 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 07:24:35.559865  124047 scheduling_queue.go:868] About to try and schedule pod preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-36
I0212 07:24:35.559900  124047 scheduler.go:453] Attempting to schedule pod: preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-36
I0212 07:24:35.560010  124047 factory.go:647] Unable to schedule preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-36: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 07:24:35.560073  124047 factory.go:742] Updating pod condition for preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-36 to (PodScheduled==False, Reason=Unschedulable)
I0212 07:24:35.561544  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-36: (1.225618ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:36002]
I0212 07:24:35.562292  124047 wrap.go:47] PUT /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-36/status: (1.961904ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:36012]
I0212 07:24:35.563833  124047 wrap.go:47] PATCH /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/events/ppod-36.15828d053f214cc6: (2.61681ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:36014]
I0212 07:24:35.563940  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-36: (1.175395ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:36012]
I0212 07:24:35.564252  124047 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 07:24:35.564525  124047 scheduling_queue.go:868] About to try and schedule pod preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-40
I0212 07:24:35.564569  124047 scheduler.go:453] Attempting to schedule pod: preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-40
I0212 07:24:35.564709  124047 factory.go:647] Unable to schedule preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-40: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 07:24:35.564766  124047 factory.go:742] Updating pod condition for preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-40 to (PodScheduled==False, Reason=Unschedulable)
I0212 07:24:35.566589  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-40: (1.46685ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:36002]
I0212 07:24:35.567258  124047 wrap.go:47] PUT /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-40/status: (2.186274ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:36014]
I0212 07:24:35.567989  124047 wrap.go:47] PATCH /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/events/ppod-40.15828d0545d12747: (2.407462ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:36016]
I0212 07:24:35.569076  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-40: (949.486µs) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:36014]
I0212 07:24:35.569409  124047 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 07:24:35.569609  124047 scheduling_queue.go:868] About to try and schedule pod preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-39
I0212 07:24:35.569631  124047 scheduler.go:453] Attempting to schedule pod: preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-39
I0212 07:24:35.569716  124047 factory.go:647] Unable to schedule preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-39: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 07:24:35.569766  124047 factory.go:742] Updating pod condition for preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-39 to (PodScheduled==False, Reason=Unschedulable)
I0212 07:24:35.571338  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-39: (1.148721ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:36002]
I0212 07:24:35.572258  124047 wrap.go:47] PUT /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-39/status: (2.166371ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:36016]
I0212 07:24:35.572932  124047 wrap.go:47] POST /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/events: (2.314444ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:36018]
I0212 07:24:35.573847  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-39: (1.177849ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:36016]
I0212 07:24:35.573955  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/preemptor-pod: (1.67716ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:36002]
I0212 07:24:35.574558  124047 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 07:24:35.574756  124047 scheduling_queue.go:868] About to try and schedule pod preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-37
I0212 07:24:35.574777  124047 scheduler.go:453] Attempting to schedule pod: preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-37
I0212 07:24:35.574912  124047 factory.go:647] Unable to schedule preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-37: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 07:24:35.574973  124047 factory.go:742] Updating pod condition for preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-37 to (PodScheduled==False, Reason=Unschedulable)
I0212 07:24:35.576240  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-37: (999.421µs) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:36016]
I0212 07:24:35.576905  124047 wrap.go:47] PUT /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-37/status: (1.659965ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:36018]
I0212 07:24:35.577235  124047 wrap.go:47] POST /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/events: (1.333998ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:36020]
I0212 07:24:35.578596  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-37: (1.153675ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:36018]
I0212 07:24:35.578910  124047 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 07:24:35.579124  124047 scheduling_queue.go:868] About to try and schedule pod preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-39
I0212 07:24:35.579173  124047 scheduler.go:453] Attempting to schedule pod: preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-39
I0212 07:24:35.579308  124047 factory.go:647] Unable to schedule preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-39: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 07:24:35.579379  124047 factory.go:742] Updating pod condition for preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-39 to (PodScheduled==False, Reason=Unschedulable)
I0212 07:24:35.580819  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-39: (1.197351ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:36020]
I0212 07:24:35.581542  124047 wrap.go:47] PUT /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-39/status: (1.882036ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:36016]
I0212 07:24:35.582949  124047 wrap.go:47] PATCH /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/events/ppod-39.15828d0546b8fc0b: (2.764482ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:36022]
I0212 07:24:35.583698  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-39: (1.16633ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:36016]
I0212 07:24:35.583991  124047 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 07:24:35.584195  124047 scheduling_queue.go:868] About to try and schedule pod preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-37
I0212 07:24:35.584221  124047 scheduler.go:453] Attempting to schedule pod: preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-37
I0212 07:24:35.584404  124047 factory.go:647] Unable to schedule preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-37: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 07:24:35.584463  124047 factory.go:742] Updating pod condition for preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-37 to (PodScheduled==False, Reason=Unschedulable)
I0212 07:24:35.585775  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-37: (1.04323ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:36020]
I0212 07:24:35.587301  124047 wrap.go:47] PUT /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-37/status: (2.555642ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:36022]
I0212 07:24:35.589175  124047 wrap.go:47] PATCH /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/events/ppod-37.15828d0547087077: (3.732339ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:36024]
I0212 07:24:35.589216  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-37: (1.446821ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:36022]
I0212 07:24:35.589429  124047 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 07:24:35.589648  124047 scheduling_queue.go:868] About to try and schedule pod preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-33
I0212 07:24:35.589672  124047 scheduler.go:453] Attempting to schedule pod: preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-33
I0212 07:24:35.589822  124047 factory.go:647] Unable to schedule preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-33: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 07:24:35.589885  124047 factory.go:742] Updating pod condition for preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-33 to (PodScheduled==False, Reason=Unschedulable)
I0212 07:24:35.591159  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-33: (1.00696ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:36020]
I0212 07:24:35.591949  124047 wrap.go:47] PUT /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-33/status: (1.827538ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:36022]
I0212 07:24:35.593241  124047 wrap.go:47] PATCH /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/events/ppod-33.15828d053e94faac: (2.702134ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:36026]
I0212 07:24:35.594247  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-33: (1.399582ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:36022]
I0212 07:24:35.594629  124047 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 07:24:35.594848  124047 scheduling_queue.go:868] About to try and schedule pod preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-35
I0212 07:24:35.594867  124047 scheduler.go:453] Attempting to schedule pod: preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-35
I0212 07:24:35.594986  124047 factory.go:647] Unable to schedule preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-35: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 07:24:35.595065  124047 factory.go:742] Updating pod condition for preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-35 to (PodScheduled==False, Reason=Unschedulable)
I0212 07:24:35.596530  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-35: (1.119571ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:36020]
I0212 07:24:35.597187  124047 wrap.go:47] POST /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/events: (1.539266ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:36028]
I0212 07:24:35.597672  124047 wrap.go:47] PUT /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-35/status: (2.342274ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:36026]
I0212 07:24:35.599677  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-35: (1.418054ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:36028]
I0212 07:24:35.599970  124047 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 07:24:35.600239  124047 scheduling_queue.go:868] About to try and schedule pod preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-34
I0212 07:24:35.600258  124047 scheduler.go:453] Attempting to schedule pod: preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-34
I0212 07:24:35.600370  124047 factory.go:647] Unable to schedule preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-34: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 07:24:35.600419  124047 factory.go:742] Updating pod condition for preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-34 to (PodScheduled==False, Reason=Unschedulable)
I0212 07:24:35.602291  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-34: (1.556725ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:36020]
I0212 07:24:35.602465  124047 wrap.go:47] POST /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/events: (1.509705ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:36030]
I0212 07:24:35.602557  124047 wrap.go:47] PUT /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-34/status: (1.895912ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:36028]
I0212 07:24:35.604432  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-34: (1.110558ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:36028]
I0212 07:24:35.604699  124047 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 07:24:35.604910  124047 scheduling_queue.go:868] About to try and schedule pod preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-35
I0212 07:24:35.604931  124047 scheduler.go:453] Attempting to schedule pod: preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-35
I0212 07:24:35.605061  124047 factory.go:647] Unable to schedule preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-35: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 07:24:35.605140  124047 factory.go:742] Updating pod condition for preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-35 to (PodScheduled==False, Reason=Unschedulable)
I0212 07:24:35.606756  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-35: (1.379728ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:36028]
I0212 07:24:35.607687  124047 wrap.go:47] PUT /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-35/status: (1.925802ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:36030]
I0212 07:24:35.608457  124047 wrap.go:47] PATCH /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/events/ppod-35.15828d05483abeb5: (2.07822ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:36032]
I0212 07:24:35.609624  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-35: (1.082084ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:36030]
I0212 07:24:35.609929  124047 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 07:24:35.610351  124047 scheduling_queue.go:868] About to try and schedule pod preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-34
I0212 07:24:35.610375  124047 scheduler.go:453] Attempting to schedule pod: preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-34
I0212 07:24:35.610465  124047 factory.go:647] Unable to schedule preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-34: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 07:24:35.610544  124047 factory.go:742] Updating pod condition for preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-34 to (PodScheduled==False, Reason=Unschedulable)
I0212 07:24:35.612666  124047 wrap.go:47] PUT /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-34/status: (1.886759ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:36032]
I0212 07:24:35.612833  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-34: (1.498191ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:36028]
I0212 07:24:35.613852  124047 wrap.go:47] PATCH /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/events/ppod-34.15828d05488cb876: (2.436806ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:36034]
I0212 07:24:35.614969  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-34: (1.867719ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:36028]
I0212 07:24:35.615270  124047 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 07:24:35.615437  124047 scheduling_queue.go:868] About to try and schedule pod preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-30
I0212 07:24:35.615448  124047 scheduler.go:453] Attempting to schedule pod: preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-30
I0212 07:24:35.615604  124047 factory.go:647] Unable to schedule preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-30: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 07:24:35.615654  124047 factory.go:742] Updating pod condition for preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-30 to (PodScheduled==False, Reason=Unschedulable)
I0212 07:24:35.617218  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-30: (1.265206ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:36032]
I0212 07:24:35.617688  124047 wrap.go:47] PUT /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-30/status: (1.795842ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:36034]
I0212 07:24:35.618681  124047 wrap.go:47] PATCH /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/events/ppod-30.15828d053e19fae9: (2.276924ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:36036]
I0212 07:24:35.619609  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-30: (1.185728ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:36034]
I0212 07:24:35.620611  124047 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 07:24:35.620819  124047 scheduling_queue.go:868] About to try and schedule pod preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-32
I0212 07:24:35.620830  124047 scheduler.go:453] Attempting to schedule pod: preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-32
I0212 07:24:35.620940  124047 factory.go:647] Unable to schedule preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-32: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 07:24:35.620978  124047 factory.go:742] Updating pod condition for preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-32 to (PodScheduled==False, Reason=Unschedulable)
I0212 07:24:35.623574  124047 wrap.go:47] POST /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/events: (1.678376ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:36038]
I0212 07:24:35.623785  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-32: (1.854873ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:36032]
I0212 07:24:35.624159  124047 wrap.go:47] PUT /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-32/status: (2.690137ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:36036]
I0212 07:24:35.625756  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-32: (1.109173ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:36032]
I0212 07:24:35.626119  124047 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 07:24:35.626307  124047 scheduling_queue.go:868] About to try and schedule pod preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-27
I0212 07:24:35.626328  124047 scheduler.go:453] Attempting to schedule pod: preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-27
I0212 07:24:35.626414  124047 factory.go:647] Unable to schedule preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-27: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 07:24:35.626463  124047 factory.go:742] Updating pod condition for preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-27 to (PodScheduled==False, Reason=Unschedulable)
I0212 07:24:35.628642  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-27: (1.474128ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:36038]
I0212 07:24:35.629305  124047 wrap.go:47] PUT /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-27/status: (2.069633ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:36032]
I0212 07:24:35.630877  124047 wrap.go:47] PATCH /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/events/ppod-27.15828d053dd1c6c6: (2.753931ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:36040]
I0212 07:24:35.631017  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-27: (1.253314ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:36032]
I0212 07:24:35.631317  124047 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 07:24:35.631550  124047 scheduling_queue.go:868] About to try and schedule pod preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-32
I0212 07:24:35.631570  124047 scheduler.go:453] Attempting to schedule pod: preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-32
I0212 07:24:35.631673  124047 factory.go:647] Unable to schedule preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-32: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 07:24:35.631717  124047 factory.go:742] Updating pod condition for preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-32 to (PodScheduled==False, Reason=Unschedulable)
I0212 07:24:35.633145  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-32: (1.192828ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:36038]
I0212 07:24:35.633556  124047 wrap.go:47] PUT /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-32/status: (1.617892ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:36040]
I0212 07:24:35.635126  124047 wrap.go:47] PATCH /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/events/ppod-32.15828d0549c66ef4: (2.409415ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:36042]
I0212 07:24:35.635399  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-32: (1.429982ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:36040]
I0212 07:24:35.635731  124047 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 07:24:35.635996  124047 scheduling_queue.go:868] About to try and schedule pod preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-25
I0212 07:24:35.636018  124047 scheduler.go:453] Attempting to schedule pod: preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-25
I0212 07:24:35.636160  124047 factory.go:647] Unable to schedule preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-25: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 07:24:35.636254  124047 factory.go:742] Updating pod condition for preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-25 to (PodScheduled==False, Reason=Unschedulable)
I0212 07:24:35.637665  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-25: (1.118839ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:36038]
I0212 07:24:35.638323  124047 wrap.go:47] PUT /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-25/status: (1.728676ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:36042]
I0212 07:24:35.639784  124047 wrap.go:47] PATCH /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/events/ppod-25.15828d053d8d8c91: (2.47162ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:36044]
I0212 07:24:35.640836  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-25: (2.12406ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:36042]
I0212 07:24:35.641187  124047 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 07:24:35.641417  124047 scheduling_queue.go:868] About to try and schedule pod preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-29
I0212 07:24:35.641440  124047 scheduler.go:453] Attempting to schedule pod: preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-29
I0212 07:24:35.641591  124047 factory.go:647] Unable to schedule preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-29: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 07:24:35.641650  124047 factory.go:742] Updating pod condition for preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-29 to (PodScheduled==False, Reason=Unschedulable)
I0212 07:24:35.644030  124047 wrap.go:47] POST /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/events: (1.632436ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:36046]
I0212 07:24:35.644685  124047 wrap.go:47] PUT /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-29/status: (2.778861ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:36044]
I0212 07:24:35.646009  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-29: (1.187417ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:36038]
I0212 07:24:35.647084  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-29: (1.857585ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:36044]
I0212 07:24:35.647380  124047 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 07:24:35.647596  124047 scheduling_queue.go:868] About to try and schedule pod preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-28
I0212 07:24:35.647616  124047 scheduler.go:453] Attempting to schedule pod: preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-28
I0212 07:24:35.647736  124047 factory.go:647] Unable to schedule preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-28: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 07:24:35.647808  124047 factory.go:742] Updating pod condition for preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-28 to (PodScheduled==False, Reason=Unschedulable)
I0212 07:24:35.649059  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-28: (974.801µs) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:36046]
I0212 07:24:35.649791  124047 wrap.go:47] PUT /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-28/status: (1.764948ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:36038]
I0212 07:24:35.650191  124047 wrap.go:47] POST /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/events: (1.794388ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:36048]
I0212 07:24:35.651600  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-28: (1.249578ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:36038]
I0212 07:24:35.651866  124047 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 07:24:35.652062  124047 scheduling_queue.go:868] About to try and schedule pod preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-29
I0212 07:24:35.652083  124047 scheduler.go:453] Attempting to schedule pod: preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-29
I0212 07:24:35.652228  124047 factory.go:647] Unable to schedule preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-29: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 07:24:35.652286  124047 factory.go:742] Updating pod condition for preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-29 to (PodScheduled==False, Reason=Unschedulable)
I0212 07:24:35.653888  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-29: (1.145741ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:36046]
I0212 07:24:35.654695  124047 wrap.go:47] PUT /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-29/status: (2.144882ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:36048]
I0212 07:24:35.655330  124047 wrap.go:47] PATCH /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/events/ppod-29.15828d054b01d473: (2.203386ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:36050]
I0212 07:24:35.656140  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-29: (1.101361ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:36048]
I0212 07:24:35.656413  124047 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 07:24:35.656603  124047 scheduling_queue.go:868] About to try and schedule pod preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-28
I0212 07:24:35.656624  124047 scheduler.go:453] Attempting to schedule pod: preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-28
I0212 07:24:35.656734  124047 factory.go:647] Unable to schedule preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-28: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 07:24:35.656791  124047 factory.go:742] Updating pod condition for preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-28 to (PodScheduled==False, Reason=Unschedulable)
I0212 07:24:35.658334  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-28: (1.149136ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:36050]
I0212 07:24:35.659546  124047 wrap.go:47] PUT /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-28/status: (2.402885ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:36046]
I0212 07:24:35.661372  124047 wrap.go:47] PATCH /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/events/ppod-28.15828d054b5f793e: (2.404945ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:36050]
I0212 07:24:35.661386  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-28: (1.513626ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:36046]
I0212 07:24:35.661660  124047 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 07:24:35.661795  124047 scheduling_queue.go:868] About to try and schedule pod preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-23
I0212 07:24:35.661813  124047 scheduler.go:453] Attempting to schedule pod: preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-23
I0212 07:24:35.661903  124047 factory.go:647] Unable to schedule preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-23: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 07:24:35.661947  124047 factory.go:742] Updating pod condition for preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-23 to (PodScheduled==False, Reason=Unschedulable)
I0212 07:24:35.663806  124047 wrap.go:47] PUT /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-23/status: (1.596451ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:36046]
I0212 07:24:35.664401  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-23: (1.503646ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:36052]
I0212 07:24:35.664797  124047 wrap.go:47] PATCH /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/events/ppod-23.15828d053d4c6629: (2.21384ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:36050]
I0212 07:24:35.665487  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-23: (1.044297ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:36046]
I0212 07:24:35.665730  124047 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 07:24:35.665850  124047 scheduling_queue.go:868] About to try and schedule pod preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-26
I0212 07:24:35.665870  124047 scheduler.go:453] Attempting to schedule pod: preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-26
I0212 07:24:35.665952  124047 factory.go:647] Unable to schedule preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-26: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 07:24:35.665995  124047 factory.go:742] Updating pod condition for preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-26 to (PodScheduled==False, Reason=Unschedulable)
I0212 07:24:35.668107  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-26: (1.481463ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:36052]
I0212 07:24:35.668736  124047 wrap.go:47] POST /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/events: (2.083288ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:36054]
I0212 07:24:35.669011  124047 wrap.go:47] PUT /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-26/status: (2.798895ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:36050]
I0212 07:24:35.670645  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-26: (1.122918ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:36054]
I0212 07:24:35.670920  124047 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 07:24:35.671136  124047 scheduling_queue.go:868] About to try and schedule pod preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-22
I0212 07:24:35.671150  124047 scheduler.go:453] Attempting to schedule pod: preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-22
I0212 07:24:35.671256  124047 factory.go:647] Unable to schedule preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-22: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 07:24:35.671303  124047 factory.go:742] Updating pod condition for preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-22 to (PodScheduled==False, Reason=Unschedulable)
I0212 07:24:35.672645  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-22: (1.092753ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:36054]
I0212 07:24:35.673425  124047 wrap.go:47] PUT /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-22/status: (1.855017ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:36052]
I0212 07:24:35.674415  124047 wrap.go:47] PATCH /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/events/ppod-22.15828d053d0ae9a2: (2.376786ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:36056]
I0212 07:24:35.675442  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-22: (1.286941ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:36052]
I0212 07:24:35.675699  124047 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 07:24:35.675951  124047 scheduling_queue.go:868] About to try and schedule pod preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-24
I0212 07:24:35.676104  124047 scheduler.go:453] Attempting to schedule pod: preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-24
I0212 07:24:35.676168  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/preemptor-pod: (1.249968ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:36056]
I0212 07:24:35.676296  124047 factory.go:647] Unable to schedule preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-24: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 07:24:35.676366  124047 factory.go:742] Updating pod condition for preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-24 to (PodScheduled==False, Reason=Unschedulable)
I0212 07:24:35.677977  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-24: (985.159µs) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:36054]
I0212 07:24:35.679393  124047 wrap.go:47] POST /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/events: (1.400492ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:36058]
I0212 07:24:35.680159  124047 wrap.go:47] PUT /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-24/status: (3.182034ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:36052]
I0212 07:24:35.682274  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-24: (1.539799ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:36058]
I0212 07:24:35.682655  124047 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 07:24:35.682800  124047 scheduling_queue.go:868] About to try and schedule pod preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-18
I0212 07:24:35.682846  124047 scheduler.go:453] Attempting to schedule pod: preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-18
I0212 07:24:35.683019  124047 factory.go:647] Unable to schedule preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-18: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 07:24:35.683094  124047 factory.go:742] Updating pod condition for preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-18 to (PodScheduled==False, Reason=Unschedulable)
I0212 07:24:35.684729  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-18: (1.331965ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:36054]
I0212 07:24:35.685160  124047 wrap.go:47] PUT /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-18/status: (1.735803ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:36058]
I0212 07:24:35.686178  124047 wrap.go:47] PATCH /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/events/ppod-18.15828d053c841b07: (2.260081ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:36060]
I0212 07:24:35.686650  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-18: (1.068037ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:36058]
I0212 07:24:35.686960  124047 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 07:24:35.687169  124047 scheduling_queue.go:868] About to try and schedule pod preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-24
I0212 07:24:35.687188  124047 scheduler.go:453] Attempting to schedule pod: preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-24
I0212 07:24:35.687305  124047 factory.go:647] Unable to schedule preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-24: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 07:24:35.687361  124047 factory.go:742] Updating pod condition for preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-24 to (PodScheduled==False, Reason=Unschedulable)
I0212 07:24:35.688738  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-24: (1.13173ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:36054]
I0212 07:24:35.689324  124047 wrap.go:47] PUT /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-24/status: (1.653825ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:36060]
I0212 07:24:35.691197  124047 wrap.go:47] PATCH /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/events/ppod-24.15828d054d138b0f: (2.725838ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:36062]
I0212 07:24:35.691242  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-24: (1.419646ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:36060]
I0212 07:24:35.691563  124047 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 07:24:35.691754  124047 scheduling_queue.go:868] About to try and schedule pod preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-21
I0212 07:24:35.691776  124047 scheduler.go:453] Attempting to schedule pod: preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-21
I0212 07:24:35.691881  124047 factory.go:647] Unable to schedule preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-21: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 07:24:35.691938  124047 factory.go:742] Updating pod condition for preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-21 to (PodScheduled==False, Reason=Unschedulable)
I0212 07:24:35.694012  124047 wrap.go:47] POST /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/events: (1.45995ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:36064]
I0212 07:24:35.694017  124047 wrap.go:47] PUT /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-21/status: (1.779925ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:36062]
I0212 07:24:35.695298  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-21: (3.064316ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:36054]
I0212 07:24:35.695522  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-21: (948.734µs) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:36062]
I0212 07:24:35.695858  124047 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 07:24:35.696032  124047 scheduling_queue.go:868] About to try and schedule pod preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-19
I0212 07:24:35.696064  124047 scheduler.go:453] Attempting to schedule pod: preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-19
I0212 07:24:35.696171  124047 factory.go:647] Unable to schedule preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-19: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 07:24:35.696228  124047 factory.go:742] Updating pod condition for preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-19 to (PodScheduled==False, Reason=Unschedulable)
I0212 07:24:35.697854  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-19: (1.030903ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:36064]
I0212 07:24:35.698544  124047 wrap.go:47] PUT /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-19/status: (1.714171ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:36054]
I0212 07:24:35.699211  124047 wrap.go:47] POST /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/events: (1.423038ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:36066]
I0212 07:24:35.700703  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-19: (1.27681ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:36054]
I0212 07:24:35.701055  124047 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 07:24:35.701298  124047 scheduling_queue.go:868] About to try and schedule pod preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-21
I0212 07:24:35.701329  124047 scheduler.go:453] Attempting to schedule pod: preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-21
I0212 07:24:35.701425  124047 factory.go:647] Unable to schedule preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-21: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 07:24:35.701470  124047 factory.go:742] Updating pod condition for preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-21 to (PodScheduled==False, Reason=Unschedulable)
I0212 07:24:35.703462  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-21: (1.160863ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:36064]
I0212 07:24:35.703803  124047 wrap.go:47] PUT /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-21/status: (2.029595ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:36066]
I0212 07:24:35.704791  124047 wrap.go:47] PATCH /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/events/ppod-21.15828d054e012cce: (2.435231ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:36068]
I0212 07:24:35.705809  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-21: (1.278399ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:36066]
I0212 07:24:35.706083  124047 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 07:24:35.706249  124047 scheduling_queue.go:868] About to try and schedule pod preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-19
I0212 07:24:35.706268  124047 scheduler.go:453] Attempting to schedule pod: preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-19
I0212 07:24:35.706392  124047 factory.go:647] Unable to schedule preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-19: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 07:24:35.706451  124047 factory.go:742] Updating pod condition for preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-19 to (PodScheduled==False, Reason=Unschedulable)
I0212 07:24:35.707824  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-19: (1.137428ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:36064]
I0212 07:24:35.708253  124047 wrap.go:47] PUT /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-19/status: (1.503129ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:36068]
I0212 07:24:35.709592  124047 wrap.go:47] PATCH /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/events/ppod-19.15828d054e42a323: (2.411828ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:36070]
I0212 07:24:35.709645  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-19: (1.077468ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:36068]
I0212 07:24:35.709896  124047 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 07:24:35.710070  124047 scheduling_queue.go:868] About to try and schedule pod preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-16
I0212 07:24:35.710091  124047 scheduler.go:453] Attempting to schedule pod: preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-16
I0212 07:24:35.710208  124047 factory.go:647] Unable to schedule preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-16: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 07:24:35.710271  124047 factory.go:742] Updating pod condition for preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-16 to (PodScheduled==False, Reason=Unschedulable)
I0212 07:24:35.711883  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-16: (1.261499ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:36064]
I0212 07:24:35.712147  124047 wrap.go:47] PUT /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-16/status: (1.575958ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:36070]
I0212 07:24:35.713166  124047 wrap.go:47] PATCH /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/events/ppod-16.15828d053c354275: (2.185354ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:36072]
I0212 07:24:35.713709  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-16: (1.114542ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:36070]
I0212 07:24:35.714003  124047 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 07:24:35.714213  124047 scheduling_queue.go:868] About to try and schedule pod preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-14
I0212 07:24:35.714231  124047 scheduler.go:453] Attempting to schedule pod: preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-14
I0212 07:24:35.714348  124047 factory.go:647] Unable to schedule preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-14: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 07:24:35.714405  124047 factory.go:742] Updating pod condition for preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-14 to (PodScheduled==False, Reason=Unschedulable)
I0212 07:24:35.715809  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-14: (1.133936ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:36064]
I0212 07:24:35.716136  124047 backoff_utils.go:79] Backing off 2s
I0212 07:24:35.716932  124047 wrap.go:47] PUT /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-14/status: (2.265374ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:36072]
I0212 07:24:35.717935  124047 wrap.go:47] PATCH /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/events/ppod-14.15828d053bedd216: (2.626654ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:36074]
I0212 07:24:35.718401  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-14: (1.050865ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:36072]
I0212 07:24:35.718752  124047 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 07:24:35.718962  124047 scheduling_queue.go:868] About to try and schedule pod preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-17
I0212 07:24:35.718980  124047 scheduler.go:453] Attempting to schedule pod: preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-17
I0212 07:24:35.719087  124047 factory.go:647] Unable to schedule preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-17: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 07:24:35.719136  124047 factory.go:742] Updating pod condition for preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-17 to (PodScheduled==False, Reason=Unschedulable)
I0212 07:24:35.720413  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-17: (1.005511ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:36064]
I0212 07:24:35.721253  124047 wrap.go:47] PUT /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-17/status: (1.848833ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:36074]
I0212 07:24:35.721608  124047 wrap.go:47] POST /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/events: (1.891272ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:36076]
I0212 07:24:35.722810  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-17: (1.091913ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:36074]
I0212 07:24:35.723198  124047 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 07:24:35.723468  124047 scheduling_queue.go:868] About to try and schedule pod preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-15
I0212 07:24:35.723511  124047 scheduler.go:453] Attempting to schedule pod: preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-15
I0212 07:24:35.723635  124047 factory.go:647] Unable to schedule preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-15: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 07:24:35.723700  124047 factory.go:742] Updating pod condition for preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-15 to (PodScheduled==False, Reason=Unschedulable)
I0212 07:24:35.725264  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-15: (1.282584ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:36064]
I0212 07:24:35.725366  124047 reflector.go:248] k8s.io/client-go/informers/factory.go:132: forcing resync
I0212 07:24:35.725727  124047 reflector.go:248] k8s.io/client-go/informers/factory.go:132: forcing resync
I0212 07:24:35.726036  124047 wrap.go:47] POST /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/events: (1.696902ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:36078]
I0212 07:24:35.726334  124047 wrap.go:47] PUT /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-15/status: (2.354088ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:36076]
I0212 07:24:35.728203  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-15: (1.364397ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:36078]
I0212 07:24:35.728485  124047 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 07:24:35.728679  124047 scheduling_queue.go:868] About to try and schedule pod preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-17
I0212 07:24:35.728706  124047 scheduler.go:453] Attempting to schedule pod: preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-17
I0212 07:24:35.728838  124047 factory.go:647] Unable to schedule preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-17: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 07:24:35.728903  124047 factory.go:742] Updating pod condition for preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-17 to (PodScheduled==False, Reason=Unschedulable)
I0212 07:24:35.730642  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-17: (1.482712ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:36064]
I0212 07:24:35.730656  124047 wrap.go:47] PUT /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-17/status: (1.488811ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:36078]
I0212 07:24:35.730676  124047 reflector.go:248] k8s.io/client-go/informers/factory.go:132: forcing resync
I0212 07:24:35.730679  124047 reflector.go:248] k8s.io/client-go/informers/factory.go:132: forcing resync
I0212 07:24:35.730698  124047 reflector.go:248] k8s.io/client-go/informers/factory.go:132: forcing resync
I0212 07:24:35.732402  124047 wrap.go:47] PATCH /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/events/ppod-17.15828d054fa03485: (2.69011ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:36080]
I0212 07:24:35.732432  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-17: (1.288373ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:36064]
I0212 07:24:35.732780  124047 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 07:24:35.733030  124047 scheduling_queue.go:868] About to try and schedule pod preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-15
I0212 07:24:35.733062  124047 scheduler.go:453] Attempting to schedule pod: preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-15
I0212 07:24:35.733186  124047 factory.go:647] Unable to schedule preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-15: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 07:24:35.733243  124047 factory.go:742] Updating pod condition for preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-15 to (PodScheduled==False, Reason=Unschedulable)
I0212 07:24:35.734843  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-15: (1.341404ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:36078]
I0212 07:24:35.735108  124047 wrap.go:47] PUT /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-15/status: (1.571135ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:36080]
I0212 07:24:35.735154  124047 backoff_utils.go:79] Backing off 2s
I0212 07:24:35.736391  124047 wrap.go:47] PATCH /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/events/ppod-15.15828d054fe5c332: (2.262068ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:36082]
I0212 07:24:35.736980  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-15: (1.384148ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:36080]
I0212 07:24:35.737271  124047 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 07:24:35.737437  124047 scheduling_queue.go:868] About to try and schedule pod preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-12
I0212 07:24:35.737458  124047 scheduler.go:453] Attempting to schedule pod: preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-12
I0212 07:24:35.737585  124047 factory.go:647] Unable to schedule preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-12: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 07:24:35.737640  124047 factory.go:742] Updating pod condition for preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-12 to (PodScheduled==False, Reason=Unschedulable)
I0212 07:24:35.740084  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-12: (1.223726ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:36084]
I0212 07:24:35.740597  124047 wrap.go:47] PUT /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-12/status: (1.867514ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:36082]
I0212 07:24:35.741592  124047 wrap.go:47] PATCH /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/events/ppod-12.15828d053b9dc2dd: (2.855252ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:36078]
I0212 07:24:35.742108  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-12: (1.039641ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:36082]
I0212 07:24:35.742410  124047 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 07:24:35.742626  124047 scheduling_queue.go:868] About to try and schedule pod preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-9
I0212 07:24:35.742650  124047 scheduler.go:453] Attempting to schedule pod: preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-9
I0212 07:24:35.742805  124047 factory.go:647] Unable to schedule preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-9: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 07:24:35.742862  124047 factory.go:742] Updating pod condition for preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-9 to (PodScheduled==False, Reason=Unschedulable)
I0212 07:24:35.744543  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-9: (1.465974ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:36078]
I0212 07:24:35.744950  124047 wrap.go:47] PUT /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-9/status: (1.855525ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:36084]
I0212 07:24:35.745682  124047 wrap.go:47] PATCH /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/events/ppod-9.15828d053b5acda0: (2.270879ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:36086]
I0212 07:24:35.746654  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-9: (1.26922ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:36084]
I0212 07:24:35.746907  124047 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 07:24:35.747105  124047 scheduling_queue.go:868] About to try and schedule pod preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-13
I0212 07:24:35.747125  124047 scheduler.go:453] Attempting to schedule pod: preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-13
I0212 07:24:35.747246  124047 factory.go:647] Unable to schedule preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-13: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 07:24:35.747305  124047 factory.go:742] Updating pod condition for preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-13 to (PodScheduled==False, Reason=Unschedulable)
I0212 07:24:35.748688  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-13: (1.090094ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:36086]
I0212 07:24:35.749566  124047 wrap.go:47] PUT /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-13/status: (1.961619ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:36078]
I0212 07:24:35.749727  124047 wrap.go:47] POST /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/events: (1.86845ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:36088]
I0212 07:24:35.750967  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-13: (1.059967ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:36078]
I0212 07:24:35.751296  124047 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 07:24:35.751471  124047 scheduling_queue.go:868] About to try and schedule pod preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-11
I0212 07:24:35.751513  124047 scheduler.go:453] Attempting to schedule pod: preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-11
I0212 07:24:35.751626  124047 factory.go:647] Unable to schedule preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-11: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 07:24:35.751672  124047 factory.go:742] Updating pod condition for preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-11 to (PodScheduled==False, Reason=Unschedulable)
I0212 07:24:35.753464  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-11: (1.345638ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:36086]
I0212 07:24:35.753906  124047 wrap.go:47] POST /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/events: (1.666848ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:36090]
I0212 07:24:35.754072  124047 wrap.go:47] PUT /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-11/status: (1.930009ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:36088]
I0212 07:24:35.755719  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-11: (1.136835ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:36090]
I0212 07:24:35.756034  124047 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 07:24:35.756235  124047 scheduling_queue.go:868] About to try and schedule pod preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-13
I0212 07:24:35.756253  124047 scheduler.go:453] Attempting to schedule pod: preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-13
I0212 07:24:35.756379  124047 factory.go:647] Unable to schedule preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-13: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 07:24:35.756431  124047 factory.go:742] Updating pod condition for preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-13 to (PodScheduled==False, Reason=Unschedulable)
I0212 07:24:35.759030  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-13: (2.152283ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:36086]
I0212 07:24:35.759159  124047 wrap.go:47] PUT /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-13/status: (1.905629ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:36090]
I0212 07:24:35.760469  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-13: (1.010362ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:36090]
I0212 07:24:35.760831  124047 wrap.go:47] PATCH /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/events/ppod-13.15828d05514dff4d: (2.035207ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:36092]
I0212 07:24:35.760871  124047 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 07:24:35.761030  124047 scheduling_queue.go:868] About to try and schedule pod preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-11
I0212 07:24:35.761066  124047 scheduler.go:453] Attempting to schedule pod: preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-11
I0212 07:24:35.761201  124047 factory.go:647] Unable to schedule preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-11: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 07:24:35.761256  124047 factory.go:742] Updating pod condition for preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-11 to (PodScheduled==False, Reason=Unschedulable)
I0212 07:24:35.763165  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-11: (1.49601ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:36086]
I0212 07:24:35.763791  124047 wrap.go:47] PUT /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-11/status: (2.311459ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:36090]
I0212 07:24:35.764690  124047 wrap.go:47] PATCH /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/events/ppod-11.15828d055190b1a8: (2.60455ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:36094]
I0212 07:24:35.765190  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-11: (1.033643ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:36090]
I0212 07:24:35.765429  124047 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 07:24:35.765626  124047 scheduling_queue.go:868] About to try and schedule pod preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-6
I0212 07:24:35.765644  124047 scheduler.go:453] Attempting to schedule pod: preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-6
I0212 07:24:35.765708  124047 factory.go:647] Unable to schedule preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-6: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 07:24:35.765745  124047 factory.go:742] Updating pod condition for preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-6 to (PodScheduled==False, Reason=Unschedulable)
I0212 07:24:35.767621  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-6: (1.231549ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:36086]
I0212 07:24:35.768390  124047 wrap.go:47] PUT /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-6/status: (1.71908ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:36094]
I0212 07:24:35.768780  124047 wrap.go:47] PATCH /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/events/ppod-6.15828d053b1a361c: (2.313619ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:36096]
I0212 07:24:35.770231  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-6: (986.026µs) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:36096]
I0212 07:24:35.777264  124047 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 07:24:35.779195  124047 scheduling_queue.go:868] About to try and schedule pod preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-10
I0212 07:24:35.779248  124047 scheduler.go:453] Attempting to schedule pod: preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-10
I0212 07:24:35.779430  124047 factory.go:647] Unable to schedule preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-10: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 07:24:35.779529  124047 factory.go:742] Updating pod condition for preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-10 to (PodScheduled==False, Reason=Unschedulable)
I0212 07:24:35.783182  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-10: (1.817042ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:36098]
I0212 07:24:35.783223  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/preemptor-pod: (1.978573ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:36096]
I0212 07:24:35.783657  124047 wrap.go:47] PUT /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-10/status: (2.411341ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:36086]
I0212 07:24:35.783812  124047 wrap.go:47] POST /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/events: (2.568014ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:36100]
I0212 07:24:35.785443  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-10: (1.289519ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:36096]
I0212 07:24:35.785745  124047 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 07:24:35.785912  124047 scheduling_queue.go:868] About to try and schedule pod preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-8
I0212 07:24:35.785932  124047 scheduler.go:453] Attempting to schedule pod: preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-8
I0212 07:24:35.786063  124047 factory.go:647] Unable to schedule preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-8: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 07:24:35.786119  124047 factory.go:742] Updating pod condition for preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-8 to (PodScheduled==False, Reason=Unschedulable)
I0212 07:24:35.789991  124047 wrap.go:47] POST /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/events: (3.160783ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:36102]
I0212 07:24:35.790002  124047 wrap.go:47] PUT /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-8/status: (3.661476ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:36096]
I0212 07:24:35.790321  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-8: (3.957073ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:36098]
I0212 07:24:35.791552  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-8: (1.138968ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:36102]
I0212 07:24:35.791814  124047 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 07:24:35.792671  124047 scheduling_queue.go:868] About to try and schedule pod preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-10
I0212 07:24:35.792694  124047 scheduler.go:453] Attempting to schedule pod: preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-10
I0212 07:24:35.792836  124047 factory.go:647] Unable to schedule preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-10: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 07:24:35.792886  124047 factory.go:742] Updating pod condition for preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-10 to (PodScheduled==False, Reason=Unschedulable)
I0212 07:24:35.795560  124047 wrap.go:47] PUT /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-10/status: (2.340741ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:36098]
I0212 07:24:35.797086  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-10: (3.422515ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:36096]
I0212 07:24:35.797618  124047 wrap.go:47] PATCH /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/events/ppod-10.15828d05533937b8: (3.825416ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:36104]
I0212 07:24:35.797693  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-10: (1.754934ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:36098]
I0212 07:24:35.797956  124047 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 07:24:35.798260  124047 backoff_utils.go:79] Backing off 2s
I0212 07:24:35.798442  124047 scheduling_queue.go:868] About to try and schedule pod preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-8
I0212 07:24:35.798598  124047 scheduler.go:453] Attempting to schedule pod: preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-8
I0212 07:24:35.798758  124047 factory.go:647] Unable to schedule preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-8: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 07:24:35.798832  124047 factory.go:742] Updating pod condition for preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-8 to (PodScheduled==False, Reason=Unschedulable)
I0212 07:24:35.800281  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-8: (1.172116ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:36096]
I0212 07:24:35.801266  124047 wrap.go:47] PUT /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-8/status: (2.182284ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:36104]
I0212 07:24:35.803189  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-8: (1.230933ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:36104]
I0212 07:24:35.803484  124047 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 07:24:35.803606  124047 wrap.go:47] PATCH /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/events/ppod-8.15828d05539e3d15: (2.880269ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:36096]
I0212 07:24:35.803685  124047 scheduling_queue.go:868] About to try and schedule pod preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-4
I0212 07:24:35.803702  124047 scheduler.go:453] Attempting to schedule pod: preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-4
I0212 07:24:35.803785  124047 factory.go:647] Unable to schedule preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-4: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 07:24:35.803858  124047 factory.go:742] Updating pod condition for preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-4 to (PodScheduled==False, Reason=Unschedulable)
I0212 07:24:35.805809  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-4: (1.677504ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:36106]
I0212 07:24:35.806398  124047 wrap.go:47] PUT /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-4/status: (2.275088ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:36104]
I0212 07:24:35.806872  124047 backoff_utils.go:79] Backing off 2s
I0212 07:24:35.808624  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-4: (1.60496ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:36104]
I0212 07:24:35.808910  124047 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 07:24:35.809037  124047 wrap.go:47] PATCH /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/events/ppod-4.15828d053acc674c: (2.432511ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:36110]
I0212 07:24:35.809201  124047 scheduling_queue.go:868] About to try and schedule pod preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-7
I0212 07:24:35.809227  124047 scheduler.go:453] Attempting to schedule pod: preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-7
I0212 07:24:35.809338  124047 factory.go:647] Unable to schedule preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-7: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 07:24:35.809396  124047 factory.go:742] Updating pod condition for preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-7 to (PodScheduled==False, Reason=Unschedulable)
I0212 07:24:35.811069  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-7: (1.295707ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:36104]
I0212 07:24:35.811436  124047 wrap.go:47] POST /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/events: (1.424166ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:36112]
I0212 07:24:35.812273  124047 wrap.go:47] PUT /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-7/status: (2.558102ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:36106]
I0212 07:24:35.813828  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-7: (1.123995ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:36112]
I0212 07:24:35.814064  124047 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 07:24:35.814220  124047 scheduling_queue.go:868] About to try and schedule pod preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-0
I0212 07:24:35.814237  124047 scheduler.go:453] Attempting to schedule pod: preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-0
I0212 07:24:35.814355  124047 factory.go:647] Unable to schedule preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-0: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 07:24:35.814403  124047 factory.go:742] Updating pod condition for preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-0 to (PodScheduled==False, Reason=Unschedulable)
I0212 07:24:35.816562  124047 wrap.go:47] PUT /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-0/status: (1.830132ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:36112]
I0212 07:24:35.818264  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-0: (1.275703ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:36112]
I0212 07:24:35.818831  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-0: (3.385568ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:36114]
I0212 07:24:35.819217  124047 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 07:24:35.819409  124047 scheduling_queue.go:868] About to try and schedule pod preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-7
I0212 07:24:35.819430  124047 scheduler.go:453] Attempting to schedule pod: preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-7
I0212 07:24:35.819593  124047 factory.go:647] Unable to schedule preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-7: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 07:24:35.819648  124047 factory.go:742] Updating pod condition for preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-7 to (PodScheduled==False, Reason=Unschedulable)
I0212 07:24:35.820363  124047 wrap.go:47] PATCH /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/events/ppod-0.15828d053a8e1a6a: (5.323051ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:36104]
I0212 07:24:35.821215  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-7: (1.201836ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:36114]
I0212 07:24:35.821545  124047 backoff_utils.go:79] Backing off 2s
I0212 07:24:35.821734  124047 wrap.go:47] PUT /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-7/status: (1.842284ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:36112]
I0212 07:24:35.823323  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-7: (1.040557ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:36112]
I0212 07:24:35.823601  124047 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 07:24:35.823674  124047 wrap.go:47] PATCH /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/events/ppod-7.15828d0555015af5: (2.404985ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:36104]
I0212 07:24:35.823847  124047 scheduling_queue.go:868] About to try and schedule pod preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-5
I0212 07:24:35.823879  124047 scheduler.go:453] Attempting to schedule pod: preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-5
I0212 07:24:35.823964  124047 factory.go:647] Unable to schedule preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-5: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 07:24:35.824008  124047 factory.go:742] Updating pod condition for preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-5 to (PodScheduled==False, Reason=Unschedulable)
I0212 07:24:35.826074  124047 wrap.go:47] POST /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/events: (1.409167ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:36116]
I0212 07:24:35.826092  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-5: (1.832765ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:36114]
I0212 07:24:35.826173  124047 wrap.go:47] PUT /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-5/status: (1.932566ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:36112]
I0212 07:24:35.827723  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-5: (1.098974ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:36112]
I0212 07:24:35.827996  124047 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 07:24:35.828249  124047 scheduling_queue.go:868] About to try and schedule pod preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-3
I0212 07:24:35.828267  124047 scheduler.go:453] Attempting to schedule pod: preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-3
I0212 07:24:35.828386  124047 factory.go:647] Unable to schedule preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-3: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 07:24:35.828452  124047 factory.go:742] Updating pod condition for preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-3 to (PodScheduled==False, Reason=Unschedulable)
I0212 07:24:35.829936  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-3: (1.257009ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:36112]
I0212 07:24:35.831085  124047 wrap.go:47] POST /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/events: (1.520166ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:36118]
I0212 07:24:35.831092  124047 wrap.go:47] PUT /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-3/status: (2.394336ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:36116]
I0212 07:24:35.832773  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-3: (1.218309ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:36118]
I0212 07:24:35.833028  124047 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 07:24:35.833229  124047 scheduling_queue.go:868] About to try and schedule pod preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-5
I0212 07:24:35.833249  124047 scheduler.go:453] Attempting to schedule pod: preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-5
I0212 07:24:35.833380  124047 factory.go:647] Unable to schedule preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-5: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 07:24:35.833434  124047 factory.go:742] Updating pod condition for preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-5 to (PodScheduled==False, Reason=Unschedulable)
I0212 07:24:35.834881  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-5: (1.166517ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:36112]
I0212 07:24:35.836353  124047 wrap.go:47] PATCH /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/events/ppod-5.15828d0555e067ba: (2.13924ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:36120]
I0212 07:24:35.836356  124047 wrap.go:47] PUT /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-5/status: (2.648309ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:36118]
I0212 07:24:35.838112  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-5: (1.328721ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:36118]
I0212 07:24:35.838467  124047 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 07:24:35.838681  124047 scheduling_queue.go:868] About to try and schedule pod preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-3
I0212 07:24:35.838701  124047 scheduler.go:453] Attempting to schedule pod: preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-3
I0212 07:24:35.838795  124047 factory.go:647] Unable to schedule preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-3: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 07:24:35.838852  124047 factory.go:742] Updating pod condition for preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-3 to (PodScheduled==False, Reason=Unschedulable)
I0212 07:24:35.841084  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-3: (1.517433ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:36112]
I0212 07:24:35.841124  124047 wrap.go:47] PUT /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-3/status: (2.016143ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:36118]
I0212 07:24:35.841697  124047 backoff_utils.go:79] Backing off 2s
I0212 07:24:35.841920  124047 wrap.go:47] PATCH /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/events/ppod-3.15828d0556240c29: (2.281782ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:36122]
I0212 07:24:35.842874  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-3: (1.095084ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:36118]
I0212 07:24:35.843177  124047 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 07:24:35.843532  124047 scheduling_queue.go:868] About to try and schedule pod preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-2
I0212 07:24:35.843556  124047 scheduler.go:453] Attempting to schedule pod: preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-2
I0212 07:24:35.843708  124047 factory.go:647] Unable to schedule preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-2: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 07:24:35.843771  124047 factory.go:742] Updating pod condition for preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-2 to (PodScheduled==False, Reason=Unschedulable)
I0212 07:24:35.845297  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-2: (1.263265ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:36112]
I0212 07:24:35.846734  124047 wrap.go:47] POST /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/events: (2.248402ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:36124]
I0212 07:24:35.846902  124047 wrap.go:47] PUT /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-2/status: (2.739399ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:36122]
I0212 07:24:35.848456  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-2: (1.016044ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:36124]
I0212 07:24:35.848722  124047 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 07:24:35.848922  124047 scheduling_queue.go:868] About to try and schedule pod preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-1
I0212 07:24:35.848940  124047 scheduler.go:453] Attempting to schedule pod: preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-1
I0212 07:24:35.849061  124047 factory.go:647] Unable to schedule preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-1: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 07:24:35.849118  124047 factory.go:742] Updating pod condition for preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-1 to (PodScheduled==False, Reason=Unschedulable)
I0212 07:24:35.850650  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-1: (1.279863ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:36112]
I0212 07:24:35.851074  124047 wrap.go:47] POST /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/events: (1.371365ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:36126]
I0212 07:24:35.851328  124047 wrap.go:47] PUT /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-1/status: (1.960346ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:36124]
I0212 07:24:35.853018  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-1: (1.186738ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:36124]
I0212 07:24:35.853316  124047 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 07:24:35.853577  124047 scheduling_queue.go:868] About to try and schedule pod preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-2
I0212 07:24:35.853598  124047 scheduler.go:453] Attempting to schedule pod: preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-2
I0212 07:24:35.853714  124047 factory.go:647] Unable to schedule preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-2: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 07:24:35.853763  124047 factory.go:742] Updating pod condition for preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-2 to (PodScheduled==False, Reason=Unschedulable)
I0212 07:24:35.855102  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-2: (1.126674ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:36124]
I0212 07:24:35.855374  124047 backoff_utils.go:79] Backing off 2s
I0212 07:24:35.855452  124047 wrap.go:47] PUT /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-2/status: (1.4698ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:36112]
I0212 07:24:35.857410  124047 wrap.go:47] PATCH /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/events/ppod-2.15828d05570df2e4: (2.675207ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:36128]
I0212 07:24:35.859411  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-2: (3.483881ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:36112]
I0212 07:24:35.859731  124047 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 07:24:35.859893  124047 scheduling_queue.go:868] About to try and schedule pod preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-1
I0212 07:24:35.859918  124047 scheduler.go:453] Attempting to schedule pod: preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-1
I0212 07:24:35.860035  124047 factory.go:647] Unable to schedule preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-1: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 07:24:35.860095  124047 factory.go:742] Updating pod condition for preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-1 to (PodScheduled==False, Reason=Unschedulable)
I0212 07:24:35.861931  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-1: (1.615715ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:36124]
I0212 07:24:35.862186  124047 wrap.go:47] PUT /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-1/status: (1.871902ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:36128]
I0212 07:24:35.862244  124047 backoff_utils.go:79] Backing off 2s
I0212 07:24:35.863796  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-1: (1.25084ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:36128]
I0212 07:24:35.864018  124047 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 07:24:35.864190  124047 wrap.go:47] PATCH /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/events/ppod-1.15828d05575f8dcc: (3.010903ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:36130]
I0212 07:24:35.876731  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/preemptor-pod: (1.630505ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:36128]
I0212 07:24:35.977573  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/preemptor-pod: (2.434317ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:36128]
I0212 07:24:36.077304  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/preemptor-pod: (2.063053ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:36128]
I0212 07:24:36.177332  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/preemptor-pod: (2.175023ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:36128]
I0212 07:24:36.277215  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/preemptor-pod: (2.103806ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:36128]
I0212 07:24:36.377053  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/preemptor-pod: (1.907555ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:36128]
I0212 07:24:36.477033  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/preemptor-pod: (1.864306ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:36128]
I0212 07:24:36.577113  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/preemptor-pod: (1.95137ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:36128]
I0212 07:24:36.627600  124047 scheduling_queue.go:868] About to try and schedule pod preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/preemptor-pod
I0212 07:24:36.627634  124047 scheduler.go:453] Attempting to schedule pod: preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/preemptor-pod
I0212 07:24:36.627899  124047 scheduler_binder.go:269] AssumePodVolumes for pod "preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/preemptor-pod", node "node1"
I0212 07:24:36.627923  124047 scheduler_binder.go:279] AssumePodVolumes for pod "preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/preemptor-pod", node "node1": all PVCs bound and nothing to do
I0212 07:24:36.627978  124047 factory.go:733] Attempting to bind preemptor-pod to node1
I0212 07:24:36.628033  124047 scheduling_queue.go:868] About to try and schedule pod preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-30
I0212 07:24:36.628074  124047 scheduler.go:453] Attempting to schedule pod: preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-30
I0212 07:24:36.628203  124047 factory.go:647] Unable to schedule preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-30: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 07:24:36.628254  124047 factory.go:742] Updating pod condition for preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-30 to (PodScheduled==False, Reason=Unschedulable)
I0212 07:24:36.630103  124047 wrap.go:47] POST /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/preemptor-pod/binding: (1.704299ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:36128]
I0212 07:24:36.630181  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-30: (1.715055ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:36124]
I0212 07:24:36.630307  124047 scheduler.go:571] pod preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/preemptor-pod is bound successfully on node node1, 1 nodes evaluated, 1 nodes were found feasible
I0212 07:24:36.630569  124047 backoff_utils.go:79] Backing off 2s
I0212 07:24:36.631028  124047 wrap.go:47] PUT /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-30/status: (2.053156ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:36170]
I0212 07:24:36.631600  124047 wrap.go:47] PATCH /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/events/ppod-30.15828d053e19fae9: (2.463261ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:36172]
I0212 07:24:36.632651  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-30: (1.102997ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:36124]
I0212 07:24:36.632871  124047 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 07:24:36.633113  124047 scheduling_queue.go:868] About to try and schedule pod preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-34
I0212 07:24:36.633135  124047 scheduler.go:453] Attempting to schedule pod: preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-34
I0212 07:24:36.633258  124047 factory.go:647] Unable to schedule preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-34: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 07:24:36.633295  124047 wrap.go:47] POST /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/events: (1.30998ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:36172]
I0212 07:24:36.633303  124047 factory.go:742] Updating pod condition for preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-34 to (PodScheduled==False, Reason=Unschedulable)
I0212 07:24:36.634691  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-34: (1.195174ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:36124]
I0212 07:24:36.634932  124047 backoff_utils.go:79] Backing off 2s
I0212 07:24:36.635328  124047 wrap.go:47] PUT /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-34/status: (1.756795ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:36128]
I0212 07:24:36.636183  124047 wrap.go:47] PATCH /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/events/ppod-34.15828d05488cb876: (2.258453ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:36172]
I0212 07:24:36.637221  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-34: (1.515396ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:36128]
I0212 07:24:36.637528  124047 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 07:24:36.637671  124047 scheduling_queue.go:868] About to try and schedule pod preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-35
I0212 07:24:36.637689  124047 scheduler.go:453] Attempting to schedule pod: preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-35
I0212 07:24:36.637757  124047 factory.go:647] Unable to schedule preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-35: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 07:24:36.637799  124047 factory.go:742] Updating pod condition for preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-35 to (PodScheduled==False, Reason=Unschedulable)
I0212 07:24:36.639158  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-35: (1.092471ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:36124]
I0212 07:24:36.639382  124047 backoff_utils.go:79] Backing off 2s
I0212 07:24:36.639886  124047 wrap.go:47] PUT /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-35/status: (1.819138ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:36172]
I0212 07:24:36.641576  124047 wrap.go:47] PATCH /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/events/ppod-35.15828d05483abeb5: (2.968981ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:36174]
I0212 07:24:36.641850  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-35: (1.497088ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:36172]
I0212 07:24:36.642147  124047 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 07:24:36.642323  124047 scheduling_queue.go:868] About to try and schedule pod preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-33
I0212 07:24:36.642342  124047 scheduler.go:453] Attempting to schedule pod: preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-33
I0212 07:24:36.642423  124047 factory.go:647] Unable to schedule preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-33: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 07:24:36.642469  124047 factory.go:742] Updating pod condition for preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-33 to (PodScheduled==False, Reason=Unschedulable)
I0212 07:24:36.645220  124047 wrap.go:47] PUT /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-33/status: (2.107306ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:36124]
I0212 07:24:36.645431  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-33: (2.287032ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:36174]
I0212 07:24:36.646280  124047 wrap.go:47] PATCH /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/events/ppod-33.15828d053e94faac: (2.966648ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:36176]
I0212 07:24:36.647086  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-33: (1.150694ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:36174]
I0212 07:24:36.647396  124047 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 07:24:36.647731  124047 scheduling_queue.go:868] About to try and schedule pod preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-37
I0212 07:24:36.647753  124047 scheduler.go:453] Attempting to schedule pod: preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-37
I0212 07:24:36.647858  124047 factory.go:647] Unable to schedule preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-37: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 07:24:36.647908  124047 factory.go:742] Updating pod condition for preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-37 to (PodScheduled==False, Reason=Unschedulable)
I0212 07:24:36.649677  124047 wrap.go:47] PUT /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-37/status: (1.521722ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:36176]
I0212 07:24:36.649727  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-37: (1.090642ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:36124]
I0212 07:24:36.650019  124047 backoff_utils.go:79] Backing off 2s
I0212 07:24:36.650953  124047 wrap.go:47] PATCH /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/events/ppod-37.15828d0547087077: (2.287034ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:36178]
I0212 07:24:36.651477  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-37: (1.30632ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:36124]
I0212 07:24:36.651745  124047 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 07:24:36.651969  124047 scheduling_queue.go:868] About to try and schedule pod preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-20
I0212 07:24:36.651988  124047 scheduler.go:453] Attempting to schedule pod: preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-20
I0212 07:24:36.652089  124047 factory.go:647] Unable to schedule preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-20: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 07:24:36.652137  124047 factory.go:742] Updating pod condition for preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-20 to (PodScheduled==False, Reason=Unschedulable)
I0212 07:24:36.653536  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-20: (1.172099ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:36176]
I0212 07:24:36.653809  124047 backoff_utils.go:79] Backing off 2s
I0212 07:24:36.654308  124047 wrap.go:47] PUT /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-20/status: (1.932472ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:36178]
I0212 07:24:36.655165  124047 wrap.go:47] PATCH /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/events/ppod-20.15828d053cc16b64: (2.301746ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:36180]
I0212 07:24:36.655809  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-20: (1.069066ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:36178]
I0212 07:24:36.656096  124047 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 07:24:36.656279  124047 scheduling_queue.go:868] About to try and schedule pod preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-47
I0212 07:24:36.656299  124047 scheduler.go:453] Attempting to schedule pod: preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-47
I0212 07:24:36.656406  124047 factory.go:647] Unable to schedule preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-47: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 07:24:36.656461  124047 factory.go:742] Updating pod condition for preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-47 to (PodScheduled==False, Reason=Unschedulable)
I0212 07:24:36.658814  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-47: (1.83292ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:36180]
I0212 07:24:36.659071  124047 wrap.go:47] PUT /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-47/status: (2.36369ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:36176]
I0212 07:24:36.659138  124047 backoff_utils.go:79] Backing off 2s
I0212 07:24:36.659959  124047 wrap.go:47] PATCH /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/events/ppod-47.15828d0540f5336d: (2.913816ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:36182]
I0212 07:24:36.660436  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-47: (1.016732ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:36176]
I0212 07:24:36.660796  124047 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 07:24:36.661034  124047 scheduling_queue.go:868] About to try and schedule pod preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-40
I0212 07:24:36.661061  124047 scheduler.go:453] Attempting to schedule pod: preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-40
I0212 07:24:36.661150  124047 factory.go:647] Unable to schedule preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-40: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 07:24:36.661196  124047 factory.go:742] Updating pod condition for preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-40 to (PodScheduled==False, Reason=Unschedulable)
I0212 07:24:36.662822  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-40: (1.222778ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:36180]
I0212 07:24:36.663128  124047 backoff_utils.go:79] Backing off 2s
I0212 07:24:36.663628  124047 wrap.go:47] PUT /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-40/status: (2.212379ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:36182]
I0212 07:24:36.664147  124047 wrap.go:47] PATCH /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/events/ppod-40.15828d0545d12747: (2.257193ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:36184]
I0212 07:24:36.665222  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-40: (1.082436ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:36182]
I0212 07:24:36.665597  124047 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 07:24:36.665827  124047 scheduling_queue.go:868] About to try and schedule pod preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-36
I0212 07:24:36.665848  124047 scheduler.go:453] Attempting to schedule pod: preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-36
I0212 07:24:36.665963  124047 factory.go:647] Unable to schedule preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-36: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 07:24:36.666014  124047 factory.go:742] Updating pod condition for preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-36 to (PodScheduled==False, Reason=Unschedulable)
I0212 07:24:36.667578  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-36: (1.29747ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:36180]
I0212 07:24:36.667811  124047 backoff_utils.go:79] Backing off 2s
I0212 07:24:36.668001  124047 wrap.go:47] PUT /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-36/status: (1.739443ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:36184]
I0212 07:24:36.668726  124047 wrap.go:47] PATCH /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/events/ppod-36.15828d053f214cc6: (1.887786ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:36186]
I0212 07:24:36.669484  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-36: (1.05624ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:36184]
I0212 07:24:36.669802  124047 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 07:24:36.669992  124047 scheduling_queue.go:868] About to try and schedule pod preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-38
I0212 07:24:36.670017  124047 scheduler.go:453] Attempting to schedule pod: preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-38
I0212 07:24:36.670118  124047 factory.go:647] Unable to schedule preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-38: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 07:24:36.670164  124047 factory.go:742] Updating pod condition for preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-38 to (PodScheduled==False, Reason=Unschedulable)
I0212 07:24:36.671701  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-38: (1.247263ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:36180]
I0212 07:24:36.672023  124047 wrap.go:47] PUT /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-38/status: (1.569179ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:36186]
I0212 07:24:36.672029  124047 backoff_utils.go:79] Backing off 2s
I0212 07:24:36.673118  124047 wrap.go:47] PATCH /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/events/ppod-38.15828d053f6ab4ce: (2.120357ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:36188]
I0212 07:24:36.673574  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-38: (1.110001ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:36186]
I0212 07:24:36.673794  124047 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 07:24:36.673924  124047 scheduling_queue.go:868] About to try and schedule pod preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-42
I0212 07:24:36.673946  124047 scheduler.go:453] Attempting to schedule pod: preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-42
I0212 07:24:36.674093  124047 factory.go:647] Unable to schedule preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-42: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 07:24:36.674150  124047 factory.go:742] Updating pod condition for preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-42 to (PodScheduled==False, Reason=Unschedulable)
I0212 07:24:36.675556  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-42: (1.184723ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:36188]
I0212 07:24:36.675816  124047 backoff_utils.go:79] Backing off 2s
I0212 07:24:36.676330  124047 wrap.go:47] PUT /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-42/status: (1.929741ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:36180]
I0212 07:24:36.676354  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/preemptor-pod: (1.069676ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:36192]
I0212 07:24:36.676884  124047 preemption_test.go:583] Check unschedulable pods still exists and were never scheduled...
I0212 07:24:36.677898  124047 wrap.go:47] PATCH /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/events/ppod-42.15828d0544a1af92: (2.692965ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:36190]
I0212 07:24:36.677947  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-42: (1.24334ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:36180]
I0212 07:24:36.678191  124047 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 07:24:36.678345  124047 scheduling_queue.go:868] About to try and schedule pod preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-44
I0212 07:24:36.678371  124047 scheduler.go:453] Attempting to schedule pod: preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-44
I0212 07:24:36.678387  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-0: (1.06937ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:36192]
I0212 07:24:36.678466  124047 factory.go:647] Unable to schedule preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-44: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 07:24:36.678540  124047 factory.go:742] Updating pod condition for preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-44 to (PodScheduled==False, Reason=Unschedulable)
I0212 07:24:36.679868  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-44: (1.085493ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:36188]
I0212 07:24:36.680070  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-1: (1.087346ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:36194]
I0212 07:24:36.680182  124047 backoff_utils.go:79] Backing off 2s
I0212 07:24:36.680467  124047 wrap.go:47] PUT /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-44/status: (1.717402ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:36190]
I0212 07:24:36.681483  124047 wrap.go:47] PATCH /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/events/ppod-44.15828d0544546136: (2.318245ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:36196]
I0212 07:24:36.681636  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-2: (1.10453ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:36194]
I0212 07:24:36.681940  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-44: (1.031127ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:36190]
I0212 07:24:36.682162  124047 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 07:24:36.682329  124047 scheduling_queue.go:868] About to try and schedule pod preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-41
I0212 07:24:36.682350  124047 scheduler.go:453] Attempting to schedule pod: preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-41
I0212 07:24:36.682432  124047 factory.go:647] Unable to schedule preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-41: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 07:24:36.682480  124047 factory.go:742] Updating pod condition for preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-41 to (PodScheduled==False, Reason=Unschedulable)
I0212 07:24:36.683057  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-3: (1.028596ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:36196]
I0212 07:24:36.683965  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-41: (1.238291ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:36190]
I0212 07:24:36.684217  124047 backoff_utils.go:79] Backing off 2s
I0212 07:24:36.684536  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-4: (1.143627ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:36196]
I0212 07:24:36.684949  124047 wrap.go:47] PUT /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-41/status: (2.15543ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:36188]
I0212 07:24:36.685374  124047 wrap.go:47] PATCH /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/events/ppod-41.15828d053fa99a92: (2.2163ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:36198]
I0212 07:24:36.686321  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-41: (1.005329ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:36188]
I0212 07:24:36.686339  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-5: (1.389329ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:36196]
I0212 07:24:36.686636  124047 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 07:24:36.686788  124047 scheduling_queue.go:868] About to try and schedule pod preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-43
I0212 07:24:36.686806  124047 scheduler.go:453] Attempting to schedule pod: preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-43
I0212 07:24:36.686953  124047 factory.go:647] Unable to schedule preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-43: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 07:24:36.687519  124047 factory.go:742] Updating pod condition for preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-43 to (PodScheduled==False, Reason=Unschedulable)
I0212 07:24:36.688227  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-6: (1.548324ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:36198]
I0212 07:24:36.688260  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-43: (982.697µs) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:36190]
I0212 07:24:36.688474  124047 backoff_utils.go:79] Backing off 2s
I0212 07:24:36.689816  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-7: (1.102479ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:36190]
I0212 07:24:36.690519  124047 wrap.go:47] PUT /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-43/status: (2.1959ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:36200]
I0212 07:24:36.690768  124047 wrap.go:47] PATCH /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/events/ppod-43.15828d053fe8fe9a: (2.406141ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:36202]
I0212 07:24:36.691522  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-8: (1.349145ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:36190]
I0212 07:24:36.692135  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-43: (1.024435ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:36200]
I0212 07:24:36.692362  124047 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 07:24:36.692589  124047 scheduling_queue.go:868] About to try and schedule pod preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-46
I0212 07:24:36.692614  124047 scheduler.go:453] Attempting to schedule pod: preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-46
I0212 07:24:36.692723  124047 factory.go:647] Unable to schedule preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-46: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 07:24:36.692781  124047 factory.go:742] Updating pod condition for preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-46 to (PodScheduled==False, Reason=Unschedulable)
I0212 07:24:36.693121  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-9: (1.105869ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:36190]
I0212 07:24:36.694162  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-46: (1.069501ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:36200]
I0212 07:24:36.694406  124047 backoff_utils.go:79] Backing off 2s
I0212 07:24:36.695270  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-10: (1.534563ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:36204]
I0212 07:24:36.695623  124047 wrap.go:47] PATCH /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/events/ppod-46.15828d0542c90e65: (2.279442ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:36190]
I0212 07:24:36.696850  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-11: (1.127091ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:36204]
I0212 07:24:36.697361  124047 wrap.go:47] PUT /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-46/status: (4.126682ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:36198]
I0212 07:24:36.698746  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-12: (1.4041ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:36190]
I0212 07:24:36.698875  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-46: (1.051535ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:36198]
I0212 07:24:36.699105  124047 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 07:24:36.699272  124047 scheduling_queue.go:868] About to try and schedule pod preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-48
I0212 07:24:36.699290  124047 scheduler.go:453] Attempting to schedule pod: preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-48
I0212 07:24:36.699517  124047 factory.go:647] Unable to schedule preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-48: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 07:24:36.699583  124047 factory.go:742] Updating pod condition for preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-48 to (PodScheduled==False, Reason=Unschedulable)
I0212 07:24:36.700084  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-13: (1.053668ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:36190]
I0212 07:24:36.701371  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-48: (1.544651ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:36200]
I0212 07:24:36.701682  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-14: (1.005473ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:36206]
I0212 07:24:36.701784  124047 backoff_utils.go:79] Backing off 2s
I0212 07:24:36.701854  124047 wrap.go:47] PUT /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-48/status: (2.022961ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:36198]
I0212 07:24:36.703856  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-48: (1.629938ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:36200]
I0212 07:24:36.703878  124047 wrap.go:47] PATCH /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/events/ppod-48.15828d05427c6aa8: (3.408301ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:36190]
I0212 07:24:36.703929  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-15: (1.785533ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:36206]
I0212 07:24:36.704101  124047 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 07:24:36.704313  124047 scheduling_queue.go:868] About to try and schedule pod preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-45
I0212 07:24:36.704334  124047 scheduler.go:453] Attempting to schedule pod: preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-45
I0212 07:24:36.704435  124047 factory.go:647] Unable to schedule preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-45: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 07:24:36.704485  124047 factory.go:742] Updating pod condition for preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-45 to (PodScheduled==False, Reason=Unschedulable)
I0212 07:24:36.705634  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-16: (1.240437ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:36190]
I0212 07:24:36.705939  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-45: (1.255462ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:36200]
I0212 07:24:36.706199  124047 backoff_utils.go:79] Backing off 2s
I0212 07:24:36.707106  124047 wrap.go:47] PUT /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-45/status: (2.148687ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:36208]
I0212 07:24:36.707643  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-17: (1.549417ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:36190]
I0212 07:24:36.708775  124047 wrap.go:47] PATCH /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/events/ppod-45.15828d05409b5f30: (3.344429ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:36210]
I0212 07:24:36.709116  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-45: (1.476105ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:36208]
I0212 07:24:36.709428  124047 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 07:24:36.709556  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-18: (1.395444ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:36190]
I0212 07:24:36.709634  124047 scheduling_queue.go:868] About to try and schedule pod preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-49
I0212 07:24:36.709654  124047 scheduler.go:453] Attempting to schedule pod: preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-49
I0212 07:24:36.709737  124047 factory.go:647] Unable to schedule preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-49: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 07:24:36.709793  124047 factory.go:742] Updating pod condition for preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-49 to (PodScheduled==False, Reason=Unschedulable)
I0212 07:24:36.711258  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-19: (1.285674ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:36210]
I0212 07:24:36.712284  124047 wrap.go:47] PUT /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-49/status: (2.256167ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:36200]
I0212 07:24:36.712316  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-49: (1.946915ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:36212]
I0212 07:24:36.712935  124047 backoff_utils.go:79] Backing off 2s
I0212 07:24:36.713416  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-20: (1.68916ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:36210]
I0212 07:24:36.713518  124047 wrap.go:47] PATCH /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/events/ppod-49.15828d054146dd39: (2.916558ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:36214]
I0212 07:24:36.714814  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-49: (1.932439ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:36212]
I0212 07:24:36.715080  124047 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 07:24:36.715102  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-21: (1.178558ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:36210]
I0212 07:24:36.715245  124047 scheduling_queue.go:868] About to try and schedule pod preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-39
I0212 07:24:36.715268  124047 scheduler.go:453] Attempting to schedule pod: preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-39
I0212 07:24:36.715412  124047 factory.go:647] Unable to schedule preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-39: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 07:24:36.715530  124047 factory.go:742] Updating pod condition for preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-39 to (PodScheduled==False, Reason=Unschedulable)
I0212 07:24:36.717255  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-22: (1.694743ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:36212]
I0212 07:24:36.717519  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-39: (1.763036ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:36200]
I0212 07:24:36.718212  124047 wrap.go:47] PUT /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-39/status: (2.213894ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:36216]
I0212 07:24:36.718594  124047 wrap.go:47] PATCH /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/events/ppod-39.15828d0546b8fc0b: (2.356149ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:36218]
I0212 07:24:36.720236  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-23: (1.095004ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:36200]
I0212 07:24:36.720372  124047 wrap.go:47] GET /api/v1/namespaces/preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/pods/ppod-39: (998.882µs) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:36212]
I0212 07:24:36.720665  124047 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 07:24:36.720910  124047 scheduling_queue.go:868] About to try and schedule pod preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-31
I0212 07:24:36.720930  124047 scheduler.go:453] Attempting to schedule pod: preemption-race3cc7edcc-2e97-11e9-a750-0242ac110002/ppod-31
I0212 07:24:36.721014  124047 factory.go:647] Unable to schedule preemption-race3c