Result: FAILURE
Tests: 1 failed / 606 succeeded
Started: 2019-01-10 19:30
Elapsed: 26m41s
Revision:
Builder: gke-prow-containerd-pool-99179761-s4k7
pod: ee9bbde5-150d-11e9-ada6-0a580a6c0160
infra-commit: 2ffa580d2
repo: k8s.io/kubernetes
repo-commit: ee9331e58527d317823e632d60b652ec49a146cb
repos: {u'k8s.io/kubernetes': u'master'}

Test Failures


k8s.io/kubernetes/test/integration/scheduler TestPreemptionRaces 21s

go test -v k8s.io/kubernetes/test/integration/scheduler -run TestPreemptionRaces$
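
The integration suite expects a local etcd on 127.0.0.1:2379 (the clientv3 balancer can be seen pinning that address throughout the log below). A minimal sketch of re-running the failing test locally, assuming a checkout of k8s.io/kubernetes with its standard hack/ scripts:

    # Install etcd into third_party/etcd and put it on PATH
    # (hack/install-etcd.sh in the kubernetes repo does this setup).
    ./hack/install-etcd.sh
    export PATH="$(pwd)/third_party/etcd:${PATH}"
    etcd &   # serves the default client URL, http://127.0.0.1:2379

    # Re-run only the failing test, as in the command above.
    go test -v k8s.io/kubernetes/test/integration/scheduler -run TestPreemptionRaces$
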
I0110 19:49:03.059058  121338 services.go:33] Network range for service cluster IPs is unspecified. Defaulting to {10.0.0.0 ffffff00}.
I0110 19:49:03.059094  121338 services.go:45] Setting service IP to "10.0.0.1" (read-write).
I0110 19:49:03.059105  121338 master.go:273] Node port range unspecified. Defaulting to 30000-32767.
I0110 19:49:03.059116  121338 master.go:229] Using reconciler: 
I0110 19:49:03.061116  121338 storage_factory.go:285] storing podtemplates in v1, reading as __internal from storagebackend.Config{Type:"", Prefix:"650bb268-db60-4e7c-a018-1efbf068072c", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Quorum:false, Paging:true, Codec:runtime.Codec(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0110 19:49:03.061267  121338 clientconn.go:551] parsed scheme: ""
I0110 19:49:03.061285  121338 clientconn.go:557] scheme "" not registered, fallback to default scheme
I0110 19:49:03.061324  121338 resolver_conn_wrapper.go:116] ccResolverWrapper: sending new addresses to cc: [{127.0.0.1:2379 0  <nil>}]
I0110 19:49:03.061387  121338 balancer_v1_wrapper.go:125] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0110 19:49:03.061761  121338 balancer_v1_wrapper.go:245] clientv3/balancer: pin "127.0.0.1:2379"
I0110 19:49:03.061922  121338 store.go:1414] Monitoring podtemplates count at <storage-prefix>//podtemplates
I0110 19:49:03.061958  121338 storage_factory.go:285] storing events in v1, reading as __internal from storagebackend.Config{Type:"", Prefix:"650bb268-db60-4e7c-a018-1efbf068072c", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Quorum:false, Paging:true, Codec:runtime.Codec(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0110 19:49:03.062160  121338 clientconn.go:551] parsed scheme: ""
I0110 19:49:03.062176  121338 clientconn.go:557] scheme "" not registered, fallback to default scheme
I0110 19:49:03.062204  121338 resolver_conn_wrapper.go:116] ccResolverWrapper: sending new addresses to cc: [{127.0.0.1:2379 0  <nil>}]
I0110 19:49:03.062307  121338 balancer_v1_wrapper.go:125] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0110 19:49:03.062377  121338 reflector.go:169] Listing and watching *core.PodTemplate from storage/cacher.go:/podtemplates
I0110 19:49:03.062650  121338 balancer_v1_wrapper.go:125] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0110 19:49:03.065606  121338 balancer_v1_wrapper.go:245] clientv3/balancer: pin "127.0.0.1:2379"
I0110 19:49:03.065658  121338 store.go:1414] Monitoring events count at <storage-prefix>//events
I0110 19:49:03.065699  121338 balancer_v1_wrapper.go:125] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0110 19:49:03.065692  121338 storage_factory.go:285] storing limitranges in v1, reading as __internal from storagebackend.Config{Type:"", Prefix:"650bb268-db60-4e7c-a018-1efbf068072c", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Quorum:false, Paging:true, Codec:runtime.Codec(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0110 19:49:03.065788  121338 clientconn.go:551] parsed scheme: ""
I0110 19:49:03.065802  121338 clientconn.go:557] scheme "" not registered, fallback to default scheme
I0110 19:49:03.065832  121338 resolver_conn_wrapper.go:116] ccResolverWrapper: sending new addresses to cc: [{127.0.0.1:2379 0  <nil>}]
I0110 19:49:03.065884  121338 balancer_v1_wrapper.go:125] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0110 19:49:03.066130  121338 balancer_v1_wrapper.go:245] clientv3/balancer: pin "127.0.0.1:2379"
I0110 19:49:03.066283  121338 store.go:1414] Monitoring limitranges count at <storage-prefix>//limitranges
I0110 19:49:03.066310  121338 storage_factory.go:285] storing resourcequotas in v1, reading as __internal from storagebackend.Config{Type:"", Prefix:"650bb268-db60-4e7c-a018-1efbf068072c", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Quorum:false, Paging:true, Codec:runtime.Codec(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0110 19:49:03.066371  121338 clientconn.go:551] parsed scheme: ""
I0110 19:49:03.066383  121338 clientconn.go:557] scheme "" not registered, fallback to default scheme
I0110 19:49:03.066417  121338 resolver_conn_wrapper.go:116] ccResolverWrapper: sending new addresses to cc: [{127.0.0.1:2379 0  <nil>}]
I0110 19:49:03.066485  121338 balancer_v1_wrapper.go:125] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0110 19:49:03.066566  121338 reflector.go:169] Listing and watching *core.LimitRange from storage/cacher.go:/limitranges
I0110 19:49:03.066868  121338 balancer_v1_wrapper.go:125] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0110 19:49:03.068474  121338 balancer_v1_wrapper.go:245] clientv3/balancer: pin "127.0.0.1:2379"
I0110 19:49:03.068573  121338 balancer_v1_wrapper.go:125] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0110 19:49:03.068626  121338 store.go:1414] Monitoring resourcequotas count at <storage-prefix>//resourcequotas
I0110 19:49:03.068717  121338 reflector.go:169] Listing and watching *core.ResourceQuota from storage/cacher.go:/resourcequotas
I0110 19:49:03.068834  121338 storage_factory.go:285] storing secrets in v1, reading as __internal from storagebackend.Config{Type:"", Prefix:"650bb268-db60-4e7c-a018-1efbf068072c", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Quorum:false, Paging:true, Codec:runtime.Codec(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0110 19:49:03.068934  121338 clientconn.go:551] parsed scheme: ""
I0110 19:49:03.068947  121338 clientconn.go:557] scheme "" not registered, fallback to default scheme
I0110 19:49:03.068973  121338 resolver_conn_wrapper.go:116] ccResolverWrapper: sending new addresses to cc: [{127.0.0.1:2379 0  <nil>}]
I0110 19:49:03.069036  121338 balancer_v1_wrapper.go:125] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0110 19:49:03.069388  121338 balancer_v1_wrapper.go:245] clientv3/balancer: pin "127.0.0.1:2379"
I0110 19:49:03.069472  121338 store.go:1414] Monitoring secrets count at <storage-prefix>//secrets
I0110 19:49:03.069653  121338 storage_factory.go:285] storing persistentvolumes in v1, reading as __internal from storagebackend.Config{Type:"", Prefix:"650bb268-db60-4e7c-a018-1efbf068072c", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Quorum:false, Paging:true, Codec:runtime.Codec(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0110 19:49:03.069720  121338 clientconn.go:551] parsed scheme: ""
I0110 19:49:03.069731  121338 clientconn.go:557] scheme "" not registered, fallback to default scheme
I0110 19:49:03.069757  121338 resolver_conn_wrapper.go:116] ccResolverWrapper: sending new addresses to cc: [{127.0.0.1:2379 0  <nil>}]
I0110 19:49:03.069817  121338 balancer_v1_wrapper.go:125] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0110 19:49:03.069860  121338 reflector.go:169] Listing and watching *core.Secret from storage/cacher.go:/secrets
I0110 19:49:03.070044  121338 balancer_v1_wrapper.go:125] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0110 19:49:03.070301  121338 balancer_v1_wrapper.go:245] clientv3/balancer: pin "127.0.0.1:2379"
I0110 19:49:03.070427  121338 store.go:1414] Monitoring persistentvolumes count at <storage-prefix>//persistentvolumes
I0110 19:49:03.070520  121338 balancer_v1_wrapper.go:125] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0110 19:49:03.070603  121338 reflector.go:169] Listing and watching *core.PersistentVolume from storage/cacher.go:/persistentvolumes
I0110 19:49:03.070594  121338 storage_factory.go:285] storing persistentvolumeclaims in v1, reading as __internal from storagebackend.Config{Type:"", Prefix:"650bb268-db60-4e7c-a018-1efbf068072c", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Quorum:false, Paging:true, Codec:runtime.Codec(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0110 19:49:03.070665  121338 clientconn.go:551] parsed scheme: ""
I0110 19:49:03.070674  121338 clientconn.go:557] scheme "" not registered, fallback to default scheme
I0110 19:49:03.070698  121338 resolver_conn_wrapper.go:116] ccResolverWrapper: sending new addresses to cc: [{127.0.0.1:2379 0  <nil>}]
I0110 19:49:03.070751  121338 balancer_v1_wrapper.go:125] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0110 19:49:03.071138  121338 balancer_v1_wrapper.go:245] clientv3/balancer: pin "127.0.0.1:2379"
I0110 19:49:03.071261  121338 store.go:1414] Monitoring persistentvolumeclaims count at <storage-prefix>//persistentvolumeclaims
I0110 19:49:03.072441  121338 balancer_v1_wrapper.go:125] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0110 19:49:03.072573  121338 reflector.go:169] Listing and watching *core.PersistentVolumeClaim from storage/cacher.go:/persistentvolumeclaims
I0110 19:49:03.074050  121338 storage_factory.go:285] storing configmaps in v1, reading as __internal from storagebackend.Config{Type:"", Prefix:"650bb268-db60-4e7c-a018-1efbf068072c", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Quorum:false, Paging:true, Codec:runtime.Codec(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0110 19:49:03.074413  121338 clientconn.go:551] parsed scheme: ""
I0110 19:49:03.074439  121338 clientconn.go:557] scheme "" not registered, fallback to default scheme
I0110 19:49:03.074471  121338 resolver_conn_wrapper.go:116] ccResolverWrapper: sending new addresses to cc: [{127.0.0.1:2379 0  <nil>}]
I0110 19:49:03.074546  121338 balancer_v1_wrapper.go:125] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0110 19:49:03.074787  121338 balancer_v1_wrapper.go:245] clientv3/balancer: pin "127.0.0.1:2379"
I0110 19:49:03.074864  121338 balancer_v1_wrapper.go:125] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0110 19:49:03.074890  121338 store.go:1414] Monitoring configmaps count at <storage-prefix>//configmaps
I0110 19:49:03.074912  121338 reflector.go:169] Listing and watching *core.ConfigMap from storage/cacher.go:/configmaps
I0110 19:49:03.075083  121338 storage_factory.go:285] storing namespaces in v1, reading as __internal from storagebackend.Config{Type:"", Prefix:"650bb268-db60-4e7c-a018-1efbf068072c", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Quorum:false, Paging:true, Codec:runtime.Codec(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0110 19:49:03.075173  121338 clientconn.go:551] parsed scheme: ""
I0110 19:49:03.075186  121338 clientconn.go:557] scheme "" not registered, fallback to default scheme
I0110 19:49:03.075213  121338 resolver_conn_wrapper.go:116] ccResolverWrapper: sending new addresses to cc: [{127.0.0.1:2379 0  <nil>}]
I0110 19:49:03.075264  121338 balancer_v1_wrapper.go:125] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0110 19:49:03.075549  121338 balancer_v1_wrapper.go:245] clientv3/balancer: pin "127.0.0.1:2379"
I0110 19:49:03.075697  121338 store.go:1414] Monitoring namespaces count at <storage-prefix>//namespaces
I0110 19:49:03.075726  121338 balancer_v1_wrapper.go:125] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0110 19:49:03.075765  121338 reflector.go:169] Listing and watching *core.Namespace from storage/cacher.go:/namespaces
I0110 19:49:03.075867  121338 storage_factory.go:285] storing endpoints in v1, reading as __internal from storagebackend.Config{Type:"", Prefix:"650bb268-db60-4e7c-a018-1efbf068072c", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Quorum:false, Paging:true, Codec:runtime.Codec(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0110 19:49:03.075959  121338 clientconn.go:551] parsed scheme: ""
I0110 19:49:03.075980  121338 clientconn.go:557] scheme "" not registered, fallback to default scheme
I0110 19:49:03.076012  121338 resolver_conn_wrapper.go:116] ccResolverWrapper: sending new addresses to cc: [{127.0.0.1:2379 0  <nil>}]
I0110 19:49:03.076074  121338 balancer_v1_wrapper.go:125] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0110 19:49:03.076335  121338 balancer_v1_wrapper.go:245] clientv3/balancer: pin "127.0.0.1:2379"
I0110 19:49:03.076419  121338 balancer_v1_wrapper.go:125] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0110 19:49:03.076452  121338 store.go:1414] Monitoring endpoints count at <storage-prefix>//endpoints
I0110 19:49:03.076549  121338 reflector.go:169] Listing and watching *core.Endpoints from storage/cacher.go:/endpoints
I0110 19:49:03.076630  121338 storage_factory.go:285] storing nodes in v1, reading as __internal from storagebackend.Config{Type:"", Prefix:"650bb268-db60-4e7c-a018-1efbf068072c", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Quorum:false, Paging:true, Codec:runtime.Codec(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0110 19:49:03.076726  121338 clientconn.go:551] parsed scheme: ""
I0110 19:49:03.076740  121338 clientconn.go:557] scheme "" not registered, fallback to default scheme
I0110 19:49:03.076766  121338 resolver_conn_wrapper.go:116] ccResolverWrapper: sending new addresses to cc: [{127.0.0.1:2379 0  <nil>}]
I0110 19:49:03.076854  121338 balancer_v1_wrapper.go:125] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0110 19:49:03.077061  121338 balancer_v1_wrapper.go:245] clientv3/balancer: pin "127.0.0.1:2379"
I0110 19:49:03.077123  121338 balancer_v1_wrapper.go:125] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0110 19:49:03.077175  121338 store.go:1414] Monitoring nodes count at <storage-prefix>//nodes
I0110 19:49:03.077346  121338 reflector.go:169] Listing and watching *core.Node from storage/cacher.go:/nodes
I0110 19:49:03.077717  121338 storage_factory.go:285] storing pods in v1, reading as __internal from storagebackend.Config{Type:"", Prefix:"650bb268-db60-4e7c-a018-1efbf068072c", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Quorum:false, Paging:true, Codec:runtime.Codec(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0110 19:49:03.077791  121338 clientconn.go:551] parsed scheme: ""
I0110 19:49:03.077802  121338 clientconn.go:557] scheme "" not registered, fallback to default scheme
I0110 19:49:03.077855  121338 resolver_conn_wrapper.go:116] ccResolverWrapper: sending new addresses to cc: [{127.0.0.1:2379 0  <nil>}]
I0110 19:49:03.077891  121338 balancer_v1_wrapper.go:125] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0110 19:49:03.079599  121338 balancer_v1_wrapper.go:245] clientv3/balancer: pin "127.0.0.1:2379"
I0110 19:49:03.079682  121338 balancer_v1_wrapper.go:125] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0110 19:49:03.079771  121338 store.go:1414] Monitoring pods count at <storage-prefix>//pods
I0110 19:49:03.079810  121338 reflector.go:169] Listing and watching *core.Pod from storage/cacher.go:/pods
I0110 19:49:03.079955  121338 storage_factory.go:285] storing serviceaccounts in v1, reading as __internal from storagebackend.Config{Type:"", Prefix:"650bb268-db60-4e7c-a018-1efbf068072c", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Quorum:false, Paging:true, Codec:runtime.Codec(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0110 19:49:03.080069  121338 clientconn.go:551] parsed scheme: ""
I0110 19:49:03.080113  121338 clientconn.go:557] scheme "" not registered, fallback to default scheme
I0110 19:49:03.080427  121338 resolver_conn_wrapper.go:116] ccResolverWrapper: sending new addresses to cc: [{127.0.0.1:2379 0  <nil>}]
I0110 19:49:03.080506  121338 balancer_v1_wrapper.go:125] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0110 19:49:03.080775  121338 balancer_v1_wrapper.go:245] clientv3/balancer: pin "127.0.0.1:2379"
I0110 19:49:03.080875  121338 balancer_v1_wrapper.go:125] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0110 19:49:03.081088  121338 store.go:1414] Monitoring serviceaccounts count at <storage-prefix>//serviceaccounts
I0110 19:49:03.081250  121338 storage_factory.go:285] storing services in v1, reading as __internal from storagebackend.Config{Type:"", Prefix:"650bb268-db60-4e7c-a018-1efbf068072c", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Quorum:false, Paging:true, Codec:runtime.Codec(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0110 19:49:03.081315  121338 clientconn.go:551] parsed scheme: ""
I0110 19:49:03.081847  121338 clientconn.go:557] scheme "" not registered, fallback to default scheme
I0110 19:49:03.081905  121338 resolver_conn_wrapper.go:116] ccResolverWrapper: sending new addresses to cc: [{127.0.0.1:2379 0  <nil>}]
I0110 19:49:03.081731  121338 reflector.go:169] Listing and watching *core.ServiceAccount from storage/cacher.go:/serviceaccounts
I0110 19:49:03.081968  121338 balancer_v1_wrapper.go:125] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0110 19:49:03.082273  121338 balancer_v1_wrapper.go:245] clientv3/balancer: pin "127.0.0.1:2379"
I0110 19:49:03.082377  121338 store.go:1414] Monitoring services count at <storage-prefix>//services
I0110 19:49:03.082410  121338 storage_factory.go:285] storing services in v1, reading as __internal from storagebackend.Config{Type:"", Prefix:"650bb268-db60-4e7c-a018-1efbf068072c", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Quorum:false, Paging:true, Codec:runtime.Codec(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0110 19:49:03.082546  121338 clientconn.go:551] parsed scheme: ""
I0110 19:49:03.082565  121338 clientconn.go:557] scheme "" not registered, fallback to default scheme
I0110 19:49:03.082616  121338 resolver_conn_wrapper.go:116] ccResolverWrapper: sending new addresses to cc: [{127.0.0.1:2379 0  <nil>}]
I0110 19:49:03.082659  121338 balancer_v1_wrapper.go:125] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0110 19:49:03.082712  121338 balancer_v1_wrapper.go:125] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0110 19:49:03.082737  121338 reflector.go:169] Listing and watching *core.Service from storage/cacher.go:/services
I0110 19:49:03.083639  121338 balancer_v1_wrapper.go:245] clientv3/balancer: pin "127.0.0.1:2379"
I0110 19:49:03.083745  121338 clientconn.go:551] parsed scheme: ""
I0110 19:49:03.083765  121338 clientconn.go:557] scheme "" not registered, fallback to default scheme
I0110 19:49:03.083771  121338 balancer_v1_wrapper.go:125] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0110 19:49:03.086995  121338 resolver_conn_wrapper.go:116] ccResolverWrapper: sending new addresses to cc: [{127.0.0.1:2379 0  <nil>}]
I0110 19:49:03.087080  121338 balancer_v1_wrapper.go:125] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0110 19:49:03.087416  121338 balancer_v1_wrapper.go:245] clientv3/balancer: pin "127.0.0.1:2379"
I0110 19:49:03.087481  121338 balancer_v1_wrapper.go:125] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0110 19:49:03.087674  121338 storage_factory.go:285] storing replicationcontrollers in v1, reading as __internal from storagebackend.Config{Type:"", Prefix:"650bb268-db60-4e7c-a018-1efbf068072c", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Quorum:false, Paging:true, Codec:runtime.Codec(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0110 19:49:03.087762  121338 clientconn.go:551] parsed scheme: ""
I0110 19:49:03.087779  121338 clientconn.go:557] scheme "" not registered, fallback to default scheme
I0110 19:49:03.087813  121338 resolver_conn_wrapper.go:116] ccResolverWrapper: sending new addresses to cc: [{127.0.0.1:2379 0  <nil>}]
I0110 19:49:03.087882  121338 balancer_v1_wrapper.go:125] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0110 19:49:03.088242  121338 balancer_v1_wrapper.go:245] clientv3/balancer: pin "127.0.0.1:2379"
I0110 19:49:03.088305  121338 balancer_v1_wrapper.go:125] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0110 19:49:03.088376  121338 store.go:1414] Monitoring replicationcontrollers count at <storage-prefix>//replicationcontrollers
I0110 19:49:03.088535  121338 reflector.go:169] Listing and watching *core.ReplicationController from storage/cacher.go:/replicationcontrollers
I0110 19:49:03.114352  121338 master.go:408] Skipping disabled API group "auditregistration.k8s.io".
I0110 19:49:03.114401  121338 master.go:416] Enabling API group "authentication.k8s.io".
I0110 19:49:03.114417  121338 master.go:416] Enabling API group "authorization.k8s.io".
I0110 19:49:03.114635  121338 storage_factory.go:285] storing horizontalpodautoscalers.autoscaling in autoscaling/v1, reading as autoscaling/__internal from storagebackend.Config{Type:"", Prefix:"650bb268-db60-4e7c-a018-1efbf068072c", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Quorum:false, Paging:true, Codec:runtime.Codec(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0110 19:49:03.114778  121338 clientconn.go:551] parsed scheme: ""
I0110 19:49:03.114801  121338 clientconn.go:557] scheme "" not registered, fallback to default scheme
I0110 19:49:03.114844  121338 resolver_conn_wrapper.go:116] ccResolverWrapper: sending new addresses to cc: [{127.0.0.1:2379 0  <nil>}]
I0110 19:49:03.114917  121338 balancer_v1_wrapper.go:125] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0110 19:49:03.115360  121338 balancer_v1_wrapper.go:245] clientv3/balancer: pin "127.0.0.1:2379"
I0110 19:49:03.115590  121338 store.go:1414] Monitoring horizontalpodautoscalers.autoscaling count at <storage-prefix>//horizontalpodautoscalers
I0110 19:49:03.115783  121338 balancer_v1_wrapper.go:125] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0110 19:49:03.115794  121338 reflector.go:169] Listing and watching *autoscaling.HorizontalPodAutoscaler from storage/cacher.go:/horizontalpodautoscalers
I0110 19:49:03.115946  121338 storage_factory.go:285] storing horizontalpodautoscalers.autoscaling in autoscaling/v1, reading as autoscaling/__internal from storagebackend.Config{Type:"", Prefix:"650bb268-db60-4e7c-a018-1efbf068072c", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Quorum:false, Paging:true, Codec:runtime.Codec(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0110 19:49:03.116104  121338 clientconn.go:551] parsed scheme: ""
I0110 19:49:03.116141  121338 clientconn.go:557] scheme "" not registered, fallback to default scheme
I0110 19:49:03.116335  121338 resolver_conn_wrapper.go:116] ccResolverWrapper: sending new addresses to cc: [{127.0.0.1:2379 0  <nil>}]
I0110 19:49:03.116465  121338 balancer_v1_wrapper.go:125] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0110 19:49:03.116852  121338 balancer_v1_wrapper.go:245] clientv3/balancer: pin "127.0.0.1:2379"
I0110 19:49:03.117105  121338 store.go:1414] Monitoring horizontalpodautoscalers.autoscaling count at <storage-prefix>//horizontalpodautoscalers
I0110 19:49:03.117226  121338 reflector.go:169] Listing and watching *autoscaling.HorizontalPodAutoscaler from storage/cacher.go:/horizontalpodautoscalers
I0110 19:49:03.117260  121338 balancer_v1_wrapper.go:125] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0110 19:49:03.117369  121338 storage_factory.go:285] storing horizontalpodautoscalers.autoscaling in autoscaling/v1, reading as autoscaling/__internal from storagebackend.Config{Type:"", Prefix:"650bb268-db60-4e7c-a018-1efbf068072c", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Quorum:false, Paging:true, Codec:runtime.Codec(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0110 19:49:03.117446  121338 clientconn.go:551] parsed scheme: ""
I0110 19:49:03.117464  121338 clientconn.go:557] scheme "" not registered, fallback to default scheme
I0110 19:49:03.117534  121338 resolver_conn_wrapper.go:116] ccResolverWrapper: sending new addresses to cc: [{127.0.0.1:2379 0  <nil>}]
I0110 19:49:03.117593  121338 balancer_v1_wrapper.go:125] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0110 19:49:03.117880  121338 balancer_v1_wrapper.go:245] clientv3/balancer: pin "127.0.0.1:2379"
I0110 19:49:03.118009  121338 store.go:1414] Monitoring horizontalpodautoscalers.autoscaling count at <storage-prefix>//horizontalpodautoscalers
I0110 19:49:03.118038  121338 master.go:416] Enabling API group "autoscaling".
I0110 19:49:03.118205  121338 balancer_v1_wrapper.go:125] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0110 19:49:03.118218  121338 storage_factory.go:285] storing jobs.batch in batch/v1, reading as batch/__internal from storagebackend.Config{Type:"", Prefix:"650bb268-db60-4e7c-a018-1efbf068072c", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Quorum:false, Paging:true, Codec:runtime.Codec(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0110 19:49:03.118375  121338 reflector.go:169] Listing and watching *autoscaling.HorizontalPodAutoscaler from storage/cacher.go:/horizontalpodautoscalers
I0110 19:49:03.118395  121338 clientconn.go:551] parsed scheme: ""
I0110 19:49:03.118407  121338 clientconn.go:557] scheme "" not registered, fallback to default scheme
I0110 19:49:03.118463  121338 resolver_conn_wrapper.go:116] ccResolverWrapper: sending new addresses to cc: [{127.0.0.1:2379 0  <nil>}]
I0110 19:49:03.118555  121338 balancer_v1_wrapper.go:125] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0110 19:49:03.118976  121338 balancer_v1_wrapper.go:245] clientv3/balancer: pin "127.0.0.1:2379"
I0110 19:49:03.119155  121338 balancer_v1_wrapper.go:125] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0110 19:49:03.119251  121338 store.go:1414] Monitoring jobs.batch count at <storage-prefix>//jobs
I0110 19:49:03.119379  121338 reflector.go:169] Listing and watching *batch.Job from storage/cacher.go:/jobs
I0110 19:49:03.119626  121338 storage_factory.go:285] storing cronjobs.batch in batch/v1beta1, reading as batch/__internal from storagebackend.Config{Type:"", Prefix:"650bb268-db60-4e7c-a018-1efbf068072c", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Quorum:false, Paging:true, Codec:runtime.Codec(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0110 19:49:03.119807  121338 clientconn.go:551] parsed scheme: ""
I0110 19:49:03.119822  121338 clientconn.go:557] scheme "" not registered, fallback to default scheme
I0110 19:49:03.119850  121338 resolver_conn_wrapper.go:116] ccResolverWrapper: sending new addresses to cc: [{127.0.0.1:2379 0  <nil>}]
I0110 19:49:03.119934  121338 balancer_v1_wrapper.go:125] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0110 19:49:03.120328  121338 balancer_v1_wrapper.go:245] clientv3/balancer: pin "127.0.0.1:2379"
I0110 19:49:03.120407  121338 balancer_v1_wrapper.go:125] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0110 19:49:03.120472  121338 store.go:1414] Monitoring cronjobs.batch count at <storage-prefix>//cronjobs
I0110 19:49:03.120533  121338 reflector.go:169] Listing and watching *batch.CronJob from storage/cacher.go:/cronjobs
I0110 19:49:03.120572  121338 master.go:416] Enabling API group "batch".
I0110 19:49:03.120788  121338 storage_factory.go:285] storing certificatesigningrequests.certificates.k8s.io in certificates.k8s.io/v1beta1, reading as certificates.k8s.io/__internal from storagebackend.Config{Type:"", Prefix:"650bb268-db60-4e7c-a018-1efbf068072c", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Quorum:false, Paging:true, Codec:runtime.Codec(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0110 19:49:03.120918  121338 clientconn.go:551] parsed scheme: ""
I0110 19:49:03.120967  121338 clientconn.go:557] scheme "" not registered, fallback to default scheme
I0110 19:49:03.121063  121338 resolver_conn_wrapper.go:116] ccResolverWrapper: sending new addresses to cc: [{127.0.0.1:2379 0  <nil>}]
I0110 19:49:03.121164  121338 balancer_v1_wrapper.go:125] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0110 19:49:03.121692  121338 balancer_v1_wrapper.go:245] clientv3/balancer: pin "127.0.0.1:2379"
I0110 19:49:03.121800  121338 store.go:1414] Monitoring certificatesigningrequests.certificates.k8s.io count at <storage-prefix>//certificatesigningrequests
I0110 19:49:03.121842  121338 master.go:416] Enabling API group "certificates.k8s.io".
I0110 19:49:03.122036  121338 balancer_v1_wrapper.go:125] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0110 19:49:03.122087  121338 reflector.go:169] Listing and watching *certificates.CertificateSigningRequest from storage/cacher.go:/certificatesigningrequests
I0110 19:49:03.122106  121338 storage_factory.go:285] storing leases.coordination.k8s.io in coordination.k8s.io/v1beta1, reading as coordination.k8s.io/__internal from storagebackend.Config{Type:"", Prefix:"650bb268-db60-4e7c-a018-1efbf068072c", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Quorum:false, Paging:true, Codec:runtime.Codec(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0110 19:49:03.122256  121338 clientconn.go:551] parsed scheme: ""
I0110 19:49:03.122278  121338 clientconn.go:557] scheme "" not registered, fallback to default scheme
I0110 19:49:03.122310  121338 resolver_conn_wrapper.go:116] ccResolverWrapper: sending new addresses to cc: [{127.0.0.1:2379 0  <nil>}]
I0110 19:49:03.122382  121338 balancer_v1_wrapper.go:125] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0110 19:49:03.123111  121338 balancer_v1_wrapper.go:245] clientv3/balancer: pin "127.0.0.1:2379"
I0110 19:49:03.123174  121338 balancer_v1_wrapper.go:125] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0110 19:49:03.123292  121338 store.go:1414] Monitoring leases.coordination.k8s.io count at <storage-prefix>//leases
I0110 19:49:03.123368  121338 reflector.go:169] Listing and watching *coordination.Lease from storage/cacher.go:/leases
I0110 19:49:03.123533  121338 storage_factory.go:285] storing leases.coordination.k8s.io in coordination.k8s.io/v1beta1, reading as coordination.k8s.io/__internal from storagebackend.Config{Type:"", Prefix:"650bb268-db60-4e7c-a018-1efbf068072c", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Quorum:false, Paging:true, Codec:runtime.Codec(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0110 19:49:03.123614  121338 clientconn.go:551] parsed scheme: ""
I0110 19:49:03.123627  121338 clientconn.go:557] scheme "" not registered, fallback to default scheme
I0110 19:49:03.123662  121338 resolver_conn_wrapper.go:116] ccResolverWrapper: sending new addresses to cc: [{127.0.0.1:2379 0  <nil>}]
I0110 19:49:03.123846  121338 balancer_v1_wrapper.go:125] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0110 19:49:03.124836  121338 balancer_v1_wrapper.go:245] clientv3/balancer: pin "127.0.0.1:2379"
I0110 19:49:03.124929  121338 store.go:1414] Monitoring leases.coordination.k8s.io count at <storage-prefix>//leases
I0110 19:49:03.124942  121338 master.go:416] Enabling API group "coordination.k8s.io".
I0110 19:49:03.125127  121338 storage_factory.go:285] storing replicationcontrollers in v1, reading as __internal from storagebackend.Config{Type:"", Prefix:"650bb268-db60-4e7c-a018-1efbf068072c", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Quorum:false, Paging:true, Codec:runtime.Codec(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0110 19:49:03.125299  121338 clientconn.go:551] parsed scheme: ""
I0110 19:49:03.125310  121338 clientconn.go:557] scheme "" not registered, fallback to default scheme
I0110 19:49:03.125342  121338 balancer_v1_wrapper.go:125] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0110 19:49:03.125358  121338 resolver_conn_wrapper.go:116] ccResolverWrapper: sending new addresses to cc: [{127.0.0.1:2379 0  <nil>}]
I0110 19:49:03.125368  121338 reflector.go:169] Listing and watching *coordination.Lease from storage/cacher.go:/leases
I0110 19:49:03.125390  121338 balancer_v1_wrapper.go:125] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0110 19:49:03.125622  121338 balancer_v1_wrapper.go:245] clientv3/balancer: pin "127.0.0.1:2379"
I0110 19:49:03.125733  121338 store.go:1414] Monitoring replicationcontrollers count at <storage-prefix>//replicationcontrollers
I0110 19:49:03.125869  121338 storage_factory.go:285] storing daemonsets.apps in apps/v1, reading as apps/__internal from storagebackend.Config{Type:"", Prefix:"650bb268-db60-4e7c-a018-1efbf068072c", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Quorum:false, Paging:true, Codec:runtime.Codec(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0110 19:49:03.125958  121338 clientconn.go:551] parsed scheme: ""
I0110 19:49:03.125970  121338 clientconn.go:557] scheme "" not registered, fallback to default scheme
I0110 19:49:03.125996  121338 resolver_conn_wrapper.go:116] ccResolverWrapper: sending new addresses to cc: [{127.0.0.1:2379 0  <nil>}]
I0110 19:49:03.126052  121338 balancer_v1_wrapper.go:125] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0110 19:49:03.126073  121338 reflector.go:169] Listing and watching *core.ReplicationController from storage/cacher.go:/replicationcontrollers
I0110 19:49:03.126328  121338 balancer_v1_wrapper.go:125] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0110 19:49:03.126702  121338 balancer_v1_wrapper.go:245] clientv3/balancer: pin "127.0.0.1:2379"
I0110 19:49:03.126791  121338 balancer_v1_wrapper.go:125] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0110 19:49:03.126992  121338 store.go:1414] Monitoring daemonsets.apps count at <storage-prefix>//daemonsets
I0110 19:49:03.127084  121338 reflector.go:169] Listing and watching *apps.DaemonSet from storage/cacher.go:/daemonsets
I0110 19:49:03.127276  121338 storage_factory.go:285] storing deployments.apps in apps/v1, reading as apps/__internal from storagebackend.Config{Type:"", Prefix:"650bb268-db60-4e7c-a018-1efbf068072c", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Quorum:false, Paging:true, Codec:runtime.Codec(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0110 19:49:03.127425  121338 clientconn.go:551] parsed scheme: ""
I0110 19:49:03.127439  121338 clientconn.go:557] scheme "" not registered, fallback to default scheme
I0110 19:49:03.127465  121338 resolver_conn_wrapper.go:116] ccResolverWrapper: sending new addresses to cc: [{127.0.0.1:2379 0  <nil>}]
I0110 19:49:03.127705  121338 balancer_v1_wrapper.go:125] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0110 19:49:03.132004  121338 balancer_v1_wrapper.go:245] clientv3/balancer: pin "127.0.0.1:2379"
I0110 19:49:03.132176  121338 store.go:1414] Monitoring deployments.apps count at <storage-prefix>//deployments
I0110 19:49:03.132393  121338 storage_factory.go:285] storing ingresses.extensions in extensions/v1beta1, reading as extensions/__internal from storagebackend.Config{Type:"", Prefix:"650bb268-db60-4e7c-a018-1efbf068072c", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Quorum:false, Paging:true, Codec:runtime.Codec(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0110 19:49:03.132479  121338 clientconn.go:551] parsed scheme: ""
I0110 19:49:03.132513  121338 clientconn.go:557] scheme "" not registered, fallback to default scheme
I0110 19:49:03.132547  121338 resolver_conn_wrapper.go:116] ccResolverWrapper: sending new addresses to cc: [{127.0.0.1:2379 0  <nil>}]
I0110 19:49:03.132659  121338 balancer_v1_wrapper.go:125] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0110 19:49:03.132716  121338 balancer_v1_wrapper.go:125] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0110 19:49:03.132811  121338 reflector.go:169] Listing and watching *apps.Deployment from storage/cacher.go:/deployments
I0110 19:49:03.132935  121338 balancer_v1_wrapper.go:245] clientv3/balancer: pin "127.0.0.1:2379"
I0110 19:49:03.133008  121338 balancer_v1_wrapper.go:125] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0110 19:49:03.133048  121338 store.go:1414] Monitoring ingresses.extensions count at <storage-prefix>//ingresses
I0110 19:49:03.133115  121338 reflector.go:169] Listing and watching *extensions.Ingress from storage/cacher.go:/ingresses
I0110 19:49:03.135785  121338 storage_factory.go:285] storing podsecuritypolicies.policy in policy/v1beta1, reading as policy/__internal from storagebackend.Config{Type:"", Prefix:"650bb268-db60-4e7c-a018-1efbf068072c", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Quorum:false, Paging:true, Codec:runtime.Codec(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0110 19:49:03.135908  121338 clientconn.go:551] parsed scheme: ""
I0110 19:49:03.135924  121338 clientconn.go:557] scheme "" not registered, fallback to default scheme
I0110 19:49:03.135961  121338 resolver_conn_wrapper.go:116] ccResolverWrapper: sending new addresses to cc: [{127.0.0.1:2379 0  <nil>}]
I0110 19:49:03.136017  121338 balancer_v1_wrapper.go:125] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0110 19:49:03.136672  121338 balancer_v1_wrapper.go:245] clientv3/balancer: pin "127.0.0.1:2379"
I0110 19:49:03.136816  121338 store.go:1414] Monitoring podsecuritypolicies.policy count at <storage-prefix>//podsecuritypolicies
I0110 19:49:03.136863  121338 balancer_v1_wrapper.go:125] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0110 19:49:03.136887  121338 reflector.go:169] Listing and watching *policy.PodSecurityPolicy from storage/cacher.go:/podsecuritypolicies
I0110 19:49:03.137020  121338 storage_factory.go:285] storing replicasets.apps in apps/v1, reading as apps/__internal from storagebackend.Config{Type:"", Prefix:"650bb268-db60-4e7c-a018-1efbf068072c", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Quorum:false, Paging:true, Codec:runtime.Codec(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0110 19:49:03.137098  121338 clientconn.go:551] parsed scheme: ""
I0110 19:49:03.137111  121338 clientconn.go:557] scheme "" not registered, fallback to default scheme
I0110 19:49:03.137163  121338 resolver_conn_wrapper.go:116] ccResolverWrapper: sending new addresses to cc: [{127.0.0.1:2379 0  <nil>}]
I0110 19:49:03.137209  121338 balancer_v1_wrapper.go:125] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0110 19:49:03.137456  121338 balancer_v1_wrapper.go:245] clientv3/balancer: pin "127.0.0.1:2379"
I0110 19:49:03.137514  121338 balancer_v1_wrapper.go:125] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0110 19:49:03.137638  121338 store.go:1414] Monitoring replicasets.apps count at <storage-prefix>//replicasets
I0110 19:49:03.137808  121338 storage_factory.go:285] storing networkpolicies.networking.k8s.io in networking.k8s.io/v1, reading as networking.k8s.io/__internal from storagebackend.Config{Type:"", Prefix:"650bb268-db60-4e7c-a018-1efbf068072c", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Quorum:false, Paging:true, Codec:runtime.Codec(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0110 19:49:03.137892  121338 clientconn.go:551] parsed scheme: ""
I0110 19:49:03.137908  121338 clientconn.go:557] scheme "" not registered, fallback to default scheme
I0110 19:49:03.137939  121338 resolver_conn_wrapper.go:116] ccResolverWrapper: sending new addresses to cc: [{127.0.0.1:2379 0  <nil>}]
I0110 19:49:03.138008  121338 reflector.go:169] Listing and watching *apps.ReplicaSet from storage/cacher.go:/replicasets
I0110 19:49:03.138361  121338 balancer_v1_wrapper.go:125] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0110 19:49:03.139907  121338 balancer_v1_wrapper.go:245] clientv3/balancer: pin "127.0.0.1:2379"
I0110 19:49:03.140053  121338 store.go:1414] Monitoring networkpolicies.networking.k8s.io count at <storage-prefix>//networkpolicies
I0110 19:49:03.140078  121338 master.go:416] Enabling API group "extensions".
I0110 19:49:03.140348  121338 storage_factory.go:285] storing networkpolicies.networking.k8s.io in networking.k8s.io/v1, reading as networking.k8s.io/__internal from storagebackend.Config{Type:"", Prefix:"650bb268-db60-4e7c-a018-1efbf068072c", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Quorum:false, Paging:true, Codec:runtime.Codec(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0110 19:49:03.140433  121338 clientconn.go:551] parsed scheme: ""
I0110 19:49:03.140450  121338 clientconn.go:557] scheme "" not registered, fallback to default scheme
I0110 19:49:03.140461  121338 balancer_v1_wrapper.go:125] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0110 19:49:03.140523  121338 reflector.go:169] Listing and watching *networking.NetworkPolicy from storage/cacher.go:/networkpolicies
I0110 19:49:03.140543  121338 resolver_conn_wrapper.go:116] ccResolverWrapper: sending new addresses to cc: [{127.0.0.1:2379 0  <nil>}]
I0110 19:49:03.140696  121338 balancer_v1_wrapper.go:125] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0110 19:49:03.140949  121338 balancer_v1_wrapper.go:245] clientv3/balancer: pin "127.0.0.1:2379"
I0110 19:49:03.141002  121338 balancer_v1_wrapper.go:125] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0110 19:49:03.141038  121338 store.go:1414] Monitoring networkpolicies.networking.k8s.io count at <storage-prefix>//networkpolicies
I0110 19:49:03.141060  121338 master.go:416] Enabling API group "networking.k8s.io".
I0110 19:49:03.141125  121338 reflector.go:169] Listing and watching *networking.NetworkPolicy from storage/cacher.go:/networkpolicies
I0110 19:49:03.141263  121338 storage_factory.go:285] storing poddisruptionbudgets.policy in policy/v1beta1, reading as policy/__internal from storagebackend.Config{Type:"", Prefix:"650bb268-db60-4e7c-a018-1efbf068072c", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Quorum:false, Paging:true, Codec:runtime.Codec(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0110 19:49:03.141344  121338 clientconn.go:551] parsed scheme: ""
I0110 19:49:03.141363  121338 clientconn.go:557] scheme "" not registered, fallback to default scheme
I0110 19:49:03.141396  121338 resolver_conn_wrapper.go:116] ccResolverWrapper: sending new addresses to cc: [{127.0.0.1:2379 0  <nil>}]
I0110 19:49:03.141664  121338 balancer_v1_wrapper.go:125] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0110 19:49:03.141891  121338 balancer_v1_wrapper.go:245] clientv3/balancer: pin "127.0.0.1:2379"
I0110 19:49:03.141978  121338 store.go:1414] Monitoring poddisruptionbudgets.policy count at <storage-prefix>//poddisruptionbudgets
I0110 19:49:03.142114  121338 storage_factory.go:285] storing podsecuritypolicies.policy in policy/v1beta1, reading as policy/__internal from storagebackend.Config{Type:"", Prefix:"650bb268-db60-4e7c-a018-1efbf068072c", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Quorum:false, Paging:true, Codec:runtime.Codec(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0110 19:49:03.142200  121338 clientconn.go:551] parsed scheme: ""
I0110 19:49:03.142212  121338 clientconn.go:557] scheme "" not registered, fallback to default scheme
I0110 19:49:03.142254  121338 resolver_conn_wrapper.go:116] ccResolverWrapper: sending new addresses to cc: [{127.0.0.1:2379 0  <nil>}]
I0110 19:49:03.142312  121338 balancer_v1_wrapper.go:125] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0110 19:49:03.142335  121338 reflector.go:169] Listing and watching *policy.PodDisruptionBudget from storage/cacher.go:/poddisruptionbudgets
I0110 19:49:03.143127  121338 balancer_v1_wrapper.go:125] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0110 19:49:03.144193  121338 balancer_v1_wrapper.go:245] clientv3/balancer: pin "127.0.0.1:2379"
I0110 19:49:03.144348  121338 store.go:1414] Monitoring podsecuritypolicies.policy count at <storage-prefix>//podsecuritypolicies
I0110 19:49:03.144362  121338 master.go:416] Enabling API group "policy".
I0110 19:49:03.144379  121338 balancer_v1_wrapper.go:125] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0110 19:49:03.144395  121338 storage_factory.go:285] storing roles.rbac.authorization.k8s.io in rbac.authorization.k8s.io/v1, reading as rbac.authorization.k8s.io/__internal from storagebackend.Config{Type:"", Prefix:"650bb268-db60-4e7c-a018-1efbf068072c", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Quorum:false, Paging:true, Codec:runtime.Codec(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0110 19:49:03.144451  121338 reflector.go:169] Listing and watching *policy.PodSecurityPolicy from storage/cacher.go:/podsecuritypolicies
I0110 19:49:03.144464  121338 clientconn.go:551] parsed scheme: ""
I0110 19:49:03.144476  121338 clientconn.go:557] scheme "" not registered, fallback to default scheme
I0110 19:49:03.144522  121338 resolver_conn_wrapper.go:116] ccResolverWrapper: sending new addresses to cc: [{127.0.0.1:2379 0  <nil>}]
I0110 19:49:03.144803  121338 balancer_v1_wrapper.go:125] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0110 19:49:03.150246  121338 balancer_v1_wrapper.go:245] clientv3/balancer: pin "127.0.0.1:2379"
I0110 19:49:03.150956  121338 store.go:1414] Monitoring roles.rbac.authorization.k8s.io count at <storage-prefix>//roles
I0110 19:49:03.151609  121338 balancer_v1_wrapper.go:125] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0110 19:49:03.151570  121338 storage_factory.go:285] storing rolebindings.rbac.authorization.k8s.io in rbac.authorization.k8s.io/v1, reading as rbac.authorization.k8s.io/__internal from storagebackend.Config{Type:"", Prefix:"650bb268-db60-4e7c-a018-1efbf068072c", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Quorum:false, Paging:true, Codec:runtime.Codec(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0110 19:49:03.151826  121338 reflector.go:169] Listing and watching *rbac.Role from storage/cacher.go:/roles
I0110 19:49:03.151936  121338 clientconn.go:551] parsed scheme: ""
I0110 19:49:03.151962  121338 clientconn.go:557] scheme "" not registered, fallback to default scheme
I0110 19:49:03.152210  121338 resolver_conn_wrapper.go:116] ccResolverWrapper: sending new addresses to cc: [{127.0.0.1:2379 0  <nil>}]
I0110 19:49:03.152610  121338 balancer_v1_wrapper.go:125] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0110 19:49:03.156457  121338 balancer_v1_wrapper.go:245] clientv3/balancer: pin "127.0.0.1:2379"
I0110 19:49:03.156966  121338 store.go:1414] Monitoring rolebindings.rbac.authorization.k8s.io count at <storage-prefix>//rolebindings
I0110 19:49:03.157054  121338 storage_factory.go:285] storing clusterroles.rbac.authorization.k8s.io in rbac.authorization.k8s.io/v1, reading as rbac.authorization.k8s.io/__internal from storagebackend.Config{Type:"", Prefix:"650bb268-db60-4e7c-a018-1efbf068072c", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Quorum:false, Paging:true, Codec:runtime.Codec(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0110 19:49:03.157392  121338 clientconn.go:551] parsed scheme: ""
I0110 19:49:03.157515  121338 clientconn.go:557] scheme "" not registered, fallback to default scheme
I0110 19:49:03.157750  121338 resolver_conn_wrapper.go:116] ccResolverWrapper: sending new addresses to cc: [{127.0.0.1:2379 0  <nil>}]
I0110 19:49:03.158295  121338 balancer_v1_wrapper.go:125] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0110 19:49:03.158457  121338 reflector.go:169] Listing and watching *rbac.RoleBinding from storage/cacher.go:/rolebindings
I0110 19:49:03.159293  121338 balancer_v1_wrapper.go:125] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0110 19:49:03.207153  121338 balancer_v1_wrapper.go:245] clientv3/balancer: pin "127.0.0.1:2379"
I0110 19:49:03.207428  121338 store.go:1414] Monitoring clusterroles.rbac.authorization.k8s.io count at <storage-prefix>//clusterroles
I0110 19:49:03.207801  121338 storage_factory.go:285] storing clusterrolebindings.rbac.authorization.k8s.io in rbac.authorization.k8s.io/v1, reading as rbac.authorization.k8s.io/__internal from storagebackend.Config{Type:"", Prefix:"650bb268-db60-4e7c-a018-1efbf068072c", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Quorum:false, Paging:true, Codec:runtime.Codec(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0110 19:49:03.207998  121338 clientconn.go:551] parsed scheme: ""
I0110 19:49:03.208031  121338 clientconn.go:557] scheme "" not registered, fallback to default scheme
I0110 19:49:03.208094  121338 resolver_conn_wrapper.go:116] ccResolverWrapper: sending new addresses to cc: [{127.0.0.1:2379 0  <nil>}]
I0110 19:49:03.208171  121338 balancer_v1_wrapper.go:125] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0110 19:49:03.208267  121338 reflector.go:169] Listing and watching *rbac.ClusterRole from storage/cacher.go:/clusterroles
I0110 19:49:03.208702  121338 balancer_v1_wrapper.go:125] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0110 19:49:03.209055  121338 balancer_v1_wrapper.go:245] clientv3/balancer: pin "127.0.0.1:2379"
I0110 19:49:03.209196  121338 balancer_v1_wrapper.go:125] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0110 19:49:03.209304  121338 store.go:1414] Monitoring clusterrolebindings.rbac.authorization.k8s.io count at <storage-prefix>//clusterrolebindings
I0110 19:49:03.209379  121338 storage_factory.go:285] storing roles.rbac.authorization.k8s.io in rbac.authorization.k8s.io/v1, reading as rbac.authorization.k8s.io/__internal from storagebackend.Config{Type:"", Prefix:"650bb268-db60-4e7c-a018-1efbf068072c", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Quorum:false, Paging:true, Codec:runtime.Codec(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0110 19:49:03.209553  121338 clientconn.go:551] parsed scheme: ""
I0110 19:49:03.209573  121338 clientconn.go:557] scheme "" not registered, fallback to default scheme
I0110 19:49:03.209623  121338 resolver_conn_wrapper.go:116] ccResolverWrapper: sending new addresses to cc: [{127.0.0.1:2379 0  <nil>}]
I0110 19:49:03.209716  121338 balancer_v1_wrapper.go:125] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0110 19:49:03.209724  121338 reflector.go:169] Listing and watching *rbac.ClusterRoleBinding from storage/cacher.go:/clusterrolebindings
I0110 19:49:03.209934  121338 balancer_v1_wrapper.go:245] clientv3/balancer: pin "127.0.0.1:2379"
I0110 19:49:03.209976  121338 balancer_v1_wrapper.go:125] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0110 19:49:03.210072  121338 store.go:1414] Monitoring roles.rbac.authorization.k8s.io count at <storage-prefix>//roles
I0110 19:49:03.210339  121338 reflector.go:169] Listing and watching *rbac.Role from storage/cacher.go:/roles
I0110 19:49:03.210373  121338 storage_factory.go:285] storing rolebindings.rbac.authorization.k8s.io in rbac.authorization.k8s.io/v1, reading as rbac.authorization.k8s.io/__internal from storagebackend.Config{Type:"", Prefix:"650bb268-db60-4e7c-a018-1efbf068072c", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Quorum:false, Paging:true, Codec:runtime.Codec(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0110 19:49:03.210524  121338 clientconn.go:551] parsed scheme: ""
I0110 19:49:03.210546  121338 clientconn.go:557] scheme "" not registered, fallback to default scheme
I0110 19:49:03.210584  121338 resolver_conn_wrapper.go:116] ccResolverWrapper: sending new addresses to cc: [{127.0.0.1:2379 0  <nil>}]
I0110 19:49:03.210646  121338 balancer_v1_wrapper.go:125] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0110 19:49:03.211589  121338 balancer_v1_wrapper.go:245] clientv3/balancer: pin "127.0.0.1:2379"
I0110 19:49:03.211652  121338 balancer_v1_wrapper.go:125] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0110 19:49:03.211744  121338 store.go:1414] Monitoring rolebindings.rbac.authorization.k8s.io count at <storage-prefix>//rolebindings
I0110 19:49:03.211790  121338 storage_factory.go:285] storing clusterroles.rbac.authorization.k8s.io in rbac.authorization.k8s.io/v1, reading as rbac.authorization.k8s.io/__internal from storagebackend.Config{Type:"", Prefix:"650bb268-db60-4e7c-a018-1efbf068072c", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Quorum:false, Paging:true, Codec:runtime.Codec(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0110 19:49:03.211841  121338 reflector.go:169] Listing and watching *rbac.RoleBinding from storage/cacher.go:/rolebindings
I0110 19:49:03.211960  121338 clientconn.go:551] parsed scheme: ""
I0110 19:49:03.211973  121338 clientconn.go:557] scheme "" not registered, fallback to default scheme
I0110 19:49:03.212010  121338 resolver_conn_wrapper.go:116] ccResolverWrapper: sending new addresses to cc: [{127.0.0.1:2379 0  <nil>}]
I0110 19:49:03.212123  121338 balancer_v1_wrapper.go:125] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0110 19:49:03.212456  121338 balancer_v1_wrapper.go:245] clientv3/balancer: pin "127.0.0.1:2379"
I0110 19:49:03.212626  121338 store.go:1414] Monitoring clusterroles.rbac.authorization.k8s.io count at <storage-prefix>//clusterroles
I0110 19:49:03.212982  121338 balancer_v1_wrapper.go:125] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0110 19:49:03.213015  121338 storage_factory.go:285] storing clusterrolebindings.rbac.authorization.k8s.io in rbac.authorization.k8s.io/v1, reading as rbac.authorization.k8s.io/__internal from storagebackend.Config{Type:"", Prefix:"650bb268-db60-4e7c-a018-1efbf068072c", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Quorum:false, Paging:true, Codec:runtime.Codec(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0110 19:49:03.213124  121338 reflector.go:169] Listing and watching *rbac.ClusterRole from storage/cacher.go:/clusterroles
I0110 19:49:03.213299  121338 clientconn.go:551] parsed scheme: ""
I0110 19:49:03.213319  121338 clientconn.go:557] scheme "" not registered, fallback to default scheme
I0110 19:49:03.213377  121338 resolver_conn_wrapper.go:116] ccResolverWrapper: sending new addresses to cc: [{127.0.0.1:2379 0  <nil>}]
I0110 19:49:03.213439  121338 balancer_v1_wrapper.go:125] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0110 19:49:03.213769  121338 balancer_v1_wrapper.go:245] clientv3/balancer: pin "127.0.0.1:2379"
I0110 19:49:03.213893  121338 store.go:1414] Monitoring clusterrolebindings.rbac.authorization.k8s.io count at <storage-prefix>//clusterrolebindings
I0110 19:49:03.213922  121338 master.go:416] Enabling API group "rbac.authorization.k8s.io".
I0110 19:49:03.214995  121338 balancer_v1_wrapper.go:125] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0110 19:49:03.215146  121338 reflector.go:169] Listing and watching *rbac.ClusterRoleBinding from storage/cacher.go:/clusterrolebindings
I0110 19:49:03.219424  121338 storage_factory.go:285] storing priorityclasses.scheduling.k8s.io in scheduling.k8s.io/v1beta1, reading as scheduling.k8s.io/__internal from storagebackend.Config{Type:"", Prefix:"650bb268-db60-4e7c-a018-1efbf068072c", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Quorum:false, Paging:true, Codec:runtime.Codec(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0110 19:49:03.219624  121338 clientconn.go:551] parsed scheme: ""
I0110 19:49:03.219680  121338 clientconn.go:557] scheme "" not registered, fallback to default scheme
I0110 19:49:03.219734  121338 resolver_conn_wrapper.go:116] ccResolverWrapper: sending new addresses to cc: [{127.0.0.1:2379 0  <nil>}]
I0110 19:49:03.219819  121338 balancer_v1_wrapper.go:125] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0110 19:49:03.220311  121338 balancer_v1_wrapper.go:245] clientv3/balancer: pin "127.0.0.1:2379"
I0110 19:49:03.220449  121338 store.go:1414] Monitoring priorityclasses.scheduling.k8s.io count at <storage-prefix>//priorityclasses
I0110 19:49:03.220478  121338 master.go:416] Enabling API group "scheduling.k8s.io".
I0110 19:49:03.220516  121338 master.go:408] Skipping disabled API group "settings.k8s.io".
I0110 19:49:03.220630  121338 balancer_v1_wrapper.go:125] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0110 19:49:03.220681  121338 reflector.go:169] Listing and watching *scheduling.PriorityClass from storage/cacher.go:/priorityclasses
I0110 19:49:03.220850  121338 storage_factory.go:285] storing storageclasses.storage.k8s.io in storage.k8s.io/v1beta1, reading as storage.k8s.io/__internal from storagebackend.Config{Type:"", Prefix:"650bb268-db60-4e7c-a018-1efbf068072c", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Quorum:false, Paging:true, Codec:runtime.Codec(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0110 19:49:03.221006  121338 clientconn.go:551] parsed scheme: ""
I0110 19:49:03.221021  121338 clientconn.go:557] scheme "" not registered, fallback to default scheme
I0110 19:49:03.221251  121338 resolver_conn_wrapper.go:116] ccResolverWrapper: sending new addresses to cc: [{127.0.0.1:2379 0  <nil>}]
I0110 19:49:03.221341  121338 balancer_v1_wrapper.go:125] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0110 19:49:03.222221  121338 balancer_v1_wrapper.go:245] clientv3/balancer: pin "127.0.0.1:2379"
I0110 19:49:03.222688  121338 balancer_v1_wrapper.go:125] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0110 19:49:03.223131  121338 store.go:1414] Monitoring storageclasses.storage.k8s.io count at <storage-prefix>//storageclasses
I0110 19:49:03.223181  121338 reflector.go:169] Listing and watching *storage.StorageClass from storage/cacher.go:/storageclasses
I0110 19:49:03.223185  121338 storage_factory.go:285] storing volumeattachments.storage.k8s.io in storage.k8s.io/v1beta1, reading as storage.k8s.io/__internal from storagebackend.Config{Type:"", Prefix:"650bb268-db60-4e7c-a018-1efbf068072c", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Quorum:false, Paging:true, Codec:runtime.Codec(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0110 19:49:03.223352  121338 clientconn.go:551] parsed scheme: ""
I0110 19:49:03.223368  121338 clientconn.go:557] scheme "" not registered, fallback to default scheme
I0110 19:49:03.223421  121338 resolver_conn_wrapper.go:116] ccResolverWrapper: sending new addresses to cc: [{127.0.0.1:2379 0  <nil>}]
I0110 19:49:03.223479  121338 balancer_v1_wrapper.go:125] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0110 19:49:03.225372  121338 balancer_v1_wrapper.go:245] clientv3/balancer: pin "127.0.0.1:2379"
I0110 19:49:03.225525  121338 balancer_v1_wrapper.go:125] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0110 19:49:03.225629  121338 store.go:1414] Monitoring volumeattachments.storage.k8s.io count at <storage-prefix>//volumeattachments
I0110 19:49:03.226024  121338 storage_factory.go:285] storing storageclasses.storage.k8s.io in storage.k8s.io/v1beta1, reading as storage.k8s.io/__internal from storagebackend.Config{Type:"", Prefix:"650bb268-db60-4e7c-a018-1efbf068072c", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Quorum:false, Paging:true, Codec:runtime.Codec(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0110 19:49:03.226203  121338 clientconn.go:551] parsed scheme: ""
I0110 19:49:03.226253  121338 clientconn.go:557] scheme "" not registered, fallback to default scheme
I0110 19:49:03.226331  121338 resolver_conn_wrapper.go:116] ccResolverWrapper: sending new addresses to cc: [{127.0.0.1:2379 0  <nil>}]
I0110 19:49:03.226520  121338 reflector.go:169] Listing and watching *storage.VolumeAttachment from storage/cacher.go:/volumeattachments
I0110 19:49:03.226807  121338 balancer_v1_wrapper.go:125] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0110 19:49:03.227130  121338 balancer_v1_wrapper.go:245] clientv3/balancer: pin "127.0.0.1:2379"
I0110 19:49:03.227207  121338 balancer_v1_wrapper.go:125] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0110 19:49:03.227303  121338 store.go:1414] Monitoring storageclasses.storage.k8s.io count at <storage-prefix>//storageclasses
I0110 19:49:03.227331  121338 reflector.go:169] Listing and watching *storage.StorageClass from storage/cacher.go:/storageclasses
I0110 19:49:03.227344  121338 storage_factory.go:285] storing volumeattachments.storage.k8s.io in storage.k8s.io/v1beta1, reading as storage.k8s.io/__internal from storagebackend.Config{Type:"", Prefix:"650bb268-db60-4e7c-a018-1efbf068072c", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Quorum:false, Paging:true, Codec:runtime.Codec(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0110 19:49:03.227483  121338 clientconn.go:551] parsed scheme: ""
I0110 19:49:03.227517  121338 clientconn.go:557] scheme "" not registered, fallback to default scheme
I0110 19:49:03.227586  121338 resolver_conn_wrapper.go:116] ccResolverWrapper: sending new addresses to cc: [{127.0.0.1:2379 0  <nil>}]
I0110 19:49:03.227686  121338 balancer_v1_wrapper.go:125] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0110 19:49:03.230902  121338 balancer_v1_wrapper.go:245] clientv3/balancer: pin "127.0.0.1:2379"
I0110 19:49:03.232039  121338 balancer_v1_wrapper.go:125] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0110 19:49:03.232271  121338 store.go:1414] Monitoring volumeattachments.storage.k8s.io count at <storage-prefix>//volumeattachments
I0110 19:49:03.232302  121338 master.go:416] Enabling API group "storage.k8s.io".
I0110 19:49:03.232545  121338 storage_factory.go:285] storing deployments.apps in apps/v1, reading as apps/__internal from storagebackend.Config{Type:"", Prefix:"650bb268-db60-4e7c-a018-1efbf068072c", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Quorum:false, Paging:true, Codec:runtime.Codec(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0110 19:49:03.232661  121338 clientconn.go:551] parsed scheme: ""
I0110 19:49:03.232680  121338 clientconn.go:557] scheme "" not registered, fallback to default scheme
I0110 19:49:03.232713  121338 reflector.go:169] Listing and watching *storage.VolumeAttachment from storage/cacher.go:/volumeattachments
I0110 19:49:03.232718  121338 resolver_conn_wrapper.go:116] ccResolverWrapper: sending new addresses to cc: [{127.0.0.1:2379 0  <nil>}]
I0110 19:49:03.232854  121338 balancer_v1_wrapper.go:125] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0110 19:49:03.234614  121338 balancer_v1_wrapper.go:245] clientv3/balancer: pin "127.0.0.1:2379"
I0110 19:49:03.234784  121338 store.go:1414] Monitoring deployments.apps count at <storage-prefix>//deployments
I0110 19:49:03.234863  121338 balancer_v1_wrapper.go:125] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0110 19:49:03.234935  121338 reflector.go:169] Listing and watching *apps.Deployment from storage/cacher.go:/deployments
I0110 19:49:03.235687  121338 storage_factory.go:285] storing statefulsets.apps in apps/v1, reading as apps/__internal from storagebackend.Config{Type:"", Prefix:"650bb268-db60-4e7c-a018-1efbf068072c", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Quorum:false, Paging:true, Codec:runtime.Codec(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0110 19:49:03.235783  121338 clientconn.go:551] parsed scheme: ""
I0110 19:49:03.235796  121338 clientconn.go:557] scheme "" not registered, fallback to default scheme
I0110 19:49:03.235835  121338 resolver_conn_wrapper.go:116] ccResolverWrapper: sending new addresses to cc: [{127.0.0.1:2379 0  <nil>}]
I0110 19:49:03.236685  121338 balancer_v1_wrapper.go:125] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0110 19:49:03.237900  121338 balancer_v1_wrapper.go:245] clientv3/balancer: pin "127.0.0.1:2379"
I0110 19:49:03.237999  121338 balancer_v1_wrapper.go:125] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0110 19:49:03.238412  121338 store.go:1414] Monitoring statefulsets.apps count at <storage-prefix>//statefulsets
I0110 19:49:03.238507  121338 reflector.go:169] Listing and watching *apps.StatefulSet from storage/cacher.go:/statefulsets
I0110 19:49:03.241998  121338 storage_factory.go:285] storing controllerrevisions.apps in apps/v1, reading as apps/__internal from storagebackend.Config{Type:"", Prefix:"650bb268-db60-4e7c-a018-1efbf068072c", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Quorum:false, Paging:true, Codec:runtime.Codec(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0110 19:49:03.242168  121338 clientconn.go:551] parsed scheme: ""
I0110 19:49:03.242195  121338 clientconn.go:557] scheme "" not registered, fallback to default scheme
I0110 19:49:03.242260  121338 resolver_conn_wrapper.go:116] ccResolverWrapper: sending new addresses to cc: [{127.0.0.1:2379 0  <nil>}]
I0110 19:49:03.242343  121338 balancer_v1_wrapper.go:125] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0110 19:49:03.247069  121338 balancer_v1_wrapper.go:245] clientv3/balancer: pin "127.0.0.1:2379"
I0110 19:49:03.247219  121338 balancer_v1_wrapper.go:125] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0110 19:49:03.247264  121338 store.go:1414] Monitoring controllerrevisions.apps count at <storage-prefix>//controllerrevisions
I0110 19:49:03.247316  121338 reflector.go:169] Listing and watching *apps.ControllerRevision from storage/cacher.go:/controllerrevisions
I0110 19:49:03.247454  121338 storage_factory.go:285] storing deployments.apps in apps/v1, reading as apps/__internal from storagebackend.Config{Type:"", Prefix:"650bb268-db60-4e7c-a018-1efbf068072c", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Quorum:false, Paging:true, Codec:runtime.Codec(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0110 19:49:03.247588  121338 clientconn.go:551] parsed scheme: ""
I0110 19:49:03.247601  121338 clientconn.go:557] scheme "" not registered, fallback to default scheme
I0110 19:49:03.247647  121338 resolver_conn_wrapper.go:116] ccResolverWrapper: sending new addresses to cc: [{127.0.0.1:2379 0  <nil>}]
I0110 19:49:03.247704  121338 balancer_v1_wrapper.go:125] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0110 19:49:03.248037  121338 balancer_v1_wrapper.go:245] clientv3/balancer: pin "127.0.0.1:2379"
I0110 19:49:03.248125  121338 balancer_v1_wrapper.go:125] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0110 19:49:03.248198  121338 store.go:1414] Monitoring deployments.apps count at <storage-prefix>//deployments
I0110 19:49:03.248399  121338 storage_factory.go:285] storing statefulsets.apps in apps/v1, reading as apps/__internal from storagebackend.Config{Type:"", Prefix:"650bb268-db60-4e7c-a018-1efbf068072c", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Quorum:false, Paging:true, Codec:runtime.Codec(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0110 19:49:03.248520  121338 clientconn.go:551] parsed scheme: ""
I0110 19:49:03.248546  121338 clientconn.go:557] scheme "" not registered, fallback to default scheme
I0110 19:49:03.248579  121338 resolver_conn_wrapper.go:116] ccResolverWrapper: sending new addresses to cc: [{127.0.0.1:2379 0  <nil>}]
I0110 19:49:03.248664  121338 balancer_v1_wrapper.go:125] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0110 19:49:03.248675  121338 reflector.go:169] Listing and watching *apps.Deployment from storage/cacher.go:/deployments
I0110 19:49:03.249721  121338 balancer_v1_wrapper.go:245] clientv3/balancer: pin "127.0.0.1:2379"
I0110 19:49:03.249762  121338 balancer_v1_wrapper.go:125] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0110 19:49:03.249890  121338 store.go:1414] Monitoring statefulsets.apps count at <storage-prefix>//statefulsets
I0110 19:49:03.249919  121338 reflector.go:169] Listing and watching *apps.StatefulSet from storage/cacher.go:/statefulsets
I0110 19:49:03.250092  121338 storage_factory.go:285] storing daemonsets.apps in apps/v1, reading as apps/__internal from storagebackend.Config{Type:"", Prefix:"650bb268-db60-4e7c-a018-1efbf068072c", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Quorum:false, Paging:true, Codec:runtime.Codec(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0110 19:49:03.250188  121338 clientconn.go:551] parsed scheme: ""
I0110 19:49:03.250204  121338 clientconn.go:557] scheme "" not registered, fallback to default scheme
I0110 19:49:03.250252  121338 resolver_conn_wrapper.go:116] ccResolverWrapper: sending new addresses to cc: [{127.0.0.1:2379 0  <nil>}]
I0110 19:49:03.250302  121338 balancer_v1_wrapper.go:125] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0110 19:49:03.250641  121338 balancer_v1_wrapper.go:245] clientv3/balancer: pin "127.0.0.1:2379"
I0110 19:49:03.250781  121338 store.go:1414] Monitoring daemonsets.apps count at <storage-prefix>//daemonsets
I0110 19:49:03.250851  121338 balancer_v1_wrapper.go:125] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0110 19:49:03.250915  121338 storage_factory.go:285] storing replicasets.apps in apps/v1, reading as apps/__internal from storagebackend.Config{Type:"", Prefix:"650bb268-db60-4e7c-a018-1efbf068072c", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Quorum:false, Paging:true, Codec:runtime.Codec(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0110 19:49:03.250982  121338 clientconn.go:551] parsed scheme: ""
I0110 19:49:03.250991  121338 clientconn.go:557] scheme "" not registered, fallback to default scheme
I0110 19:49:03.251021  121338 resolver_conn_wrapper.go:116] ccResolverWrapper: sending new addresses to cc: [{127.0.0.1:2379 0  <nil>}]
I0110 19:49:03.251054  121338 reflector.go:169] Listing and watching *apps.DaemonSet from storage/cacher.go:/daemonsets
I0110 19:49:03.251152  121338 balancer_v1_wrapper.go:125] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0110 19:49:03.251349  121338 balancer_v1_wrapper.go:245] clientv3/balancer: pin "127.0.0.1:2379"
I0110 19:49:03.251434  121338 store.go:1414] Monitoring replicasets.apps count at <storage-prefix>//replicasets
I0110 19:49:03.251573  121338 storage_factory.go:285] storing controllerrevisions.apps in apps/v1, reading as apps/__internal from storagebackend.Config{Type:"", Prefix:"650bb268-db60-4e7c-a018-1efbf068072c", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Quorum:false, Paging:true, Codec:runtime.Codec(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0110 19:49:03.251607  121338 balancer_v1_wrapper.go:125] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0110 19:49:03.251650  121338 clientconn.go:551] parsed scheme: ""
I0110 19:49:03.251661  121338 clientconn.go:557] scheme "" not registered, fallback to default scheme
I0110 19:49:03.251689  121338 resolver_conn_wrapper.go:116] ccResolverWrapper: sending new addresses to cc: [{127.0.0.1:2379 0  <nil>}]
I0110 19:49:03.251725  121338 reflector.go:169] Listing and watching *apps.ReplicaSet from storage/cacher.go:/replicasets
I0110 19:49:03.251827  121338 balancer_v1_wrapper.go:125] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0110 19:49:03.252270  121338 balancer_v1_wrapper.go:245] clientv3/balancer: pin "127.0.0.1:2379"
I0110 19:49:03.252411  121338 store.go:1414] Monitoring controllerrevisions.apps count at <storage-prefix>//controllerrevisions
I0110 19:49:03.252663  121338 storage_factory.go:285] storing deployments.apps in apps/v1, reading as apps/__internal from storagebackend.Config{Type:"", Prefix:"650bb268-db60-4e7c-a018-1efbf068072c", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Quorum:false, Paging:true, Codec:runtime.Codec(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0110 19:49:03.252712  121338 balancer_v1_wrapper.go:125] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0110 19:49:03.252759  121338 reflector.go:169] Listing and watching *apps.ControllerRevision from storage/cacher.go:/controllerrevisions
I0110 19:49:03.252775  121338 clientconn.go:551] parsed scheme: ""
I0110 19:49:03.252787  121338 clientconn.go:557] scheme "" not registered, fallback to default scheme
I0110 19:49:03.252815  121338 resolver_conn_wrapper.go:116] ccResolverWrapper: sending new addresses to cc: [{127.0.0.1:2379 0  <nil>}]
I0110 19:49:03.252851  121338 balancer_v1_wrapper.go:125] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0110 19:49:03.254077  121338 balancer_v1_wrapper.go:245] clientv3/balancer: pin "127.0.0.1:2379"
I0110 19:49:03.254301  121338 balancer_v1_wrapper.go:125] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0110 19:49:03.254754  121338 store.go:1414] Monitoring deployments.apps count at <storage-prefix>//deployments
I0110 19:49:03.254830  121338 reflector.go:169] Listing and watching *apps.Deployment from storage/cacher.go:/deployments
I0110 19:49:03.255663  121338 storage_factory.go:285] storing statefulsets.apps in apps/v1, reading as apps/__internal from storagebackend.Config{Type:"", Prefix:"650bb268-db60-4e7c-a018-1efbf068072c", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Quorum:false, Paging:true, Codec:runtime.Codec(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0110 19:49:03.255779  121338 clientconn.go:551] parsed scheme: ""
I0110 19:49:03.255937  121338 clientconn.go:557] scheme "" not registered, fallback to default scheme
I0110 19:49:03.255988  121338 resolver_conn_wrapper.go:116] ccResolverWrapper: sending new addresses to cc: [{127.0.0.1:2379 0  <nil>}]
I0110 19:49:03.256037  121338 balancer_v1_wrapper.go:125] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0110 19:49:03.256927  121338 balancer_v1_wrapper.go:245] clientv3/balancer: pin "127.0.0.1:2379"
I0110 19:49:03.257003  121338 balancer_v1_wrapper.go:125] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0110 19:49:03.257246  121338 store.go:1414] Monitoring statefulsets.apps count at <storage-prefix>//statefulsets
I0110 19:49:03.257335  121338 reflector.go:169] Listing and watching *apps.StatefulSet from storage/cacher.go:/statefulsets
I0110 19:49:03.257426  121338 storage_factory.go:285] storing daemonsets.apps in apps/v1, reading as apps/__internal from storagebackend.Config{Type:"", Prefix:"650bb268-db60-4e7c-a018-1efbf068072c", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Quorum:false, Paging:true, Codec:runtime.Codec(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0110 19:49:03.258045  121338 clientconn.go:551] parsed scheme: ""
I0110 19:49:03.258101  121338 clientconn.go:557] scheme "" not registered, fallback to default scheme
I0110 19:49:03.258203  121338 resolver_conn_wrapper.go:116] ccResolverWrapper: sending new addresses to cc: [{127.0.0.1:2379 0  <nil>}]
I0110 19:49:03.258370  121338 balancer_v1_wrapper.go:125] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0110 19:49:03.259036  121338 balancer_v1_wrapper.go:245] clientv3/balancer: pin "127.0.0.1:2379"
I0110 19:49:03.259250  121338 balancer_v1_wrapper.go:125] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0110 19:49:03.259432  121338 store.go:1414] Monitoring daemonsets.apps count at <storage-prefix>//daemonsets
I0110 19:49:03.259722  121338 storage_factory.go:285] storing replicasets.apps in apps/v1, reading as apps/__internal from storagebackend.Config{Type:"", Prefix:"650bb268-db60-4e7c-a018-1efbf068072c", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Quorum:false, Paging:true, Codec:runtime.Codec(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0110 19:49:03.259943  121338 clientconn.go:551] parsed scheme: ""
I0110 19:49:03.259967  121338 clientconn.go:557] scheme "" not registered, fallback to default scheme
I0110 19:49:03.260001  121338 resolver_conn_wrapper.go:116] ccResolverWrapper: sending new addresses to cc: [{127.0.0.1:2379 0  <nil>}]
I0110 19:49:03.260068  121338 reflector.go:169] Listing and watching *apps.DaemonSet from storage/cacher.go:/daemonsets
I0110 19:49:03.260216  121338 balancer_v1_wrapper.go:125] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0110 19:49:03.260591  121338 balancer_v1_wrapper.go:245] clientv3/balancer: pin "127.0.0.1:2379"
I0110 19:49:03.260720  121338 balancer_v1_wrapper.go:125] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0110 19:49:03.260723  121338 store.go:1414] Monitoring replicasets.apps count at <storage-prefix>//replicasets
I0110 19:49:03.260762  121338 reflector.go:169] Listing and watching *apps.ReplicaSet from storage/cacher.go:/replicasets
I0110 19:49:03.260843  121338 storage_factory.go:285] storing controllerrevisions.apps in apps/v1, reading as apps/__internal from storagebackend.Config{Type:"", Prefix:"650bb268-db60-4e7c-a018-1efbf068072c", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Quorum:false, Paging:true, Codec:runtime.Codec(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0110 19:49:03.260906  121338 clientconn.go:551] parsed scheme: ""
I0110 19:49:03.260914  121338 clientconn.go:557] scheme "" not registered, fallback to default scheme
I0110 19:49:03.260936  121338 resolver_conn_wrapper.go:116] ccResolverWrapper: sending new addresses to cc: [{127.0.0.1:2379 0  <nil>}]
I0110 19:49:03.260980  121338 balancer_v1_wrapper.go:125] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0110 19:49:03.261390  121338 balancer_v1_wrapper.go:245] clientv3/balancer: pin "127.0.0.1:2379"
I0110 19:49:03.261504  121338 store.go:1414] Monitoring controllerrevisions.apps count at <storage-prefix>//controllerrevisions
I0110 19:49:03.261521  121338 master.go:416] Enabling API group "apps".
I0110 19:49:03.261553  121338 storage_factory.go:285] storing validatingwebhookconfigurations.admissionregistration.k8s.io in admissionregistration.k8s.io/v1beta1, reading as admissionregistration.k8s.io/__internal from storagebackend.Config{Type:"", Prefix:"650bb268-db60-4e7c-a018-1efbf068072c", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Quorum:false, Paging:true, Codec:runtime.Codec(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0110 19:49:03.261622  121338 clientconn.go:551] parsed scheme: ""
I0110 19:49:03.261634  121338 clientconn.go:557] scheme "" not registered, fallback to default scheme
I0110 19:49:03.261663  121338 resolver_conn_wrapper.go:116] ccResolverWrapper: sending new addresses to cc: [{127.0.0.1:2379 0  <nil>}]
I0110 19:49:03.261729  121338 balancer_v1_wrapper.go:125] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0110 19:49:03.261754  121338 reflector.go:169] Listing and watching *apps.ControllerRevision from storage/cacher.go:/controllerrevisions
I0110 19:49:03.261944  121338 balancer_v1_wrapper.go:125] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0110 19:49:03.262143  121338 balancer_v1_wrapper.go:245] clientv3/balancer: pin "127.0.0.1:2379"
I0110 19:49:03.262220  121338 store.go:1414] Monitoring validatingwebhookconfigurations.admissionregistration.k8s.io count at <storage-prefix>//validatingwebhookconfigurations
I0110 19:49:03.262263  121338 storage_factory.go:285] storing mutatingwebhookconfigurations.admissionregistration.k8s.io in admissionregistration.k8s.io/v1beta1, reading as admissionregistration.k8s.io/__internal from storagebackend.Config{Type:"", Prefix:"650bb268-db60-4e7c-a018-1efbf068072c", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Quorum:false, Paging:true, Codec:runtime.Codec(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0110 19:49:03.262324  121338 clientconn.go:551] parsed scheme: ""
I0110 19:49:03.262336  121338 clientconn.go:557] scheme "" not registered, fallback to default scheme
I0110 19:49:03.262365  121338 resolver_conn_wrapper.go:116] ccResolverWrapper: sending new addresses to cc: [{127.0.0.1:2379 0  <nil>}]
I0110 19:49:03.262429  121338 balancer_v1_wrapper.go:125] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0110 19:49:03.262452  121338 reflector.go:169] Listing and watching *admissionregistration.ValidatingWebhookConfiguration from storage/cacher.go:/validatingwebhookconfigurations
I0110 19:49:03.262658  121338 balancer_v1_wrapper.go:125] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0110 19:49:03.262859  121338 balancer_v1_wrapper.go:245] clientv3/balancer: pin "127.0.0.1:2379"
I0110 19:49:03.262942  121338 store.go:1414] Monitoring mutatingwebhookconfigurations.admissionregistration.k8s.io count at <storage-prefix>//mutatingwebhookconfigurations
I0110 19:49:03.262956  121338 master.go:416] Enabling API group "admissionregistration.k8s.io".
I0110 19:49:03.262981  121338 storage_factory.go:285] storing events in v1, reading as __internal from storagebackend.Config{Type:"", Prefix:"650bb268-db60-4e7c-a018-1efbf068072c", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Quorum:false, Paging:true, Codec:runtime.Codec(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0110 19:49:03.263172  121338 clientconn.go:551] parsed scheme: ""
I0110 19:49:03.263362  121338 clientconn.go:557] scheme "" not registered, fallback to default scheme
I0110 19:49:03.263418  121338 balancer_v1_wrapper.go:125] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0110 19:49:03.263449  121338 reflector.go:169] Listing and watching *admissionregistration.MutatingWebhookConfiguration from storage/cacher.go:/mutatingwebhookconfigurations
I0110 19:49:03.264734  121338 resolver_conn_wrapper.go:116] ccResolverWrapper: sending new addresses to cc: [{127.0.0.1:2379 0  <nil>}]
I0110 19:49:03.266474  121338 balancer_v1_wrapper.go:125] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0110 19:49:03.266816  121338 balancer_v1_wrapper.go:245] clientv3/balancer: pin "127.0.0.1:2379"
I0110 19:49:03.266866  121338 store.go:1414] Monitoring events count at <storage-prefix>//events
I0110 19:49:03.266888  121338 master.go:416] Enabling API group "events.k8s.io".
I0110 19:49:03.266904  121338 balancer_v1_wrapper.go:125] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
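
[Editor's note] The storage_factory -> clientconn -> balancer -> store -> reflector sequence above repeats once per registered resource: each store gets its own etcd client connection, and a reflector then does an initial List followed by a Watch to keep the watch cache current (the "Listing and watching *apps.Deployment ..." lines). Below is a minimal, self-contained Go sketch of that list-then-watch loop. All types and names here (Event, ListerWatcher, runReflector, fakeLW) are toy stand-ins for illustration, not the real k8s.io/client-go reflector API.

package main

import (
	"context"
	"fmt"
	"time"
)

// Event is a toy stand-in for a watch event delivered by the API server.
type Event struct {
	Type   string // "ADDED", "MODIFIED", "DELETED"
	Object string // object key, e.g. "default/my-deployment"
}

// ListerWatcher is the minimal contract a reflector needs: a full List to
// seed the cache, then a Watch for incremental updates.
type ListerWatcher interface {
	List(ctx context.Context) ([]string, error)
	Watch(ctx context.Context) (<-chan Event, error)
}

// runReflector seeds the store from List, then applies watch events until the
// context ends or the watch closes (at which point a real reflector re-lists).
func runReflector(ctx context.Context, lw ListerWatcher, store map[string]bool) error {
	objs, err := lw.List(ctx)
	if err != nil {
		return err
	}
	for _, key := range objs {
		store[key] = true
	}
	events, err := lw.Watch(ctx)
	if err != nil {
		return err
	}
	for {
		select {
		case <-ctx.Done():
			return ctx.Err()
		case ev, ok := <-events:
			if !ok {
				return fmt.Errorf("watch channel closed; caller should re-list")
			}
			if ev.Type == "DELETED" {
				delete(store, ev.Object)
			} else {
				store[ev.Object] = true
			}
		}
	}
}

// fakeLW serves one object and then a single DELETED event, for demonstration.
type fakeLW struct{}

func (fakeLW) List(ctx context.Context) ([]string, error) { return []string{"default/web"}, nil }
func (fakeLW) Watch(ctx context.Context) (<-chan Event, error) {
	ch := make(chan Event, 1)
	ch <- Event{Type: "DELETED", Object: "default/web"}
	close(ch)
	return ch, nil
}

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), time.Second)
	defer cancel()
	store := map[string]bool{}
	fmt.Println(runReflector(ctx, fakeLW{}, store), store)
}
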
W0110 19:49:03.274658  121338 genericapiserver.go:334] Skipping API batch/v2alpha1 because it has no resources.
W0110 19:49:03.291648  121338 genericapiserver.go:334] Skipping API rbac.authorization.k8s.io/v1alpha1 because it has no resources.
W0110 19:49:03.292352  121338 genericapiserver.go:334] Skipping API scheduling.k8s.io/v1alpha1 because it has no resources.
W0110 19:49:03.294802  121338 genericapiserver.go:334] Skipping API storage.k8s.io/v1alpha1 because it has no resources.
W0110 19:49:03.309036  121338 genericapiserver.go:334] Skipping API admissionregistration.k8s.io/v1alpha1 because it has no resources.
I0110 19:49:03.312840  121338 healthz.go:170] healthz check etcd failed: etcd client connection not yet established
I0110 19:49:03.312875  121338 healthz.go:170] healthz check poststarthook/bootstrap-controller failed: not finished
I0110 19:49:03.312884  121338 healthz.go:170] healthz check poststarthook/rbac/bootstrap-roles failed: not finished
I0110 19:49:03.312892  121338 healthz.go:170] healthz check poststarthook/scheduling/bootstrap-system-priority-classes failed: not finished
I0110 19:49:03.312899  121338 healthz.go:170] healthz check poststarthook/ca-registration failed: not finished
I0110 19:49:03.313054  121338 wrap.go:47] GET /healthz: (564.331µs) 500
goroutine 27399 [running]:
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/httplog.(*respLogger).recordStatus(0xc00cad0e70, 0x1f4)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/httplog.(*respLogger).WriteHeader(0xc00cad0e70, 0x1f4)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.(*baseTimeoutWriter).WriteHeader(0xc00b257920, 0x1f4)
net/http.Error(0x7fc633708540, 0xc00eb72148, 0xc0018a21a0, 0x18a, 0x1f4)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/healthz.handleRootHealthz.func1(0x7fc633708540, 0xc00eb72148, 0xc00c723100)
net/http.HandlerFunc.ServeHTTP(0xc0099914a0, 0x7fc633708540, 0xc00eb72148, 0xc00c723100)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/mux.(*pathHandler).ServeHTTP(0xc008e5e1c0, 0x7fc633708540, 0xc00eb72148, 0xc00c723100)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/mux.(*PathRecorderMux).ServeHTTP(0xc00fce9650, 0x7fc633708540, 0xc00eb72148, 0xc00c723100)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server.director.ServeHTTP(0x40e9527, 0xe, 0xc00fcb5560, 0xc00fce9650, 0x7fc633708540, 0xc00eb72148, 0xc00c723100)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/filters.WithAuthorization.func1(0x7fc633708540, 0xc00eb72148, 0xc00c723100)
net/http.HandlerFunc.ServeHTTP(0xc00fd3c100, 0x7fc633708540, 0xc00eb72148, 0xc00c723100)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.WithMaxInFlightLimit.func1(0x7fc633708540, 0xc00eb72148, 0xc00c723100)
net/http.HandlerFunc.ServeHTTP(0xc00fd38750, 0x7fc633708540, 0xc00eb72148, 0xc00c723100)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/filters.WithImpersonation.func1(0x7fc633708540, 0xc00eb72148, 0xc00c723100)
net/http.HandlerFunc.ServeHTTP(0xc00fd3c140, 0x7fc633708540, 0xc00eb72148, 0xc00c723100)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/filters.WithAuthentication.func1(0x7fc633708540, 0xc00eb72148, 0xc00c723000)
net/http.HandlerFunc.ServeHTTP(0xc00d90caa0, 0x7fc633708540, 0xc00eb72148, 0xc00c723000)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.(*timeoutHandler).ServeHTTP.func1(0xc00b666300, 0xc00fd34c20, 0x604d680, 0xc00eb72148, 0xc00c723000)
created by k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.(*timeoutHandler).ServeHTTP

logging error output: "[+]ping ok\n[+]log ok\n[-]etcd failed: reason withheld\n[+]poststarthook/generic-apiserver-start-informers ok\n[-]poststarthook/bootstrap-controller failed: reason withheld\n[-]poststarthook/rbac/bootstrap-roles failed: reason withheld\n[-]poststarthook/scheduling/bootstrap-system-priority-classes failed: reason withheld\n[-]poststarthook/ca-registration failed: reason withheld\nhealthz check failed\n"
 [Go-http-client/1.1 127.0.0.1:38228]
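
[Editor's note] The 500 above is expected during startup: /healthz aggregates named checks, and the body lists each one ("[-]etcd failed: reason withheld") until the etcd client connects and the post-start hooks finish. A test harness typically polls the endpoint until it returns 200 before driving traffic. A minimal sketch of that polling loop follows; the base URL and timeout are illustrative assumptions, and waitForHealthz is a hypothetical helper name, not part of the test under discussion.

package main

import (
	"fmt"
	"io"
	"net/http"
	"time"
)

// waitForHealthz polls GET <base>/healthz until it returns 200 OK or the
// deadline passes. Non-200 bodies enumerate the failing checks, exactly as
// in the "logging error output" lines above.
func waitForHealthz(base string, timeout time.Duration) error {
	deadline := time.Now().Add(timeout)
	for {
		resp, err := http.Get(base + "/healthz")
		if err == nil {
			body, _ := io.ReadAll(resp.Body)
			resp.Body.Close()
			if resp.StatusCode == http.StatusOK {
				return nil
			}
			// e.g. "[-]etcd failed: reason withheld" while hooks are pending.
			fmt.Printf("healthz not ready (%d): %s", resp.StatusCode, body)
		}
		if time.Now().After(deadline) {
			return fmt.Errorf("apiserver not healthy after %s", timeout)
		}
		time.Sleep(100 * time.Millisecond)
	}
}

func main() {
	// Hypothetical local address; the integration test wires up its own server.
	if err := waitForHealthz("http://127.0.0.1:8080", 30*time.Second); err != nil {
		fmt.Println(err)
	}
}
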
I0110 19:49:03.314138  121338 wrap.go:47] GET /api/v1/services: (1.239456ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38230]
I0110 19:49:03.319343  121338 wrap.go:47] GET /api/v1/services: (1.985655ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38230]
I0110 19:49:03.323046  121338 wrap.go:47] GET /api/v1/namespaces/default: (1.113582ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38230]
I0110 19:49:03.325980  121338 wrap.go:47] POST /api/v1/namespaces: (2.411026ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38230]
I0110 19:49:03.327525  121338 wrap.go:47] GET /api/v1/namespaces/default/services/kubernetes: (966.675µs) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38230]
I0110 19:49:03.332267  121338 wrap.go:47] POST /api/v1/namespaces/default/services: (4.313882ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38230]
I0110 19:49:03.334391  121338 wrap.go:47] GET /api/v1/namespaces/default/endpoints/kubernetes: (1.595998ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38230]
I0110 19:49:03.337467  121338 wrap.go:47] POST /api/v1/namespaces/default/endpoints: (2.345712ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38230]
I0110 19:49:03.343104  121338 wrap.go:47] GET /api/v1/namespaces/default: (3.365699ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38228]
I0110 19:49:03.343343  121338 wrap.go:47] GET /api/v1/namespaces/kube-system: (4.197225ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38230]
I0110 19:49:03.346034  121338 wrap.go:47] POST /api/v1/namespaces: (1.937316ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38232]
I0110 19:49:03.346167  121338 wrap.go:47] GET /api/v1/services: (2.16354ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38228]
I0110 19:49:03.347272  121338 wrap.go:47] GET /api/v1/namespaces/default/services/kubernetes: (2.6097ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38234]
I0110 19:49:03.347922  121338 wrap.go:47] GET /api/v1/services: (3.448063ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38230]
I0110 19:49:03.348643  121338 wrap.go:47] GET /api/v1/namespaces/kube-public: (2.018335ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38236]
I0110 19:49:03.348694  121338 wrap.go:47] GET /api/v1/namespaces/default/endpoints/kubernetes: (1.121149ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38234]
I0110 19:49:03.350646  121338 wrap.go:47] POST /api/v1/namespaces: (1.582971ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38230]
I0110 19:49:03.351987  121338 wrap.go:47] GET /api/v1/namespaces/kube-node-lease: (958.56µs) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38234]
I0110 19:49:03.353840  121338 wrap.go:47] POST /api/v1/namespaces: (1.468164ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38234]
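
[Editor's note] The request pairs above (GET 404 followed by POST 201 for the default, kube-system, kube-public, and kube-node-lease namespaces, and for the kubernetes service and endpoints) are the bootstrap controller's get-or-create pattern. A minimal sketch of that pattern over the REST API is below. The base URL is an illustrative assumption and ensureNamespace is a hypothetical helper; the real controller runs in-process with proper clients and authentication.

package main

import (
	"bytes"
	"fmt"
	"net/http"
)

// ensureNamespace mirrors the GET-404-then-POST-201 pairs in the log: read the
// object first, and create it only when the read comes back 404 Not Found.
func ensureNamespace(base, name string) error {
	resp, err := http.Get(base + "/api/v1/namespaces/" + name)
	if err != nil {
		return err
	}
	resp.Body.Close()
	if resp.StatusCode == http.StatusOK {
		return nil // already exists
	}
	if resp.StatusCode != http.StatusNotFound {
		return fmt.Errorf("unexpected status %d reading namespace %q", resp.StatusCode, name)
	}
	body := fmt.Sprintf(`{"apiVersion":"v1","kind":"Namespace","metadata":{"name":%q}}`, name)
	resp, err = http.Post(base+"/api/v1/namespaces", "application/json", bytes.NewBufferString(body))
	if err != nil {
		return err
	}
	resp.Body.Close()
	if resp.StatusCode == http.StatusConflict {
		return nil // a concurrent creator won the race; treat as success
	}
	if resp.StatusCode != http.StatusCreated {
		return fmt.Errorf("create namespace %q: status %d", name, resp.StatusCode)
	}
	return nil
}

func main() {
	// Illustrative only: loops over the namespaces created in the log above.
	for _, ns := range []string{"default", "kube-system", "kube-public", "kube-node-lease"} {
		if err := ensureNamespace("http://127.0.0.1:8080", ns); err != nil {
			fmt.Println(err)
		}
	}
}
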
I0110 19:49:03.413950  121338 healthz.go:170] healthz check etcd failed: etcd client connection not yet established
I0110 19:49:03.413991  121338 healthz.go:170] healthz check poststarthook/rbac/bootstrap-roles failed: not finished
I0110 19:49:03.414002  121338 healthz.go:170] healthz check poststarthook/scheduling/bootstrap-system-priority-classes failed: not finished
I0110 19:49:03.414010  121338 healthz.go:170] healthz check poststarthook/ca-registration failed: not finished
I0110 19:49:03.414212  121338 wrap.go:47] GET /healthz: (391.599µs) 500
goroutine 27354 [running]:
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/httplog.(*respLogger).recordStatus(0xc00de90bd0, 0x1f4)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/httplog.(*respLogger).WriteHeader(0xc00de90bd0, 0x1f4)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.(*baseTimeoutWriter).WriteHeader(0xc00e35f7c0, 0x1f4)
net/http.Error(0x7fc633708540, 0xc00f52c128, 0xc00df64180, 0x175, 0x1f4)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/healthz.handleRootHealthz.func1(0x7fc633708540, 0xc00f52c128, 0xc00dfeab00)
net/http.HandlerFunc.ServeHTTP(0xc0099914a0, 0x7fc633708540, 0xc00f52c128, 0xc00dfeab00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/mux.(*pathHandler).ServeHTTP(0xc008e5e1c0, 0x7fc633708540, 0xc00f52c128, 0xc00dfeab00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/mux.(*PathRecorderMux).ServeHTTP(0xc00fce9650, 0x7fc633708540, 0xc00f52c128, 0xc00dfeab00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server.director.ServeHTTP(0x40e9527, 0xe, 0xc00fcb5560, 0xc00fce9650, 0x7fc633708540, 0xc00f52c128, 0xc00dfeab00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/filters.WithAuthorization.func1(0x7fc633708540, 0xc00f52c128, 0xc00dfeab00)
net/http.HandlerFunc.ServeHTTP(0xc00fd3c100, 0x7fc633708540, 0xc00f52c128, 0xc00dfeab00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.WithMaxInFlightLimit.func1(0x7fc633708540, 0xc00f52c128, 0xc00dfeab00)
net/http.HandlerFunc.ServeHTTP(0xc00fd38750, 0x7fc633708540, 0xc00f52c128, 0xc00dfeab00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/filters.WithImpersonation.func1(0x7fc633708540, 0xc00f52c128, 0xc00dfeab00)
net/http.HandlerFunc.ServeHTTP(0xc00fd3c140, 0x7fc633708540, 0xc00f52c128, 0xc00dfeab00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/filters.WithAuthentication.func1(0x7fc633708540, 0xc00f52c128, 0xc00dfeaa00)
net/http.HandlerFunc.ServeHTTP(0xc00d90caa0, 0x7fc633708540, 0xc00f52c128, 0xc00dfeaa00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.(*timeoutHandler).ServeHTTP.func1(0xc00df250e0, 0xc00fd34c20, 0x604d680, 0xc00f52c128, 0xc00dfeaa00)
created by k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.(*timeoutHandler).ServeHTTP

logging error output: "[+]ping ok\n[+]log ok\n[-]etcd failed: reason withheld\n[+]poststarthook/generic-apiserver-start-informers ok\n[+]poststarthook/bootstrap-controller ok\n[-]poststarthook/rbac/bootstrap-roles failed: reason withheld\n[-]poststarthook/scheduling/bootstrap-system-priority-classes failed: reason withheld\n[-]poststarthook/ca-registration failed: reason withheld\nhealthz check failed\n"
 [Go-http-client/1.1 127.0.0.1:38234]
I0110 19:49:03.513905  121338 healthz.go:170] healthz check etcd failed: etcd client connection not yet established
I0110 19:49:03.513946  121338 healthz.go:170] healthz check poststarthook/rbac/bootstrap-roles failed: not finished
I0110 19:49:03.513956  121338 healthz.go:170] healthz check poststarthook/scheduling/bootstrap-system-priority-classes failed: not finished
I0110 19:49:03.513963  121338 healthz.go:170] healthz check poststarthook/ca-registration failed: not finished
I0110 19:49:03.514155  121338 wrap.go:47] GET /healthz: (379.912µs) 500
goroutine 27338 [running]:
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/httplog.(*respLogger).recordStatus(0xc007f06850, 0x1f4)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/httplog.(*respLogger).WriteHeader(0xc007f06850, 0x1f4)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.(*baseTimeoutWriter).WriteHeader(0xc00b223c40, 0x1f4)
net/http.Error(0x7fc633708540, 0xc0059e0978, 0xc00e0c2300, 0x175, 0x1f4)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/healthz.handleRootHealthz.func1(0x7fc633708540, 0xc0059e0978, 0xc000b4ae00)
net/http.HandlerFunc.ServeHTTP(0xc0099914a0, 0x7fc633708540, 0xc0059e0978, 0xc000b4ae00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/mux.(*pathHandler).ServeHTTP(0xc008e5e1c0, 0x7fc633708540, 0xc0059e0978, 0xc000b4ae00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/mux.(*PathRecorderMux).ServeHTTP(0xc00fce9650, 0x7fc633708540, 0xc0059e0978, 0xc000b4ae00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server.director.ServeHTTP(0x40e9527, 0xe, 0xc00fcb5560, 0xc00fce9650, 0x7fc633708540, 0xc0059e0978, 0xc000b4ae00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/filters.WithAuthorization.func1(0x7fc633708540, 0xc0059e0978, 0xc000b4ae00)
net/http.HandlerFunc.ServeHTTP(0xc00fd3c100, 0x7fc633708540, 0xc0059e0978, 0xc000b4ae00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.WithMaxInFlightLimit.func1(0x7fc633708540, 0xc0059e0978, 0xc000b4ae00)
net/http.HandlerFunc.ServeHTTP(0xc00fd38750, 0x7fc633708540, 0xc0059e0978, 0xc000b4ae00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/filters.WithImpersonation.func1(0x7fc633708540, 0xc0059e0978, 0xc000b4ae00)
net/http.HandlerFunc.ServeHTTP(0xc00fd3c140, 0x7fc633708540, 0xc0059e0978, 0xc000b4ae00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/filters.WithAuthentication.func1(0x7fc633708540, 0xc0059e0978, 0xc000b4ad00)
net/http.HandlerFunc.ServeHTTP(0xc00d90caa0, 0x7fc633708540, 0xc0059e0978, 0xc000b4ad00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.(*timeoutHandler).ServeHTTP.func1(0xc00aa30900, 0xc00fd34c20, 0x604d680, 0xc0059e0978, 0xc000b4ad00)
created by k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.(*timeoutHandler).ServeHTTP

logging error output: "[+]ping ok\n[+]log ok\n[-]etcd failed: reason withheld\n[+]poststarthook/generic-apiserver-start-informers ok\n[+]poststarthook/bootstrap-controller ok\n[-]poststarthook/rbac/bootstrap-roles failed: reason withheld\n[-]poststarthook/scheduling/bootstrap-system-priority-classes failed: reason withheld\n[-]poststarthook/ca-registration failed: reason withheld\nhealthz check failed\n"
 [Go-http-client/1.1 127.0.0.1:38234]
I0110 19:49:03.613955  121338 healthz.go:170] healthz check etcd failed: etcd client connection not yet established
I0110 19:49:03.613991  121338 healthz.go:170] healthz check poststarthook/rbac/bootstrap-roles failed: not finished
I0110 19:49:03.614001  121338 healthz.go:170] healthz check poststarthook/scheduling/bootstrap-system-priority-classes failed: not finished
I0110 19:49:03.614008  121338 healthz.go:170] healthz check poststarthook/ca-registration failed: not finished
I0110 19:49:03.614167  121338 wrap.go:47] GET /healthz: (329.134µs) 500
goroutine 27356 [running]:
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/httplog.(*respLogger).recordStatus(0xc00de90d20, 0x1f4)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/httplog.(*respLogger).WriteHeader(0xc00de90d20, 0x1f4)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.(*baseTimeoutWriter).WriteHeader(0xc00e35faa0, 0x1f4)
net/http.Error(0x7fc633708540, 0xc00f52c170, 0xc00df64a80, 0x175, 0x1f4)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/healthz.handleRootHealthz.func1(0x7fc633708540, 0xc00f52c170, 0xc00dfeb300)
net/http.HandlerFunc.ServeHTTP(0xc0099914a0, 0x7fc633708540, 0xc00f52c170, 0xc00dfeb300)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/mux.(*pathHandler).ServeHTTP(0xc008e5e1c0, 0x7fc633708540, 0xc00f52c170, 0xc00dfeb300)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/mux.(*PathRecorderMux).ServeHTTP(0xc00fce9650, 0x7fc633708540, 0xc00f52c170, 0xc00dfeb300)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server.director.ServeHTTP(0x40e9527, 0xe, 0xc00fcb5560, 0xc00fce9650, 0x7fc633708540, 0xc00f52c170, 0xc00dfeb300)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/filters.WithAuthorization.func1(0x7fc633708540, 0xc00f52c170, 0xc00dfeb300)
net/http.HandlerFunc.ServeHTTP(0xc00fd3c100, 0x7fc633708540, 0xc00f52c170, 0xc00dfeb300)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.WithMaxInFlightLimit.func1(0x7fc633708540, 0xc00f52c170, 0xc00dfeb300)
net/http.HandlerFunc.ServeHTTP(0xc00fd38750, 0x7fc633708540, 0xc00f52c170, 0xc00dfeb300)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/filters.WithImpersonation.func1(0x7fc633708540, 0xc00f52c170, 0xc00dfeb300)
net/http.HandlerFunc.ServeHTTP(0xc00fd3c140, 0x7fc633708540, 0xc00f52c170, 0xc00dfeb300)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/filters.WithAuthentication.func1(0x7fc633708540, 0xc00f52c170, 0xc00dfeb200)
net/http.HandlerFunc.ServeHTTP(0xc00d90caa0, 0x7fc633708540, 0xc00f52c170, 0xc00dfeb200)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.(*timeoutHandler).ServeHTTP.func1(0xc00df25380, 0xc00fd34c20, 0x604d680, 0xc00f52c170, 0xc00dfeb200)
created by k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.(*timeoutHandler).ServeHTTP

logging error output: "[+]ping ok\n[+]log ok\n[-]etcd failed: reason withheld\n[+]poststarthook/generic-apiserver-start-informers ok\n[+]poststarthook/bootstrap-controller ok\n[-]poststarthook/rbac/bootstrap-roles failed: reason withheld\n[-]poststarthook/scheduling/bootstrap-system-priority-classes failed: reason withheld\n[-]poststarthook/ca-registration failed: reason withheld\nhealthz check failed\n"
 [Go-http-client/1.1 127.0.0.1:38234]
I0110 19:49:03.713978  121338 healthz.go:170] healthz check etcd failed: etcd client connection not yet established
I0110 19:49:03.714047  121338 healthz.go:170] healthz check poststarthook/rbac/bootstrap-roles failed: not finished
I0110 19:49:03.714059  121338 healthz.go:170] healthz check poststarthook/scheduling/bootstrap-system-priority-classes failed: not finished
I0110 19:49:03.714067  121338 healthz.go:170] healthz check poststarthook/ca-registration failed: not finished
I0110 19:49:03.714225  121338 wrap.go:47] GET /healthz: (398.822µs) 500
goroutine 27482 [running]:
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/httplog.(*respLogger).recordStatus(0xc00264ddc0, 0x1f4)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/httplog.(*respLogger).WriteHeader(0xc00264ddc0, 0x1f4)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.(*baseTimeoutWriter).WriteHeader(0xc009904fa0, 0x1f4)
net/http.Error(0x7fc633708540, 0xc0055786f8, 0xc00945e780, 0x175, 0x1f4)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/healthz.handleRootHealthz.func1(0x7fc633708540, 0xc0055786f8, 0xc002b84d00)
net/http.HandlerFunc.ServeHTTP(0xc0099914a0, 0x7fc633708540, 0xc0055786f8, 0xc002b84d00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/mux.(*pathHandler).ServeHTTP(0xc008e5e1c0, 0x7fc633708540, 0xc0055786f8, 0xc002b84d00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/mux.(*PathRecorderMux).ServeHTTP(0xc00fce9650, 0x7fc633708540, 0xc0055786f8, 0xc002b84d00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server.director.ServeHTTP(0x40e9527, 0xe, 0xc00fcb5560, 0xc00fce9650, 0x7fc633708540, 0xc0055786f8, 0xc002b84d00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/filters.WithAuthorization.func1(0x7fc633708540, 0xc0055786f8, 0xc002b84d00)
net/http.HandlerFunc.ServeHTTP(0xc00fd3c100, 0x7fc633708540, 0xc0055786f8, 0xc002b84d00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.WithMaxInFlightLimit.func1(0x7fc633708540, 0xc0055786f8, 0xc002b84d00)
net/http.HandlerFunc.ServeHTTP(0xc00fd38750, 0x7fc633708540, 0xc0055786f8, 0xc002b84d00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/filters.WithImpersonation.func1(0x7fc633708540, 0xc0055786f8, 0xc002b84d00)
net/http.HandlerFunc.ServeHTTP(0xc00fd3c140, 0x7fc633708540, 0xc0055786f8, 0xc002b84d00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/filters.WithAuthentication.func1(0x7fc633708540, 0xc0055786f8, 0xc002b84c00)
net/http.HandlerFunc.ServeHTTP(0xc00d90caa0, 0x7fc633708540, 0xc0055786f8, 0xc002b84c00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.(*timeoutHandler).ServeHTTP.func1(0xc00aa1b260, 0xc00fd34c20, 0x604d680, 0xc0055786f8, 0xc002b84c00)
created by k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.(*timeoutHandler).ServeHTTP

logging error output: "[+]ping ok\n[+]log ok\n[-]etcd failed: reason withheld\n[+]poststarthook/generic-apiserver-start-informers ok\n[+]poststarthook/bootstrap-controller ok\n[-]poststarthook/rbac/bootstrap-roles failed: reason withheld\n[-]poststarthook/scheduling/bootstrap-system-priority-classes failed: reason withheld\n[-]poststarthook/ca-registration failed: reason withheld\nhealthz check failed\n"
 [Go-http-client/1.1 127.0.0.1:38234]
I0110 19:49:03.814039  121338 healthz.go:170] healthz check etcd failed: etcd client connection not yet established
I0110 19:49:03.814067  121338 healthz.go:170] healthz check poststarthook/rbac/bootstrap-roles failed: not finished
I0110 19:49:03.814075  121338 healthz.go:170] healthz check poststarthook/scheduling/bootstrap-system-priority-classes failed: not finished
I0110 19:49:03.814079  121338 healthz.go:170] healthz check poststarthook/ca-registration failed: not finished
I0110 19:49:03.814195  121338 wrap.go:47] GET /healthz: (262.884µs) 500
goroutine 27340 [running]:
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/httplog.(*respLogger).recordStatus(0xc007f06a10, 0x1f4)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/httplog.(*respLogger).WriteHeader(0xc007f06a10, 0x1f4)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.(*baseTimeoutWriter).WriteHeader(0xc00b223fe0, 0x1f4)
net/http.Error(0x7fc633708540, 0xc0059e0980, 0xc00e0c2c00, 0x175, 0x1f4)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/healthz.handleRootHealthz.func1(0x7fc633708540, 0xc0059e0980, 0xc000b4b200)
net/http.HandlerFunc.ServeHTTP(0xc0099914a0, 0x7fc633708540, 0xc0059e0980, 0xc000b4b200)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/mux.(*pathHandler).ServeHTTP(0xc008e5e1c0, 0x7fc633708540, 0xc0059e0980, 0xc000b4b200)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/mux.(*PathRecorderMux).ServeHTTP(0xc00fce9650, 0x7fc633708540, 0xc0059e0980, 0xc000b4b200)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server.director.ServeHTTP(0x40e9527, 0xe, 0xc00fcb5560, 0xc00fce9650, 0x7fc633708540, 0xc0059e0980, 0xc000b4b200)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/filters.WithAuthorization.func1(0x7fc633708540, 0xc0059e0980, 0xc000b4b200)
net/http.HandlerFunc.ServeHTTP(0xc00fd3c100, 0x7fc633708540, 0xc0059e0980, 0xc000b4b200)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.WithMaxInFlightLimit.func1(0x7fc633708540, 0xc0059e0980, 0xc000b4b200)
net/http.HandlerFunc.ServeHTTP(0xc00fd38750, 0x7fc633708540, 0xc0059e0980, 0xc000b4b200)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/filters.WithImpersonation.func1(0x7fc633708540, 0xc0059e0980, 0xc000b4b200)
net/http.HandlerFunc.ServeHTTP(0xc00fd3c140, 0x7fc633708540, 0xc0059e0980, 0xc000b4b200)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/filters.WithAuthentication.func1(0x7fc633708540, 0xc0059e0980, 0xc000b4b100)
net/http.HandlerFunc.ServeHTTP(0xc00d90caa0, 0x7fc633708540, 0xc0059e0980, 0xc000b4b100)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.(*timeoutHandler).ServeHTTP.func1(0xc00aa30a80, 0xc00fd34c20, 0x604d680, 0xc0059e0980, 0xc000b4b100)
created by k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.(*timeoutHandler).ServeHTTP

logging error output: "[+]ping ok\n[+]log ok\n[-]etcd failed: reason withheld\n[+]poststarthook/generic-apiserver-start-informers ok\n[+]poststarthook/bootstrap-controller ok\n[-]poststarthook/rbac/bootstrap-roles failed: reason withheld\n[-]poststarthook/scheduling/bootstrap-system-priority-classes failed: reason withheld\n[-]poststarthook/ca-registration failed: reason withheld\nhealthz check failed\n"
 [Go-http-client/1.1 127.0.0.1:38234]
I0110 19:49:03.914828  121338 healthz.go:170] healthz check etcd failed: etcd client connection not yet established
I0110 19:49:03.914880  121338 healthz.go:170] healthz check poststarthook/rbac/bootstrap-roles failed: not finished
I0110 19:49:03.914891  121338 healthz.go:170] healthz check poststarthook/scheduling/bootstrap-system-priority-classes failed: not finished
I0110 19:49:03.914899  121338 healthz.go:170] healthz check poststarthook/ca-registration failed: not finished
I0110 19:49:03.915056  121338 wrap.go:47] GET /healthz: (357.281µs) 500
goroutine 27342 [running]:
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/httplog.(*respLogger).recordStatus(0xc007f06af0, 0x1f4)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/httplog.(*respLogger).WriteHeader(0xc007f06af0, 0x1f4)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.(*baseTimeoutWriter).WriteHeader(0xc0098a60e0, 0x1f4)
net/http.Error(0x7fc633708540, 0xc0059e09a8, 0xc00e0c3080, 0x175, 0x1f4)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/healthz.handleRootHealthz.func1(0x7fc633708540, 0xc0059e09a8, 0xc002af4200)
net/http.HandlerFunc.ServeHTTP(0xc0099914a0, 0x7fc633708540, 0xc0059e09a8, 0xc002af4200)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/mux.(*pathHandler).ServeHTTP(0xc008e5e1c0, 0x7fc633708540, 0xc0059e09a8, 0xc002af4200)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/mux.(*PathRecorderMux).ServeHTTP(0xc00fce9650, 0x7fc633708540, 0xc0059e09a8, 0xc002af4200)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server.director.ServeHTTP(0x40e9527, 0xe, 0xc00fcb5560, 0xc00fce9650, 0x7fc633708540, 0xc0059e09a8, 0xc002af4200)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/filters.WithAuthorization.func1(0x7fc633708540, 0xc0059e09a8, 0xc002af4200)
net/http.HandlerFunc.ServeHTTP(0xc00fd3c100, 0x7fc633708540, 0xc0059e09a8, 0xc002af4200)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.WithMaxInFlightLimit.func1(0x7fc633708540, 0xc0059e09a8, 0xc002af4200)
net/http.HandlerFunc.ServeHTTP(0xc00fd38750, 0x7fc633708540, 0xc0059e09a8, 0xc002af4200)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/filters.WithImpersonation.func1(0x7fc633708540, 0xc0059e09a8, 0xc002af4200)
net/http.HandlerFunc.ServeHTTP(0xc00fd3c140, 0x7fc633708540, 0xc0059e09a8, 0xc002af4200)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/filters.WithAuthentication.func1(0x7fc633708540, 0xc0059e09a8, 0xc002af4100)
net/http.HandlerFunc.ServeHTTP(0xc00d90caa0, 0x7fc633708540, 0xc0059e09a8, 0xc002af4100)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.(*timeoutHandler).ServeHTTP.func1(0xc00aa30c00, 0xc00fd34c20, 0x604d680, 0xc0059e09a8, 0xc002af4100)
created by k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.(*timeoutHandler).ServeHTTP

logging error output: "[+]ping ok\n[+]log ok\n[-]etcd failed: reason withheld\n[+]poststarthook/generic-apiserver-start-informers ok\n[+]poststarthook/bootstrap-controller ok\n[-]poststarthook/rbac/bootstrap-roles failed: reason withheld\n[-]poststarthook/scheduling/bootstrap-system-priority-classes failed: reason withheld\n[-]poststarthook/ca-registration failed: reason withheld\nhealthz check failed\n"
 [Go-http-client/1.1 127.0.0.1:38234]
I0110 19:49:04.013956  121338 healthz.go:170] healthz check etcd failed: etcd client connection not yet established
I0110 19:49:04.013988  121338 healthz.go:170] healthz check poststarthook/rbac/bootstrap-roles failed: not finished
I0110 19:49:04.013995  121338 healthz.go:170] healthz check poststarthook/scheduling/bootstrap-system-priority-classes failed: not finished
I0110 19:49:04.014000  121338 healthz.go:170] healthz check poststarthook/ca-registration failed: not finished
I0110 19:49:04.014178  121338 wrap.go:47] GET /healthz: (347.468µs) 500
goroutine 27358 [running]:
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/httplog.(*respLogger).recordStatus(0xc00de90e00, 0x1f4)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/httplog.(*respLogger).WriteHeader(0xc00de90e00, 0x1f4)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.(*baseTimeoutWriter).WriteHeader(0xc00e35fb40, 0x1f4)
net/http.Error(0x7fc633708540, 0xc00f52c178, 0xc00df64f00, 0x175, 0x1f4)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/healthz.handleRootHealthz.func1(0x7fc633708540, 0xc00f52c178, 0xc00dfeb700)
net/http.HandlerFunc.ServeHTTP(0xc0099914a0, 0x7fc633708540, 0xc00f52c178, 0xc00dfeb700)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/mux.(*pathHandler).ServeHTTP(0xc008e5e1c0, 0x7fc633708540, 0xc00f52c178, 0xc00dfeb700)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/mux.(*PathRecorderMux).ServeHTTP(0xc00fce9650, 0x7fc633708540, 0xc00f52c178, 0xc00dfeb700)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server.director.ServeHTTP(0x40e9527, 0xe, 0xc00fcb5560, 0xc00fce9650, 0x7fc633708540, 0xc00f52c178, 0xc00dfeb700)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/filters.WithAuthorization.func1(0x7fc633708540, 0xc00f52c178, 0xc00dfeb700)
net/http.HandlerFunc.ServeHTTP(0xc00fd3c100, 0x7fc633708540, 0xc00f52c178, 0xc00dfeb700)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.WithMaxInFlightLimit.func1(0x7fc633708540, 0xc00f52c178, 0xc00dfeb700)
net/http.HandlerFunc.ServeHTTP(0xc00fd38750, 0x7fc633708540, 0xc00f52c178, 0xc00dfeb700)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/filters.WithImpersonation.func1(0x7fc633708540, 0xc00f52c178, 0xc00dfeb700)
net/http.HandlerFunc.ServeHTTP(0xc00fd3c140, 0x7fc633708540, 0xc00f52c178, 0xc00dfeb700)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/filters.WithAuthentication.func1(0x7fc633708540, 0xc00f52c178, 0xc00dfeb600)
net/http.HandlerFunc.ServeHTTP(0xc00d90caa0, 0x7fc633708540, 0xc00f52c178, 0xc00dfeb600)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.(*timeoutHandler).ServeHTTP.func1(0xc00df25440, 0xc00fd34c20, 0x604d680, 0xc00f52c178, 0xc00dfeb600)
created by k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.(*timeoutHandler).ServeHTTP

logging error output: "[+]ping ok\n[+]log ok\n[-]etcd failed: reason withheld\n[+]poststarthook/generic-apiserver-start-informers ok\n[+]poststarthook/bootstrap-controller ok\n[-]poststarthook/rbac/bootstrap-roles failed: reason withheld\n[-]poststarthook/scheduling/bootstrap-system-priority-classes failed: reason withheld\n[-]poststarthook/ca-registration failed: reason withheld\nhealthz check failed\n"
 [Go-http-client/1.1 127.0.0.1:38234]
I0110 19:49:04.058719  121338 clientconn.go:551] parsed scheme: ""
I0110 19:49:04.058764  121338 clientconn.go:557] scheme "" not registered, fallback to default scheme
I0110 19:49:04.058825  121338 resolver_conn_wrapper.go:116] ccResolverWrapper: sending new addresses to cc: [{127.0.0.1:2379 0  <nil>}]
I0110 19:49:04.058907  121338 balancer_v1_wrapper.go:125] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0110 19:49:04.059376  121338 balancer_v1_wrapper.go:245] clientv3/balancer: pin "127.0.0.1:2379"
I0110 19:49:04.059434  121338 balancer_v1_wrapper.go:125] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0110 19:49:04.115180  121338 healthz.go:170] healthz check poststarthook/rbac/bootstrap-roles failed: not finished
I0110 19:49:04.115212  121338 healthz.go:170] healthz check poststarthook/scheduling/bootstrap-system-priority-classes failed: not finished
I0110 19:49:04.115220  121338 healthz.go:170] healthz check poststarthook/ca-registration failed: not finished
I0110 19:49:04.115388  121338 wrap.go:47] GET /healthz: (1.556586ms) 500
goroutine 27360 [running]:
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/httplog.(*respLogger).recordStatus(0xc00de90ee0, 0x1f4)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/httplog.(*respLogger).WriteHeader(0xc00de90ee0, 0x1f4)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.(*baseTimeoutWriter).WriteHeader(0xc00e35fbe0, 0x1f4)
net/http.Error(0x7fc633708540, 0xc00f52c180, 0xc005c06160, 0x160, 0x1f4)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/healthz.handleRootHealthz.func1(0x7fc633708540, 0xc00f52c180, 0xc00dfebb00)
net/http.HandlerFunc.ServeHTTP(0xc0099914a0, 0x7fc633708540, 0xc00f52c180, 0xc00dfebb00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/mux.(*pathHandler).ServeHTTP(0xc008e5e1c0, 0x7fc633708540, 0xc00f52c180, 0xc00dfebb00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/mux.(*PathRecorderMux).ServeHTTP(0xc00fce9650, 0x7fc633708540, 0xc00f52c180, 0xc00dfebb00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server.director.ServeHTTP(0x40e9527, 0xe, 0xc00fcb5560, 0xc00fce9650, 0x7fc633708540, 0xc00f52c180, 0xc00dfebb00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/filters.WithAuthorization.func1(0x7fc633708540, 0xc00f52c180, 0xc00dfebb00)
net/http.HandlerFunc.ServeHTTP(0xc00fd3c100, 0x7fc633708540, 0xc00f52c180, 0xc00dfebb00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.WithMaxInFlightLimit.func1(0x7fc633708540, 0xc00f52c180, 0xc00dfebb00)
net/http.HandlerFunc.ServeHTTP(0xc00fd38750, 0x7fc633708540, 0xc00f52c180, 0xc00dfebb00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/filters.WithImpersonation.func1(0x7fc633708540, 0xc00f52c180, 0xc00dfebb00)
net/http.HandlerFunc.ServeHTTP(0xc00fd3c140, 0x7fc633708540, 0xc00f52c180, 0xc00dfebb00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/filters.WithAuthentication.func1(0x7fc633708540, 0xc00f52c180, 0xc00dfeba00)
net/http.HandlerFunc.ServeHTTP(0xc00d90caa0, 0x7fc633708540, 0xc00f52c180, 0xc00dfeba00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.(*timeoutHandler).ServeHTTP.func1(0xc00df25500, 0xc00fd34c20, 0x604d680, 0xc00f52c180, 0xc00dfeba00)
created by k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.(*timeoutHandler).ServeHTTP

logging error output: "[+]ping ok\n[+]log ok\n[+]etcd ok\n[+]poststarthook/generic-apiserver-start-informers ok\n[+]poststarthook/bootstrap-controller ok\n[-]poststarthook/rbac/bootstrap-roles failed: reason withheld\n[-]poststarthook/scheduling/bootstrap-system-priority-classes failed: reason withheld\n[-]poststarthook/ca-registration failed: reason withheld\nhealthz check failed\n"
 [Go-http-client/1.1 127.0.0.1:38234]
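Annotation: note the transition here. At 19:49:04.058-04.059 the etcd client finally pins 127.0.0.1:2379, and this is the first healthz dump reporting [+]etcd ok; only the three post-start hooks are still failing. The probes arrive roughly every 100 ms from the same client (127.0.0.1:38234), the usual readiness poll a test harness runs against /healthz before using the apiserver. A hedged sketch of such a poll loop, with placeholder URL and timeout:

    package main

    import (
        "fmt"
        "net/http"
        "time"
    )

    // waitForHealthz polls /healthz until the apiserver reports 200 OK,
    // i.e. until etcd is reachable and every post-start hook has finished.
    func waitForHealthz(url string, timeout time.Duration) error {
        deadline := time.Now().Add(timeout)
        for time.Now().Before(deadline) {
            resp, err := http.Get(url)
            if err == nil {
                resp.Body.Close()
                if resp.StatusCode == http.StatusOK {
                    return nil
                }
            }
            time.Sleep(100 * time.Millisecond) // matches the probe spacing in this log
        }
        return fmt.Errorf("apiserver not healthy after %v", timeout)
    }

    func main() {
        if err := waitForHealthz("http://127.0.0.1:8080/healthz", 30*time.Second); err != nil {
            fmt.Println(err)
        }
    }
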
I0110 19:49:04.214715  121338 healthz.go:170] healthz check poststarthook/rbac/bootstrap-roles failed: not finished
I0110 19:49:04.214751  121338 healthz.go:170] healthz check poststarthook/scheduling/bootstrap-system-priority-classes failed: not finished
I0110 19:49:04.214760  121338 healthz.go:170] healthz check poststarthook/ca-registration failed: not finished
I0110 19:49:04.214949  121338 wrap.go:47] GET /healthz: (1.183749ms) 500
goroutine 27431 [running]:
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/httplog.(*respLogger).recordStatus(0xc007ee78f0, 0x1f4)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/httplog.(*respLogger).WriteHeader(0xc007ee78f0, 0x1f4)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.(*baseTimeoutWriter).WriteHeader(0xc0098fe7a0, 0x1f4)
net/http.Error(0x7fc633708540, 0xc009288a48, 0xc008140b00, 0x160, 0x1f4)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/healthz.handleRootHealthz.func1(0x7fc633708540, 0xc009288a48, 0xc002b54a00)
net/http.HandlerFunc.ServeHTTP(0xc0099914a0, 0x7fc633708540, 0xc009288a48, 0xc002b54a00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/mux.(*pathHandler).ServeHTTP(0xc008e5e1c0, 0x7fc633708540, 0xc009288a48, 0xc002b54a00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/mux.(*PathRecorderMux).ServeHTTP(0xc00fce9650, 0x7fc633708540, 0xc009288a48, 0xc002b54a00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server.director.ServeHTTP(0x40e9527, 0xe, 0xc00fcb5560, 0xc00fce9650, 0x7fc633708540, 0xc009288a48, 0xc002b54a00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/filters.WithAuthorization.func1(0x7fc633708540, 0xc009288a48, 0xc002b54a00)
net/http.HandlerFunc.ServeHTTP(0xc00fd3c100, 0x7fc633708540, 0xc009288a48, 0xc002b54a00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.WithMaxInFlightLimit.func1(0x7fc633708540, 0xc009288a48, 0xc002b54a00)
net/http.HandlerFunc.ServeHTTP(0xc00fd38750, 0x7fc633708540, 0xc009288a48, 0xc002b54a00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/filters.WithImpersonation.func1(0x7fc633708540, 0xc009288a48, 0xc002b54a00)
net/http.HandlerFunc.ServeHTTP(0xc00fd3c140, 0x7fc633708540, 0xc009288a48, 0xc002b54a00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/filters.WithAuthentication.func1(0x7fc633708540, 0xc009288a48, 0xc002b54700)
net/http.HandlerFunc.ServeHTTP(0xc00d90caa0, 0x7fc633708540, 0xc009288a48, 0xc002b54700)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.(*timeoutHandler).ServeHTTP.func1(0xc005e8f1a0, 0xc00fd34c20, 0x604d680, 0xc009288a48, 0xc002b54700)
created by k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.(*timeoutHandler).ServeHTTP

logging error output: "[+]ping ok\n[+]log ok\n[+]etcd ok\n[+]poststarthook/generic-apiserver-start-informers ok\n[+]poststarthook/bootstrap-controller ok\n[-]poststarthook/rbac/bootstrap-roles failed: reason withheld\n[-]poststarthook/scheduling/bootstrap-system-priority-classes failed: reason withheld\n[-]poststarthook/ca-registration failed: reason withheld\nhealthz check failed\n"
 [Go-http-client/1.1 127.0.0.1:38234]
I0110 19:49:04.312870  121338 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/clusterroles: (1.043327ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38260]
I0110 19:49:04.312879  121338 wrap.go:47] GET /apis/scheduling.k8s.io/v1beta1/priorityclasses/system-node-critical: (1.232901ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38238]
I0110 19:49:04.313271  121338 wrap.go:47] GET /api/v1/namespaces/kube-system: (1.630583ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38234]
I0110 19:49:04.315273  121338 healthz.go:170] healthz check poststarthook/rbac/bootstrap-roles failed: not finished
I0110 19:49:04.315303  121338 healthz.go:170] healthz check poststarthook/scheduling/bootstrap-system-priority-classes failed: not finished
I0110 19:49:04.315311  121338 healthz.go:170] healthz check poststarthook/ca-registration failed: not finished
I0110 19:49:04.315357  121338 wrap.go:47] GET /api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication: (1.483225ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38262]
I0110 19:49:04.315516  121338 wrap.go:47] GET /healthz: (1.922592ms) 500
goroutine 27417 [running]:
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/httplog.(*respLogger).recordStatus(0xc00feefd50, 0x1f4)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/httplog.(*respLogger).WriteHeader(0xc00feefd50, 0x1f4)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.(*baseTimeoutWriter).WriteHeader(0xc00e373920, 0x1f4)
net/http.Error(0x7fc633708540, 0xc00fd06208, 0xc005c06420, 0x160, 0x1f4)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/healthz.handleRootHealthz.func1(0x7fc633708540, 0xc00fd06208, 0xc00294a000)
net/http.HandlerFunc.ServeHTTP(0xc0099914a0, 0x7fc633708540, 0xc00fd06208, 0xc00294a000)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/mux.(*pathHandler).ServeHTTP(0xc008e5e1c0, 0x7fc633708540, 0xc00fd06208, 0xc00294a000)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/mux.(*PathRecorderMux).ServeHTTP(0xc00fce9650, 0x7fc633708540, 0xc00fd06208, 0xc00294a000)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server.director.ServeHTTP(0x40e9527, 0xe, 0xc00fcb5560, 0xc00fce9650, 0x7fc633708540, 0xc00fd06208, 0xc00294a000)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/filters.WithAuthorization.func1(0x7fc633708540, 0xc00fd06208, 0xc00294a000)
net/http.HandlerFunc.ServeHTTP(0xc00fd3c100, 0x7fc633708540, 0xc00fd06208, 0xc00294a000)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.WithMaxInFlightLimit.func1(0x7fc633708540, 0xc00fd06208, 0xc00294a000)
net/http.HandlerFunc.ServeHTTP(0xc00fd38750, 0x7fc633708540, 0xc00fd06208, 0xc00294a000)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/filters.WithImpersonation.func1(0x7fc633708540, 0xc00fd06208, 0xc00294a000)
net/http.HandlerFunc.ServeHTTP(0xc00fd3c140, 0x7fc633708540, 0xc00fd06208, 0xc00294a000)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/filters.WithAuthentication.func1(0x7fc633708540, 0xc00fd06208, 0xc01050de00)
net/http.HandlerFunc.ServeHTTP(0xc00d90caa0, 0x7fc633708540, 0xc00fd06208, 0xc01050de00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.(*timeoutHandler).ServeHTTP.func1(0xc00e342c00, 0xc00fd34c20, 0x604d680, 0xc00fd06208, 0xc01050de00)
created by k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.(*timeoutHandler).ServeHTTP

logging error output: "[+]ping ok\n[+]log ok\n[+]etcd ok\n[+]poststarthook/generic-apiserver-start-informers ok\n[+]poststarthook/bootstrap-controller ok\n[-]poststarthook/rbac/bootstrap-roles failed: reason withheld\n[-]poststarthook/scheduling/bootstrap-system-priority-classes failed: reason withheld\n[-]poststarthook/ca-registration failed: reason withheld\nhealthz check failed\n"
 [Go-http-client/1.1 127.0.0.1:38234]
I0110 19:49:04.315628  121338 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/clusterrolebindings: (2.22523ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38238]
I0110 19:49:04.315911  121338 wrap.go:47] POST /apis/scheduling.k8s.io/v1beta1/priorityclasses: (2.51111ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38260]
I0110 19:49:04.316168  121338 storage_scheduling.go:91] created PriorityClass system-node-critical with value 2000001000
I0110 19:49:04.317593  121338 wrap.go:47] GET /apis/scheduling.k8s.io/v1beta1/priorityclasses/system-cluster-critical: (1.248379ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38238]
I0110 19:49:04.317831  121338 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/clusterroles/system:aggregate-to-view: (1.821297ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38234]
I0110 19:49:04.318108  121338 wrap.go:47] POST /api/v1/namespaces/kube-system/configmaps: (1.941439ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38262]
I0110 19:49:04.318955  121338 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/clusterroles/view: (831.383µs) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38234]
I0110 19:49:04.319450  121338 wrap.go:47] POST /apis/scheduling.k8s.io/v1beta1/priorityclasses: (1.481136ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38238]
I0110 19:49:04.319683  121338 storage_scheduling.go:91] created PriorityClass system-cluster-critical with value 2000000000
I0110 19:49:04.319700  121338 storage_scheduling.go:100] all system priority classes are created successfully or already exist.
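Annotation: this is the scheduling/bootstrap-system-priority-classes hook completing. Each built-in class is looked up (404), then POSTed (201): system-node-critical with value 2000001000 and system-cluster-critical with value 2000000000, as logged just above. A sketch of the same ensure step with client-go, assuming the pre-1.18 API of this log's era (no context.Context argument):

    package bootstrap

    import (
        schedulingv1beta1 "k8s.io/api/scheduling/v1beta1"
        apierrors "k8s.io/apimachinery/pkg/api/errors"
        metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
        "k8s.io/client-go/kubernetes"
    )

    // ensureSystemPriorityClasses creates the two built-in priority classes
    // whose creation is logged above, tolerating ones that already exist
    // ("created successfully or already exist").
    func ensureSystemPriorityClasses(cs kubernetes.Interface) error {
        for _, pc := range []*schedulingv1beta1.PriorityClass{
            {ObjectMeta: metav1.ObjectMeta{Name: "system-node-critical"}, Value: 2000001000},
            {ObjectMeta: metav1.ObjectMeta{Name: "system-cluster-critical"}, Value: 2000000000},
        } {
            _, err := cs.SchedulingV1beta1().PriorityClasses().Create(pc)
            if err != nil && !apierrors.IsAlreadyExists(err) {
                return err
            }
        }
        return nil
    }
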
I0110 19:49:04.320302  121338 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/clusterroles/system:aggregate-to-admin: (952.543µs) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38234]
I0110 19:49:04.321645  121338 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/clusterroles/admin: (917.211µs) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38238]
I0110 19:49:04.322915  121338 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/clusterroles/system:aggregate-to-edit: (949.956µs) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38238]
I0110 19:49:04.323976  121338 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/clusterroles/edit: (752.291µs) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38238]
I0110 19:49:04.325276  121338 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/clusterroles/cluster-admin: (952.425µs) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38238]
I0110 19:49:04.327717  121338 wrap.go:47] POST /apis/rbac.authorization.k8s.io/v1/clusterroles: (1.992948ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38238]
I0110 19:49:04.327930  121338 storage_rbac.go:187] created clusterrole.rbac.authorization.k8s.io/cluster-admin
I0110 19:49:04.329192  121338 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/clusterroles/system:discovery: (989.721µs) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38238]
I0110 19:49:04.331429  121338 wrap.go:47] POST /apis/rbac.authorization.k8s.io/v1/clusterroles: (1.78524ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38238]
I0110 19:49:04.331649  121338 storage_rbac.go:187] created clusterrole.rbac.authorization.k8s.io/system:discovery
I0110 19:49:04.332759  121338 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/clusterroles/system:basic-user: (905.744µs) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38238]
I0110 19:49:04.334665  121338 wrap.go:47] POST /apis/rbac.authorization.k8s.io/v1/clusterroles: (1.558837ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38238]
I0110 19:49:04.334878  121338 storage_rbac.go:187] created clusterrole.rbac.authorization.k8s.io/system:basic-user
I0110 19:49:04.336019  121338 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/clusterroles/admin: (928.009µs) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38238]
I0110 19:49:04.337899  121338 wrap.go:47] POST /apis/rbac.authorization.k8s.io/v1/clusterroles: (1.501073ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38238]
I0110 19:49:04.338089  121338 storage_rbac.go:187] created clusterrole.rbac.authorization.k8s.io/admin
I0110 19:49:04.339225  121338 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/clusterroles/edit: (898.177µs) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38238]
I0110 19:49:04.341134  121338 wrap.go:47] POST /apis/rbac.authorization.k8s.io/v1/clusterroles: (1.450934ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38238]
I0110 19:49:04.341348  121338 storage_rbac.go:187] created clusterrole.rbac.authorization.k8s.io/edit
I0110 19:49:04.342522  121338 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/clusterroles/view: (976.099µs) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38238]
I0110 19:49:04.344512  121338 wrap.go:47] POST /apis/rbac.authorization.k8s.io/v1/clusterroles: (1.561033ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38238]
I0110 19:49:04.344922  121338 storage_rbac.go:187] created clusterrole.rbac.authorization.k8s.io/view
I0110 19:49:04.347745  121338 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/clusterroles/system:aggregate-to-admin: (2.617ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38238]
I0110 19:49:04.350329  121338 wrap.go:47] POST /apis/rbac.authorization.k8s.io/v1/clusterroles: (2.117191ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38238]
I0110 19:49:04.350591  121338 storage_rbac.go:187] created clusterrole.rbac.authorization.k8s.io/system:aggregate-to-admin
I0110 19:49:04.351675  121338 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/clusterroles/system:aggregate-to-edit: (893.527µs) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38238]
I0110 19:49:04.353913  121338 wrap.go:47] POST /apis/rbac.authorization.k8s.io/v1/clusterroles: (1.834038ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38238]
I0110 19:49:04.354265  121338 storage_rbac.go:187] created clusterrole.rbac.authorization.k8s.io/system:aggregate-to-edit
I0110 19:49:04.355477  121338 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/clusterroles/system:aggregate-to-view: (989.246µs) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38238]
I0110 19:49:04.358395  121338 wrap.go:47] POST /apis/rbac.authorization.k8s.io/v1/clusterroles: (2.384184ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38238]
I0110 19:49:04.358680  121338 storage_rbac.go:187] created clusterrole.rbac.authorization.k8s.io/system:aggregate-to-view
I0110 19:49:04.359691  121338 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/clusterroles/system:heapster: (846.796µs) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38238]
I0110 19:49:04.361645  121338 wrap.go:47] POST /apis/rbac.authorization.k8s.io/v1/clusterroles: (1.523159ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38238]
I0110 19:49:04.361835  121338 storage_rbac.go:187] created clusterrole.rbac.authorization.k8s.io/system:heapster
I0110 19:49:04.362983  121338 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/clusterroles/system:node: (945.551µs) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38238]
I0110 19:49:04.365892  121338 wrap.go:47] POST /apis/rbac.authorization.k8s.io/v1/clusterroles: (2.428259ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38238]
I0110 19:49:04.366400  121338 storage_rbac.go:187] created clusterrole.rbac.authorization.k8s.io/system:node
I0110 19:49:04.367604  121338 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/clusterroles/system:node-problem-detector: (967.852µs) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38238]
I0110 19:49:04.369422  121338 wrap.go:47] POST /apis/rbac.authorization.k8s.io/v1/clusterroles: (1.409462ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38238]
I0110 19:49:04.369899  121338 storage_rbac.go:187] created clusterrole.rbac.authorization.k8s.io/system:node-problem-detector
I0110 19:49:04.371063  121338 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/clusterroles/system:node-proxier: (946.863µs) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38238]
I0110 19:49:04.373304  121338 wrap.go:47] POST /apis/rbac.authorization.k8s.io/v1/clusterroles: (1.827677ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38238]
I0110 19:49:04.373628  121338 storage_rbac.go:187] created clusterrole.rbac.authorization.k8s.io/system:node-proxier
I0110 19:49:04.374871  121338 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/clusterroles/system:kubelet-api-admin: (1.009961ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38238]
I0110 19:49:04.377006  121338 wrap.go:47] POST /apis/rbac.authorization.k8s.io/v1/clusterroles: (1.669656ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38238]
I0110 19:49:04.377299  121338 storage_rbac.go:187] created clusterrole.rbac.authorization.k8s.io/system:kubelet-api-admin
I0110 19:49:04.378530  121338 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/clusterroles/system:node-bootstrapper: (968.679µs) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38238]
I0110 19:49:04.380647  121338 wrap.go:47] POST /apis/rbac.authorization.k8s.io/v1/clusterroles: (1.677029ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38238]
I0110 19:49:04.381118  121338 storage_rbac.go:187] created clusterrole.rbac.authorization.k8s.io/system:node-bootstrapper
I0110 19:49:04.382354  121338 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/clusterroles/system:auth-delegator: (940.372µs) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38238]
I0110 19:49:04.385241  121338 wrap.go:47] POST /apis/rbac.authorization.k8s.io/v1/clusterroles: (2.422733ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38238]
I0110 19:49:04.385513  121338 storage_rbac.go:187] created clusterrole.rbac.authorization.k8s.io/system:auth-delegator
I0110 19:49:04.386617  121338 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/clusterroles/system:kube-aggregator: (919.578µs) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38238]
I0110 19:49:04.388880  121338 wrap.go:47] POST /apis/rbac.authorization.k8s.io/v1/clusterroles: (1.839792ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38238]
I0110 19:49:04.389166  121338 storage_rbac.go:187] created clusterrole.rbac.authorization.k8s.io/system:kube-aggregator
I0110 19:49:04.390534  121338 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/clusterroles/system:kube-controller-manager: (1.151754ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38238]
I0110 19:49:04.392933  121338 wrap.go:47] POST /apis/rbac.authorization.k8s.io/v1/clusterroles: (1.779916ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38238]
I0110 19:49:04.393157  121338 storage_rbac.go:187] created clusterrole.rbac.authorization.k8s.io/system:kube-controller-manager
I0110 19:49:04.394299  121338 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/clusterroles/system:kube-scheduler: (902.27µs) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38238]
I0110 19:49:04.396763  121338 wrap.go:47] POST /apis/rbac.authorization.k8s.io/v1/clusterroles: (2.031864ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38238]
I0110 19:49:04.397315  121338 storage_rbac.go:187] created clusterrole.rbac.authorization.k8s.io/system:kube-scheduler
I0110 19:49:04.398678  121338 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/clusterroles/system:kube-dns: (1.126098ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38238]
I0110 19:49:04.400749  121338 wrap.go:47] POST /apis/rbac.authorization.k8s.io/v1/clusterroles: (1.585229ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38238]
I0110 19:49:04.400972  121338 storage_rbac.go:187] created clusterrole.rbac.authorization.k8s.io/system:kube-dns
I0110 19:49:04.402322  121338 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/clusterroles/system:persistent-volume-provisioner: (1.105808ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38238]
I0110 19:49:04.404798  121338 wrap.go:47] POST /apis/rbac.authorization.k8s.io/v1/clusterroles: (1.917156ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38238]
I0110 19:49:04.405174  121338 storage_rbac.go:187] created clusterrole.rbac.authorization.k8s.io/system:persistent-volume-provisioner
I0110 19:49:04.406273  121338 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/clusterroles/system:csi-external-attacher: (871.786µs) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38238]
I0110 19:49:04.408798  121338 wrap.go:47] POST /apis/rbac.authorization.k8s.io/v1/clusterroles: (1.874303ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38238]
I0110 19:49:04.409045  121338 storage_rbac.go:187] created clusterrole.rbac.authorization.k8s.io/system:csi-external-attacher
I0110 19:49:04.410194  121338 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/clusterroles/system:aws-cloud-provider: (918.483µs) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38238]
I0110 19:49:04.412257  121338 wrap.go:47] POST /apis/rbac.authorization.k8s.io/v1/clusterroles: (1.583521ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38238]
I0110 19:49:04.412450  121338 storage_rbac.go:187] created clusterrole.rbac.authorization.k8s.io/system:aws-cloud-provider
I0110 19:49:04.413628  121338 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/clusterroles/system:certificates.k8s.io:certificatesigningrequests:nodeclient: (928.221µs) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38238]
I0110 19:49:04.414049  121338 healthz.go:170] healthz check poststarthook/rbac/bootstrap-roles failed: not finished
I0110 19:49:04.414194  121338 wrap.go:47] GET /healthz: (660.589µs) 500
goroutine 27668 [running]:
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/httplog.(*respLogger).recordStatus(0xc00a86a850, 0x1f4)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/httplog.(*respLogger).WriteHeader(0xc00a86a850, 0x1f4)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.(*baseTimeoutWriter).WriteHeader(0xc009388d80, 0x1f4)
net/http.Error(0x7fc633708540, 0xc0059e13f0, 0xc00edd6140, 0x136, 0x1f4)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/healthz.handleRootHealthz.func1(0x7fc633708540, 0xc0059e13f0, 0xc0052a9100)
net/http.HandlerFunc.ServeHTTP(0xc0099914a0, 0x7fc633708540, 0xc0059e13f0, 0xc0052a9100)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/mux.(*pathHandler).ServeHTTP(0xc008e5e1c0, 0x7fc633708540, 0xc0059e13f0, 0xc0052a9100)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/mux.(*PathRecorderMux).ServeHTTP(0xc00fce9650, 0x7fc633708540, 0xc0059e13f0, 0xc0052a9100)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server.director.ServeHTTP(0x40e9527, 0xe, 0xc00fcb5560, 0xc00fce9650, 0x7fc633708540, 0xc0059e13f0, 0xc0052a9100)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/filters.WithAuthorization.func1(0x7fc633708540, 0xc0059e13f0, 0xc0052a9100)
net/http.HandlerFunc.ServeHTTP(0xc00fd3c100, 0x7fc633708540, 0xc0059e13f0, 0xc0052a9100)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.WithMaxInFlightLimit.func1(0x7fc633708540, 0xc0059e13f0, 0xc0052a9100)
net/http.HandlerFunc.ServeHTTP(0xc00fd38750, 0x7fc633708540, 0xc0059e13f0, 0xc0052a9100)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/filters.WithImpersonation.func1(0x7fc633708540, 0xc0059e13f0, 0xc0052a9100)
net/http.HandlerFunc.ServeHTTP(0xc00fd3c140, 0x7fc633708540, 0xc0059e13f0, 0xc0052a9100)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/filters.WithAuthentication.func1(0x7fc633708540, 0xc0059e13f0, 0xc0052a9000)
net/http.HandlerFunc.ServeHTTP(0xc00d90caa0, 0x7fc633708540, 0xc0059e13f0, 0xc0052a9000)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.(*timeoutHandler).ServeHTTP.func1(0xc0034c1c20, 0xc00fd34c20, 0x604d680, 0xc0059e13f0, 0xc0052a9000)
created by k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.(*timeoutHandler).ServeHTTP

logging error output: "[+]ping ok\n[+]log ok\n[+]etcd ok\n[+]poststarthook/generic-apiserver-start-informers ok\n[+]poststarthook/bootstrap-controller ok\n[-]poststarthook/rbac/bootstrap-roles failed: reason withheld\n[+]poststarthook/scheduling/bootstrap-system-priority-classes ok\n[+]poststarthook/ca-registration ok\nhealthz check failed\n"
 [Go-http-client/1.1 127.0.0.1:38262]
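Annotation: by this point two of the three pending hooks have flipped to ok; only rbac/bootstrap-roles still reports "not finished", because the reconciler below is midway through creating the long list of default cluster roles and bindings. Each named hook gates its own poststarthook/<name> healthz check. A hypothetical hook registration, assuming the GenericAPIServer API in k8s.io/apiserver:

    package bootstrap

    import (
        genericapiserver "k8s.io/apiserver/pkg/server"
    )

    // addExampleHook registers a one-time startup task. The matching
    // poststarthook/example-bootstrap healthz check reports "not finished"
    // until the function returns nil, which is the gating visible above.
    func addExampleHook(s *genericapiserver.GenericAPIServer) error {
        return s.AddPostStartHook("example-bootstrap", func(ctx genericapiserver.PostStartHookContext) error {
            // one-time initialization would go here (hypothetical)
            return nil
        })
    }
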
I0110 19:49:04.415400  121338 wrap.go:47] POST /apis/rbac.authorization.k8s.io/v1/clusterroles: (1.401751ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38238]
I0110 19:49:04.415600  121338 storage_rbac.go:187] created clusterrole.rbac.authorization.k8s.io/system:certificates.k8s.io:certificatesigningrequests:nodeclient
I0110 19:49:04.416650  121338 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/clusterroles/system:certificates.k8s.io:certificatesigningrequests:selfnodeclient: (865.506µs) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38238]
I0110 19:49:04.418589  121338 wrap.go:47] POST /apis/rbac.authorization.k8s.io/v1/clusterroles: (1.56097ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38238]
I0110 19:49:04.418820  121338 storage_rbac.go:187] created clusterrole.rbac.authorization.k8s.io/system:certificates.k8s.io:certificatesigningrequests:selfnodeclient
I0110 19:49:04.419972  121338 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/clusterroles/system:volume-scheduler: (947.964µs) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38238]
I0110 19:49:04.421992  121338 wrap.go:47] POST /apis/rbac.authorization.k8s.io/v1/clusterroles: (1.612322ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38238]
I0110 19:49:04.422256  121338 storage_rbac.go:187] created clusterrole.rbac.authorization.k8s.io/system:volume-scheduler
I0110 19:49:04.423399  121338 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/clusterroles/system:csi-external-provisioner: (932.924µs) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38238]
I0110 19:49:04.427571  121338 wrap.go:47] POST /apis/rbac.authorization.k8s.io/v1/clusterroles: (3.66903ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38238]
I0110 19:49:04.427837  121338 storage_rbac.go:187] created clusterrole.rbac.authorization.k8s.io/system:csi-external-provisioner
I0110 19:49:04.429061  121338 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/clusterroles/system:controller:attachdetach-controller: (1.003084ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38238]
I0110 19:49:04.431282  121338 wrap.go:47] POST /apis/rbac.authorization.k8s.io/v1/clusterroles: (1.730986ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38238]
I0110 19:49:04.431573  121338 storage_rbac.go:187] created clusterrole.rbac.authorization.k8s.io/system:controller:attachdetach-controller
I0110 19:49:04.432959  121338 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/clusterroles/system:controller:clusterrole-aggregation-controller: (1.148853ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38238]
I0110 19:49:04.434909  121338 wrap.go:47] POST /apis/rbac.authorization.k8s.io/v1/clusterroles: (1.457561ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38238]
I0110 19:49:04.435090  121338 storage_rbac.go:187] created clusterrole.rbac.authorization.k8s.io/system:controller:clusterrole-aggregation-controller
I0110 19:49:04.436062  121338 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/clusterroles/system:controller:cronjob-controller: (763.47µs) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38238]
I0110 19:49:04.438150  121338 wrap.go:47] POST /apis/rbac.authorization.k8s.io/v1/clusterroles: (1.677994ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38238]
I0110 19:49:04.438409  121338 storage_rbac.go:187] created clusterrole.rbac.authorization.k8s.io/system:controller:cronjob-controller
I0110 19:49:04.439435  121338 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/clusterroles/system:controller:daemon-set-controller: (814.029µs) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38238]
I0110 19:49:04.441483  121338 wrap.go:47] POST /apis/rbac.authorization.k8s.io/v1/clusterroles: (1.651561ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38238]
I0110 19:49:04.443201  121338 storage_rbac.go:187] created clusterrole.rbac.authorization.k8s.io/system:controller:daemon-set-controller
I0110 19:49:04.444482  121338 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/clusterroles/system:controller:deployment-controller: (1.038531ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38238]
I0110 19:49:04.447133  121338 wrap.go:47] POST /apis/rbac.authorization.k8s.io/v1/clusterroles: (1.773869ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38238]
I0110 19:49:04.447354  121338 storage_rbac.go:187] created clusterrole.rbac.authorization.k8s.io/system:controller:deployment-controller
I0110 19:49:04.449737  121338 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/clusterroles/system:controller:disruption-controller: (2.197276ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38238]
I0110 19:49:04.452046  121338 wrap.go:47] POST /apis/rbac.authorization.k8s.io/v1/clusterroles: (1.787601ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38238]
I0110 19:49:04.452301  121338 storage_rbac.go:187] created clusterrole.rbac.authorization.k8s.io/system:controller:disruption-controller
I0110 19:49:04.453536  121338 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/clusterroles/system:controller:endpoint-controller: (991.392µs) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38238]
I0110 19:49:04.455565  121338 wrap.go:47] POST /apis/rbac.authorization.k8s.io/v1/clusterroles: (1.669982ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38238]
I0110 19:49:04.455867  121338 storage_rbac.go:187] created clusterrole.rbac.authorization.k8s.io/system:controller:endpoint-controller
I0110 19:49:04.457107  121338 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/clusterroles/system:controller:expand-controller: (1.042603ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38238]
I0110 19:49:04.459872  121338 wrap.go:47] POST /apis/rbac.authorization.k8s.io/v1/clusterroles: (1.894586ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38238]
I0110 19:49:04.460116  121338 storage_rbac.go:187] created clusterrole.rbac.authorization.k8s.io/system:controller:expand-controller
I0110 19:49:04.461442  121338 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/clusterroles/system:controller:generic-garbage-collector: (1.017037ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38238]
I0110 19:49:04.463654  121338 wrap.go:47] POST /apis/rbac.authorization.k8s.io/v1/clusterroles: (1.740187ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38238]
I0110 19:49:04.463903  121338 storage_rbac.go:187] created clusterrole.rbac.authorization.k8s.io/system:controller:generic-garbage-collector
I0110 19:49:04.465088  121338 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/clusterroles/system:controller:horizontal-pod-autoscaler: (956.247µs) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38238]
I0110 19:49:04.467401  121338 wrap.go:47] POST /apis/rbac.authorization.k8s.io/v1/clusterroles: (1.758408ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38238]
I0110 19:49:04.467638  121338 storage_rbac.go:187] created clusterrole.rbac.authorization.k8s.io/system:controller:horizontal-pod-autoscaler
I0110 19:49:04.468830  121338 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/clusterroles/system:controller:job-controller: (995.695µs) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38238]
I0110 19:49:04.470994  121338 wrap.go:47] POST /apis/rbac.authorization.k8s.io/v1/clusterroles: (1.701774ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38238]
I0110 19:49:04.471223  121338 storage_rbac.go:187] created clusterrole.rbac.authorization.k8s.io/system:controller:job-controller
I0110 19:49:04.472428  121338 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/clusterroles/system:controller:namespace-controller: (1.000104ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38238]
I0110 19:49:04.475306  121338 wrap.go:47] POST /apis/rbac.authorization.k8s.io/v1/clusterroles: (2.401925ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38238]
I0110 19:49:04.475529  121338 storage_rbac.go:187] created clusterrole.rbac.authorization.k8s.io/system:controller:namespace-controller
I0110 19:49:04.476700  121338 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/clusterroles/system:controller:node-controller: (954.185µs) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38238]
I0110 19:49:04.478827  121338 wrap.go:47] POST /apis/rbac.authorization.k8s.io/v1/clusterroles: (1.680415ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38238]
I0110 19:49:04.479096  121338 storage_rbac.go:187] created clusterrole.rbac.authorization.k8s.io/system:controller:node-controller
I0110 19:49:04.480309  121338 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/clusterroles/system:controller:persistent-volume-binder: (919.832µs) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38238]
I0110 19:49:04.482885  121338 wrap.go:47] POST /apis/rbac.authorization.k8s.io/v1/clusterroles: (2.17149ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38238]
I0110 19:49:04.483154  121338 storage_rbac.go:187] created clusterrole.rbac.authorization.k8s.io/system:controller:persistent-volume-binder
I0110 19:49:04.484464  121338 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/clusterroles/system:controller:pod-garbage-collector: (1.090356ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38238]
I0110 19:49:04.486676  121338 wrap.go:47] POST /apis/rbac.authorization.k8s.io/v1/clusterroles: (1.601745ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38238]
I0110 19:49:04.486913  121338 storage_rbac.go:187] created clusterrole.rbac.authorization.k8s.io/system:controller:pod-garbage-collector
I0110 19:49:04.488053  121338 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/clusterroles/system:controller:replicaset-controller: (907.63µs) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38238]
I0110 19:49:04.490675  121338 wrap.go:47] POST /apis/rbac.authorization.k8s.io/v1/clusterroles: (1.846081ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38238]
I0110 19:49:04.490957  121338 storage_rbac.go:187] created clusterrole.rbac.authorization.k8s.io/system:controller:replicaset-controller
I0110 19:49:04.492114  121338 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/clusterroles/system:controller:replication-controller: (938.997µs) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38238]
I0110 19:49:04.494456  121338 wrap.go:47] POST /apis/rbac.authorization.k8s.io/v1/clusterroles: (1.902873ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38238]
I0110 19:49:04.494769  121338 storage_rbac.go:187] created clusterrole.rbac.authorization.k8s.io/system:controller:replication-controller
I0110 19:49:04.495926  121338 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/clusterroles/system:controller:resourcequota-controller: (957.022µs) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38238]
I0110 19:49:04.497927  121338 wrap.go:47] POST /apis/rbac.authorization.k8s.io/v1/clusterroles: (1.60443ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38238]
I0110 19:49:04.498192  121338 storage_rbac.go:187] created clusterrole.rbac.authorization.k8s.io/system:controller:resourcequota-controller
I0110 19:49:04.499326  121338 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/clusterroles/system:controller:route-controller: (926.84µs) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38238]
I0110 19:49:04.502381  121338 wrap.go:47] POST /apis/rbac.authorization.k8s.io/v1/clusterroles: (2.55958ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38238]
I0110 19:49:04.502641  121338 storage_rbac.go:187] created clusterrole.rbac.authorization.k8s.io/system:controller:route-controller
I0110 19:49:04.514778  121338 healthz.go:170] healthz check poststarthook/rbac/bootstrap-roles failed: not finished
I0110 19:49:04.514947  121338 wrap.go:47] GET /healthz: (1.135549ms) 500
goroutine 27502 [running]:
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/httplog.(*respLogger).recordStatus(0xc007f1de30, 0x1f4)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/httplog.(*respLogger).WriteHeader(0xc007f1de30, 0x1f4)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.(*baseTimeoutWriter).WriteHeader(0xc00984ed40, 0x1f4)
net/http.Error(0x7fc633708540, 0xc00eac4778, 0xc00264e280, 0x136, 0x1f4)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/healthz.handleRootHealthz.func1(0x7fc633708540, 0xc00eac4778, 0xc0035ca400)
net/http.HandlerFunc.ServeHTTP(0xc0099914a0, 0x7fc633708540, 0xc00eac4778, 0xc0035ca400)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/mux.(*pathHandler).ServeHTTP(0xc008e5e1c0, 0x7fc633708540, 0xc00eac4778, 0xc0035ca400)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/mux.(*PathRecorderMux).ServeHTTP(0xc00fce9650, 0x7fc633708540, 0xc00eac4778, 0xc0035ca400)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server.director.ServeHTTP(0x40e9527, 0xe, 0xc00fcb5560, 0xc00fce9650, 0x7fc633708540, 0xc00eac4778, 0xc0035ca400)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/filters.WithAuthorization.func1(0x7fc633708540, 0xc00eac4778, 0xc0035ca400)
net/http.HandlerFunc.ServeHTTP(0xc00fd3c100, 0x7fc633708540, 0xc00eac4778, 0xc0035ca400)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.WithMaxInFlightLimit.func1(0x7fc633708540, 0xc00eac4778, 0xc0035ca400)
net/http.HandlerFunc.ServeHTTP(0xc00fd38750, 0x7fc633708540, 0xc00eac4778, 0xc0035ca400)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/filters.WithImpersonation.func1(0x7fc633708540, 0xc00eac4778, 0xc0035ca400)
net/http.HandlerFunc.ServeHTTP(0xc00fd3c140, 0x7fc633708540, 0xc00eac4778, 0xc0035ca400)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/filters.WithAuthentication.func1(0x7fc633708540, 0xc00eac4778, 0xc0035ca300)
net/http.HandlerFunc.ServeHTTP(0xc00d90caa0, 0x7fc633708540, 0xc00eac4778, 0xc0035ca300)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.(*timeoutHandler).ServeHTTP.func1(0xc0077e8f60, 0xc00fd34c20, 0x604d680, 0xc00eac4778, 0xc0035ca300)
created by k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.(*timeoutHandler).ServeHTTP

logging error output: "[+]ping ok\n[+]log ok\n[+]etcd ok\n[+]poststarthook/generic-apiserver-start-informers ok\n[+]poststarthook/bootstrap-controller ok\n[-]poststarthook/rbac/bootstrap-roles failed: reason withheld\n[+]poststarthook/scheduling/bootstrap-system-priority-classes ok\n[+]poststarthook/ca-registration ok\nhealthz check failed\n"
 [Go-http-client/1.1 127.0.0.1:38262]
I0110 19:49:04.515698  121338 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/clusterroles/system:controller:service-account-controller: (1.026512ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38238]
I0110 19:49:04.518017  121338 wrap.go:47] POST /apis/rbac.authorization.k8s.io/v1/clusterroles: (1.837887ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38238]
I0110 19:49:04.518257  121338 storage_rbac.go:187] created clusterrole.rbac.authorization.k8s.io/system:controller:service-account-controller
I0110 19:49:04.519318  121338 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/clusterroles/system:controller:service-controller: (848.814µs) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38238]
I0110 19:49:04.521536  121338 wrap.go:47] POST /apis/rbac.authorization.k8s.io/v1/clusterroles: (1.740861ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38238]
I0110 19:49:04.521801  121338 storage_rbac.go:187] created clusterrole.rbac.authorization.k8s.io/system:controller:service-controller
I0110 19:49:04.522963  121338 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/clusterroles/system:controller:statefulset-controller: (947.625µs) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38238]
I0110 19:49:04.525364  121338 wrap.go:47] POST /apis/rbac.authorization.k8s.io/v1/clusterroles: (1.974288ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38238]
I0110 19:49:04.525765  121338 storage_rbac.go:187] created clusterrole.rbac.authorization.k8s.io/system:controller:statefulset-controller
I0110 19:49:04.526988  121338 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/clusterroles/system:controller:ttl-controller: (991.828µs) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38238]
I0110 19:49:04.529548  121338 wrap.go:47] POST /apis/rbac.authorization.k8s.io/v1/clusterroles: (2.145431ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38238]
I0110 19:49:04.529768  121338 storage_rbac.go:187] created clusterrole.rbac.authorization.k8s.io/system:controller:ttl-controller
I0110 19:49:04.530904  121338 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/clusterroles/system:controller:certificate-controller: (910.359µs) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38238]
I0110 19:49:04.532855  121338 wrap.go:47] POST /apis/rbac.authorization.k8s.io/v1/clusterroles: (1.50776ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38238]
I0110 19:49:04.538302  121338 storage_rbac.go:187] created clusterrole.rbac.authorization.k8s.io/system:controller:certificate-controller
I0110 19:49:04.539728  121338 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/clusterroles/system:controller:pvc-protection-controller: (1.136931ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38238]
I0110 19:49:04.553951  121338 wrap.go:47] POST /apis/rbac.authorization.k8s.io/v1/clusterroles: (2.005356ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38238]
I0110 19:49:04.554208  121338 storage_rbac.go:187] created clusterrole.rbac.authorization.k8s.io/system:controller:pvc-protection-controller
I0110 19:49:04.573423  121338 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/clusterroles/system:controller:pv-protection-controller: (1.46321ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38238]
I0110 19:49:04.594049  121338 wrap.go:47] POST /apis/rbac.authorization.k8s.io/v1/clusterroles: (2.083303ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38238]
I0110 19:49:04.594415  121338 storage_rbac.go:187] created clusterrole.rbac.authorization.k8s.io/system:controller:pv-protection-controller
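Annotation: with the last controller role (system:controller:pv-protection-controller) in place, the bootstrap moves on to the cluster role bindings below, using the same GET -> 404 -> POST -> 201 rhythm for each object. The get-or-create pattern behind those paired log lines looks roughly like this, again assuming pre-1.18 client-go signatures:

    package bootstrap

    import (
        rbacv1 "k8s.io/api/rbac/v1"
        apierrors "k8s.io/apimachinery/pkg/api/errors"
        metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
        "k8s.io/client-go/kubernetes"
    )

    // getOrCreateClusterRole issues the GET that shows up as a 404 in the
    // log, then the POST that returns 201 and produces the
    // "created clusterrole" line.
    func getOrCreateClusterRole(cs kubernetes.Interface, role *rbacv1.ClusterRole) error {
        _, err := cs.RbacV1().ClusterRoles().Get(role.Name, metav1.GetOptions{})
        if err == nil {
            return nil // already present; the real reconciler would diff/update here
        }
        if !apierrors.IsNotFound(err) {
            return err
        }
        _, err = cs.RbacV1().ClusterRoles().Create(role)
        return err
    }
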
I0110 19:49:04.613545  121338 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/clusterrolebindings/cluster-admin: (1.600186ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38238]
I0110 19:49:04.614467  121338 healthz.go:170] healthz check poststarthook/rbac/bootstrap-roles failed: not finished
I0110 19:49:04.614655  121338 wrap.go:47] GET /healthz: (998.904µs) 500
goroutine 27785 [running]:
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/httplog.(*respLogger).recordStatus(0xc00b235110, 0x1f4)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/httplog.(*respLogger).WriteHeader(0xc00b235110, 0x1f4)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.(*baseTimeoutWriter).WriteHeader(0xc002af2ea0, 0x1f4)
net/http.Error(0x7fc633708540, 0xc003d42ab0, 0xc005452500, 0x136, 0x1f4)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/healthz.handleRootHealthz.func1(0x7fc633708540, 0xc003d42ab0, 0xc0062bea00)
net/http.HandlerFunc.ServeHTTP(0xc0099914a0, 0x7fc633708540, 0xc003d42ab0, 0xc0062bea00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/mux.(*pathHandler).ServeHTTP(0xc008e5e1c0, 0x7fc633708540, 0xc003d42ab0, 0xc0062bea00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/mux.(*PathRecorderMux).ServeHTTP(0xc00fce9650, 0x7fc633708540, 0xc003d42ab0, 0xc0062bea00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server.director.ServeHTTP(0x40e9527, 0xe, 0xc00fcb5560, 0xc00fce9650, 0x7fc633708540, 0xc003d42ab0, 0xc0062bea00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/filters.WithAuthorization.func1(0x7fc633708540, 0xc003d42ab0, 0xc0062bea00)
net/http.HandlerFunc.ServeHTTP(0xc00fd3c100, 0x7fc633708540, 0xc003d42ab0, 0xc0062bea00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.WithMaxInFlightLimit.func1(0x7fc633708540, 0xc003d42ab0, 0xc0062bea00)
net/http.HandlerFunc.ServeHTTP(0xc00fd38750, 0x7fc633708540, 0xc003d42ab0, 0xc0062bea00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/filters.WithImpersonation.func1(0x7fc633708540, 0xc003d42ab0, 0xc0062bea00)
net/http.HandlerFunc.ServeHTTP(0xc00fd3c140, 0x7fc633708540, 0xc003d42ab0, 0xc0062bea00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/filters.WithAuthentication.func1(0x7fc633708540, 0xc003d42ab0, 0xc0062be900)
net/http.HandlerFunc.ServeHTTP(0xc00d90caa0, 0x7fc633708540, 0xc003d42ab0, 0xc0062be900)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.(*timeoutHandler).ServeHTTP.func1(0xc0065816e0, 0xc00fd34c20, 0x604d680, 0xc003d42ab0, 0xc0062be900)
created by k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.(*timeoutHandler).ServeHTTP

logging error output: "[+]ping ok\n[+]log ok\n[+]etcd ok\n[+]poststarthook/generic-apiserver-start-informers ok\n[+]poststarthook/bootstrap-controller ok\n[-]poststarthook/rbac/bootstrap-roles failed: reason withheld\n[+]poststarthook/scheduling/bootstrap-system-priority-classes ok\n[+]poststarthook/ca-registration ok\nhealthz check failed\n"
 [Go-http-client/1.1 127.0.0.1:38262]
I0110 19:49:04.634270  121338 wrap.go:47] POST /apis/rbac.authorization.k8s.io/v1/clusterrolebindings: (2.322271ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38262]
I0110 19:49:04.634513  121338 storage_rbac.go:215] created clusterrolebinding.rbac.authorization.k8s.io/cluster-admin
I0110 19:49:04.653281  121338 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system:discovery: (1.362258ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38262]
I0110 19:49:04.674226  121338 wrap.go:47] POST /apis/rbac.authorization.k8s.io/v1/clusterrolebindings: (2.269526ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38262]
I0110 19:49:04.674482  121338 storage_rbac.go:215] created clusterrolebinding.rbac.authorization.k8s.io/system:discovery
I0110 19:49:04.693284  121338 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system:basic-user: (1.396098ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38262]
I0110 19:49:04.715792  121338 wrap.go:47] POST /apis/rbac.authorization.k8s.io/v1/clusterrolebindings: (3.632028ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38262]
I0110 19:49:04.716179  121338 healthz.go:170] healthz check poststarthook/rbac/bootstrap-roles failed: not finished
I0110 19:49:04.716278  121338 storage_rbac.go:215] created clusterrolebinding.rbac.authorization.k8s.io/system:basic-user
I0110 19:49:04.716336  121338 wrap.go:47] GET /healthz: (1.20268ms) 500
logging error output: "[+]ping ok\n[+]log ok\n[+]etcd ok\n[+]poststarthook/generic-apiserver-start-informers ok\n[+]poststarthook/bootstrap-controller ok\n[-]poststarthook/rbac/bootstrap-roles failed: reason withheld\n[+]poststarthook/scheduling/bootstrap-system-priority-classes ok\n[+]poststarthook/ca-registration ok\nhealthz check failed\n"
 [Go-http-client/1.1 127.0.0.1:38238]
I0110 19:49:04.733791  121338 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system:node-proxier: (1.952781ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38238]
I0110 19:49:04.754313  121338 wrap.go:47] POST /apis/rbac.authorization.k8s.io/v1/clusterrolebindings: (2.382368ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38238]
I0110 19:49:04.754590  121338 storage_rbac.go:215] created clusterrolebinding.rbac.authorization.k8s.io/system:node-proxier
I0110 19:49:04.773560  121338 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system:kube-controller-manager: (1.520274ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38238]
I0110 19:49:04.796663  121338 wrap.go:47] POST /apis/rbac.authorization.k8s.io/v1/clusterrolebindings: (4.691752ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38238]
I0110 19:49:04.797190  121338 storage_rbac.go:215] created clusterrolebinding.rbac.authorization.k8s.io/system:kube-controller-manager
I0110 19:49:04.813247  121338 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system:kube-dns: (1.289859ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38238]
I0110 19:49:04.814399  121338 healthz.go:170] healthz check poststarthook/rbac/bootstrap-roles failed: not finished
I0110 19:49:04.814606  121338 wrap.go:47] GET /healthz: (949.592µs) 500
logging error output: "[+]ping ok\n[+]log ok\n[+]etcd ok\n[+]poststarthook/generic-apiserver-start-informers ok\n[+]poststarthook/bootstrap-controller ok\n[-]poststarthook/rbac/bootstrap-roles failed: reason withheld\n[+]poststarthook/scheduling/bootstrap-system-priority-classes ok\n[+]poststarthook/ca-registration ok\nhealthz check failed\n"
 [Go-http-client/1.1 127.0.0.1:38238]
I0110 19:49:04.833620  121338 wrap.go:47] POST /apis/rbac.authorization.k8s.io/v1/clusterrolebindings: (1.721474ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38238]
I0110 19:49:04.833881  121338 storage_rbac.go:215] created clusterrolebinding.rbac.authorization.k8s.io/system:kube-dns
I0110 19:49:04.853605  121338 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system:kube-scheduler: (1.436538ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38238]
I0110 19:49:04.873984  121338 wrap.go:47] POST /apis/rbac.authorization.k8s.io/v1/clusterrolebindings: (2.055204ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38238]
I0110 19:49:04.874282  121338 storage_rbac.go:215] created clusterrolebinding.rbac.authorization.k8s.io/system:kube-scheduler
I0110 19:49:04.893520  121338 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system:aws-cloud-provider: (1.559976ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38238]
I0110 19:49:04.914211  121338 wrap.go:47] POST /apis/rbac.authorization.k8s.io/v1/clusterrolebindings: (2.21734ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38238]
I0110 19:49:04.914294  121338 healthz.go:170] healthz check poststarthook/rbac/bootstrap-roles failed: not finished
I0110 19:49:04.914459  121338 wrap.go:47] GET /healthz: (788.548µs) 500
logging error output: "[+]ping ok\n[+]log ok\n[+]etcd ok\n[+]poststarthook/generic-apiserver-start-informers ok\n[+]poststarthook/bootstrap-controller ok\n[-]poststarthook/rbac/bootstrap-roles failed: reason withheld\n[+]poststarthook/scheduling/bootstrap-system-priority-classes ok\n[+]poststarthook/ca-registration ok\nhealthz check failed\n"
 [Go-http-client/1.1 127.0.0.1:38262]
I0110 19:49:04.914548  121338 storage_rbac.go:215] created clusterrolebinding.rbac.authorization.k8s.io/system:aws-cloud-provider
I0110 19:49:04.933160  121338 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system:volume-scheduler: (1.261132ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38262]
I0110 19:49:04.953996  121338 wrap.go:47] POST /apis/rbac.authorization.k8s.io/v1/clusterrolebindings: (2.027856ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38262]
I0110 19:49:04.954262  121338 storage_rbac.go:215] created clusterrolebinding.rbac.authorization.k8s.io/system:volume-scheduler
I0110 19:49:04.973423  121338 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system:node: (1.518247ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38262]
I0110 19:49:04.994035  121338 wrap.go:47] POST /apis/rbac.authorization.k8s.io/v1/clusterrolebindings: (2.125725ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38262]
I0110 19:49:04.994333  121338 storage_rbac.go:215] created clusterrolebinding.rbac.authorization.k8s.io/system:node
I0110 19:49:05.013324  121338 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system:controller:attachdetach-controller: (1.314852ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38262]
I0110 19:49:05.014466  121338 healthz.go:170] healthz check poststarthook/rbac/bootstrap-roles failed: not finished
I0110 19:49:05.014696  121338 wrap.go:47] GET /healthz: (1.023929ms) 500
logging error output: "[+]ping ok\n[+]log ok\n[+]etcd ok\n[+]poststarthook/generic-apiserver-start-informers ok\n[+]poststarthook/bootstrap-controller ok\n[-]poststarthook/rbac/bootstrap-roles failed: reason withheld\n[+]poststarthook/scheduling/bootstrap-system-priority-classes ok\n[+]poststarthook/ca-registration ok\nhealthz check failed\n"
 [Go-http-client/1.1 127.0.0.1:38262]
I0110 19:49:05.034177  121338 wrap.go:47] POST /apis/rbac.authorization.k8s.io/v1/clusterrolebindings: (2.266777ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38262]
I0110 19:49:05.034410  121338 storage_rbac.go:215] created clusterrolebinding.rbac.authorization.k8s.io/system:controller:attachdetach-controller
I0110 19:49:05.053369  121338 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system:controller:clusterrole-aggregation-controller: (1.39208ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38262]
I0110 19:49:05.074245  121338 wrap.go:47] POST /apis/rbac.authorization.k8s.io/v1/clusterrolebindings: (2.2313ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38262]
I0110 19:49:05.074604  121338 storage_rbac.go:215] created clusterrolebinding.rbac.authorization.k8s.io/system:controller:clusterrole-aggregation-controller
I0110 19:49:05.093127  121338 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system:controller:cronjob-controller: (1.251886ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38262]
I0110 19:49:05.114138  121338 wrap.go:47] POST /apis/rbac.authorization.k8s.io/v1/clusterrolebindings: (2.129473ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38262]
I0110 19:49:05.114534  121338 storage_rbac.go:215] created clusterrolebinding.rbac.authorization.k8s.io/system:controller:cronjob-controller
I0110 19:49:05.114895  121338 healthz.go:170] healthz check poststarthook/rbac/bootstrap-roles failed: not finished
I0110 19:49:05.115049  121338 wrap.go:47] GET /healthz: (966.876µs) 500
logging error output: "[+]ping ok\n[+]log ok\n[+]etcd ok\n[+]poststarthook/generic-apiserver-start-informers ok\n[+]poststarthook/bootstrap-controller ok\n[-]poststarthook/rbac/bootstrap-roles failed: reason withheld\n[+]poststarthook/scheduling/bootstrap-system-priority-classes ok\n[+]poststarthook/ca-registration ok\nhealthz check failed\n"
 [Go-http-client/1.1 127.0.0.1:38238]
I0110 19:49:05.134386  121338 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system:controller:daemon-set-controller: (2.473896ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38238]
I0110 19:49:05.155159  121338 wrap.go:47] POST /apis/rbac.authorization.k8s.io/v1/clusterrolebindings: (2.282898ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38238]
I0110 19:49:05.155456  121338 storage_rbac.go:215] created clusterrolebinding.rbac.authorization.k8s.io/system:controller:daemon-set-controller
I0110 19:49:05.173332  121338 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system:controller:deployment-controller: (1.401719ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38238]
I0110 19:49:05.194005  121338 wrap.go:47] POST /apis/rbac.authorization.k8s.io/v1/clusterrolebindings: (2.065194ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38238]
I0110 19:49:05.194317  121338 storage_rbac.go:215] created clusterrolebinding.rbac.authorization.k8s.io/system:controller:deployment-controller
I0110 19:49:05.213184  121338 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system:controller:disruption-controller: (1.257767ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38238]
I0110 19:49:05.214415  121338 healthz.go:170] healthz check poststarthook/rbac/bootstrap-roles failed: not finished
I0110 19:49:05.214600  121338 wrap.go:47] GET /healthz: (976.054µs) 500
logging error output: "[+]ping ok\n[+]log ok\n[+]etcd ok\n[+]poststarthook/generic-apiserver-start-informers ok\n[+]poststarthook/bootstrap-controller ok\n[-]poststarthook/rbac/bootstrap-roles failed: reason withheld\n[+]poststarthook/scheduling/bootstrap-system-priority-classes ok\n[+]poststarthook/ca-registration ok\nhealthz check failed\n"
 [Go-http-client/1.1 127.0.0.1:38238]
I0110 19:49:05.234157  121338 wrap.go:47] POST /apis/rbac.authorization.k8s.io/v1/clusterrolebindings: (2.370566ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38238]
I0110 19:49:05.234397  121338 storage_rbac.go:215] created clusterrolebinding.rbac.authorization.k8s.io/system:controller:disruption-controller
I0110 19:49:05.253296  121338 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system:controller:endpoint-controller: (1.347523ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38238]
I0110 19:49:05.275241  121338 wrap.go:47] POST /apis/rbac.authorization.k8s.io/v1/clusterrolebindings: (2.501825ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38238]
I0110 19:49:05.275515  121338 storage_rbac.go:215] created clusterrolebinding.rbac.authorization.k8s.io/system:controller:endpoint-controller
I0110 19:49:05.293293  121338 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system:controller:expand-controller: (1.39069ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38238]
I0110 19:49:05.314172  121338 wrap.go:47] POST /apis/rbac.authorization.k8s.io/v1/clusterrolebindings: (2.268388ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38238]
I0110 19:49:05.314296  121338 healthz.go:170] healthz check poststarthook/rbac/bootstrap-roles failed: not finished
I0110 19:49:05.314438  121338 wrap.go:47] GET /healthz: (778.211µs) 500
logging error output: "[+]ping ok\n[+]log ok\n[+]etcd ok\n[+]poststarthook/generic-apiserver-start-informers ok\n[+]poststarthook/bootstrap-controller ok\n[-]poststarthook/rbac/bootstrap-roles failed: reason withheld\n[+]poststarthook/scheduling/bootstrap-system-priority-classes ok\n[+]poststarthook/ca-registration ok\nhealthz check failed\n"
 [Go-http-client/1.1 127.0.0.1:38262]
I0110 19:49:05.314442  121338 storage_rbac.go:215] created clusterrolebinding.rbac.authorization.k8s.io/system:controller:expand-controller
I0110 19:49:05.334438  121338 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system:controller:generic-garbage-collector: (1.478877ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38262]
I0110 19:49:05.354452  121338 wrap.go:47] POST /apis/rbac.authorization.k8s.io/v1/clusterrolebindings: (2.546255ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38262]
I0110 19:49:05.354737  121338 storage_rbac.go:215] created clusterrolebinding.rbac.authorization.k8s.io/system:controller:generic-garbage-collector
I0110 19:49:05.373176  121338 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system:controller:horizontal-pod-autoscaler: (1.292237ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38262]
I0110 19:49:05.394128  121338 wrap.go:47] POST /apis/rbac.authorization.k8s.io/v1/clusterrolebindings: (2.177991ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38262]
I0110 19:49:05.394431  121338 storage_rbac.go:215] created clusterrolebinding.rbac.authorization.k8s.io/system:controller:horizontal-pod-autoscaler
I0110 19:49:05.413329  121338 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system:controller:job-controller: (1.293653ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38262]
I0110 19:49:05.414365  121338 healthz.go:170] healthz check poststarthook/rbac/bootstrap-roles failed: not finished
I0110 19:49:05.414553  121338 wrap.go:47] GET /healthz: (902.819µs) 500
logging error output: "[+]ping ok\n[+]log ok\n[+]etcd ok\n[+]poststarthook/generic-apiserver-start-informers ok\n[+]poststarthook/bootstrap-controller ok\n[-]poststarthook/rbac/bootstrap-roles failed: reason withheld\n[+]poststarthook/scheduling/bootstrap-system-priority-classes ok\n[+]poststarthook/ca-registration ok\nhealthz check failed\n"
 [Go-http-client/1.1 127.0.0.1:38262]
I0110 19:49:05.434090  121338 wrap.go:47] POST /apis/rbac.authorization.k8s.io/v1/clusterrolebindings: (2.158839ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38262]
I0110 19:49:05.434363  121338 storage_rbac.go:215] created clusterrolebinding.rbac.authorization.k8s.io/system:controller:job-controller
I0110 19:49:05.453245  121338 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system:controller:namespace-controller: (1.287502ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38262]
I0110 19:49:05.474102  121338 wrap.go:47] POST /apis/rbac.authorization.k8s.io/v1/clusterrolebindings: (2.20774ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38262]
I0110 19:49:05.474383  121338 storage_rbac.go:215] created clusterrolebinding.rbac.authorization.k8s.io/system:controller:namespace-controller
I0110 19:49:05.493179  121338 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system:controller:node-controller: (1.31894ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38262]
I0110 19:49:05.513956  121338 wrap.go:47] POST /apis/rbac.authorization.k8s.io/v1/clusterrolebindings: (2.029528ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38262]
I0110 19:49:05.514260  121338 storage_rbac.go:215] created clusterrolebinding.rbac.authorization.k8s.io/system:controller:node-controller
I0110 19:49:05.514433  121338 healthz.go:170] healthz check poststarthook/rbac/bootstrap-roles failed: not finished
I0110 19:49:05.514616  121338 wrap.go:47] GET /healthz: (857.273µs) 500
logging error output: "[+]ping ok\n[+]log ok\n[+]etcd ok\n[+]poststarthook/generic-apiserver-start-informers ok\n[+]poststarthook/bootstrap-controller ok\n[-]poststarthook/rbac/bootstrap-roles failed: reason withheld\n[+]poststarthook/scheduling/bootstrap-system-priority-classes ok\n[+]poststarthook/ca-registration ok\nhealthz check failed\n"
 [Go-http-client/1.1 127.0.0.1:38238]
I0110 19:49:05.534180  121338 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system:controller:persistent-volume-binder: (1.221685ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38238]
I0110 19:49:05.554157  121338 wrap.go:47] POST /apis/rbac.authorization.k8s.io/v1/clusterrolebindings: (2.247438ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38238]
I0110 19:49:05.554426  121338 storage_rbac.go:215] created clusterrolebinding.rbac.authorization.k8s.io/system:controller:persistent-volume-binder
I0110 19:49:05.573151  121338 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system:controller:pod-garbage-collector: (1.28584ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38238]
I0110 19:49:05.594065  121338 wrap.go:47] POST /apis/rbac.authorization.k8s.io/v1/clusterrolebindings: (2.12569ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38238]
I0110 19:49:05.594340  121338 storage_rbac.go:215] created clusterrolebinding.rbac.authorization.k8s.io/system:controller:pod-garbage-collector
I0110 19:49:05.613145  121338 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system:controller:replicaset-controller: (1.279248ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38238]
I0110 19:49:05.614399  121338 healthz.go:170] healthz check poststarthook/rbac/bootstrap-roles failed: not finished
I0110 19:49:05.614584  121338 wrap.go:47] GET /healthz: (847.803µs) 500
logging error output: "[+]ping ok\n[+]log ok\n[+]etcd ok\n[+]poststarthook/generic-apiserver-start-informers ok\n[+]poststarthook/bootstrap-controller ok\n[-]poststarthook/rbac/bootstrap-roles failed: reason withheld\n[+]poststarthook/scheduling/bootstrap-system-priority-classes ok\n[+]poststarthook/ca-registration ok\nhealthz check failed\n"
 [Go-http-client/1.1 127.0.0.1:38238]
I0110 19:49:05.633972  121338 wrap.go:47] POST /apis/rbac.authorization.k8s.io/v1/clusterrolebindings: (2.084913ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38238]
I0110 19:49:05.634216  121338 storage_rbac.go:215] created clusterrolebinding.rbac.authorization.k8s.io/system:controller:replicaset-controller
I0110 19:49:05.653085  121338 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system:controller:replication-controller: (1.207896ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38238]
I0110 19:49:05.673802  121338 wrap.go:47] POST /apis/rbac.authorization.k8s.io/v1/clusterrolebindings: (1.962535ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38238]
I0110 19:49:05.674028  121338 storage_rbac.go:215] created clusterrolebinding.rbac.authorization.k8s.io/system:controller:replication-controller
I0110 19:49:05.693270  121338 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system:controller:resourcequota-controller: (1.378097ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38238]
I0110 19:49:05.713949  121338 wrap.go:47] POST /apis/rbac.authorization.k8s.io/v1/clusterrolebindings: (2.028366ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38238]
I0110 19:49:05.714285  121338 storage_rbac.go:215] created clusterrolebinding.rbac.authorization.k8s.io/system:controller:resourcequota-controller
I0110 19:49:05.714305  121338 healthz.go:170] healthz check poststarthook/rbac/bootstrap-roles failed: not finished
I0110 19:49:05.714469  121338 wrap.go:47] GET /healthz: (829.764µs) 500
logging error output: "[+]ping ok\n[+]log ok\n[+]etcd ok\n[+]poststarthook/generic-apiserver-start-informers ok\n[+]poststarthook/bootstrap-controller ok\n[-]poststarthook/rbac/bootstrap-roles failed: reason withheld\n[+]poststarthook/scheduling/bootstrap-system-priority-classes ok\n[+]poststarthook/ca-registration ok\nhealthz check failed\n"
 [Go-http-client/1.1 127.0.0.1:38262]
I0110 19:49:05.733075  121338 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system:controller:route-controller: (1.193421ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38262]
I0110 19:49:05.754111  121338 wrap.go:47] POST /apis/rbac.authorization.k8s.io/v1/clusterrolebindings: (2.217136ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38262]
I0110 19:49:05.754524  121338 storage_rbac.go:215] created clusterrolebinding.rbac.authorization.k8s.io/system:controller:route-controller
I0110 19:49:05.773397  121338 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system:controller:service-account-controller: (1.464436ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38262]
I0110 19:49:05.793981  121338 wrap.go:47] POST /apis/rbac.authorization.k8s.io/v1/clusterrolebindings: (2.048744ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38262]
I0110 19:49:05.794257  121338 storage_rbac.go:215] created clusterrolebinding.rbac.authorization.k8s.io/system:controller:service-account-controller
I0110 19:49:05.818104  121338 healthz.go:170] healthz check poststarthook/rbac/bootstrap-roles failed: not finished
I0110 19:49:05.818296  121338 wrap.go:47] GET /healthz: (3.63665ms) 500
logging error output: "[+]ping ok\n[+]log ok\n[+]etcd ok\n[+]poststarthook/generic-apiserver-start-informers ok\n[+]poststarthook/bootstrap-controller ok\n[-]poststarthook/rbac/bootstrap-roles failed: reason withheld\n[+]poststarthook/scheduling/bootstrap-system-priority-classes ok\n[+]poststarthook/ca-registration ok\nhealthz check failed\n"
 [Go-http-client/1.1 127.0.0.1:38238]
I0110 19:49:05.818323  121338 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system:controller:service-controller: (3.669665ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38262]
I0110 19:49:05.835038  121338 wrap.go:47] POST /apis/rbac.authorization.k8s.io/v1/clusterrolebindings: (2.025618ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38238]
I0110 19:49:05.835325  121338 storage_rbac.go:215] created clusterrolebinding.rbac.authorization.k8s.io/system:controller:service-controller
I0110 19:49:05.852879  121338 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system:controller:statefulset-controller: (1.025863ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38238]
I0110 19:49:05.874258  121338 wrap.go:47] POST /apis/rbac.authorization.k8s.io/v1/clusterrolebindings: (2.350316ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38238]
I0110 19:49:05.874565  121338 storage_rbac.go:215] created clusterrolebinding.rbac.authorization.k8s.io/system:controller:statefulset-controller
I0110 19:49:05.893810  121338 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system:controller:ttl-controller: (1.477038ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38238]
I0110 19:49:05.913761  121338 wrap.go:47] POST /apis/rbac.authorization.k8s.io/v1/clusterrolebindings: (1.873169ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38238]
I0110 19:49:05.913994  121338 storage_rbac.go:215] created clusterrolebinding.rbac.authorization.k8s.io/system:controller:ttl-controller
I0110 19:49:05.914478  121338 healthz.go:170] healthz check poststarthook/rbac/bootstrap-roles failed: not finished
I0110 19:49:05.914666  121338 wrap.go:47] GET /healthz: (945.185µs) 500
logging error output: "[+]ping ok\n[+]log ok\n[+]etcd ok\n[+]poststarthook/generic-apiserver-start-informers ok\n[+]poststarthook/bootstrap-controller ok\n[-]poststarthook/rbac/bootstrap-roles failed: reason withheld\n[+]poststarthook/scheduling/bootstrap-system-priority-classes ok\n[+]poststarthook/ca-registration ok\nhealthz check failed\n"
 [Go-http-client/1.1 127.0.0.1:38262]
I0110 19:49:05.933248  121338 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system:controller:certificate-controller: (1.314416ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38262]
I0110 19:49:05.954168  121338 wrap.go:47] POST /apis/rbac.authorization.k8s.io/v1/clusterrolebindings: (2.221142ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38262]
I0110 19:49:05.954473  121338 storage_rbac.go:215] created clusterrolebinding.rbac.authorization.k8s.io/system:controller:certificate-controller
I0110 19:49:05.973260  121338 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system:controller:pvc-protection-controller: (1.367247ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38262]
I0110 19:49:05.994481  121338 wrap.go:47] POST /apis/rbac.authorization.k8s.io/v1/clusterrolebindings: (2.532658ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38262]
I0110 19:49:05.994804  121338 storage_rbac.go:215] created clusterrolebinding.rbac.authorization.k8s.io/system:controller:pvc-protection-controller
I0110 19:49:06.013350  121338 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system:controller:pv-protection-controller: (1.352504ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38262]
I0110 19:49:06.014469  121338 healthz.go:170] healthz check poststarthook/rbac/bootstrap-roles failed: not finished
I0110 19:49:06.014681  121338 wrap.go:47] GET /healthz: (899.959µs) 500
logging error output: "[+]ping ok\n[+]log ok\n[+]etcd ok\n[+]poststarthook/generic-apiserver-start-informers ok\n[+]poststarthook/bootstrap-controller ok\n[-]poststarthook/rbac/bootstrap-roles failed: reason withheld\n[+]poststarthook/scheduling/bootstrap-system-priority-classes ok\n[+]poststarthook/ca-registration ok\nhealthz check failed\n"
 [Go-http-client/1.1 127.0.0.1:38262]
I0110 19:49:06.034009  121338 wrap.go:47] POST /apis/rbac.authorization.k8s.io/v1/clusterrolebindings: (2.159353ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38262]
I0110 19:49:06.034324  121338 storage_rbac.go:215] created clusterrolebinding.rbac.authorization.k8s.io/system:controller:pv-protection-controller
I0110 19:49:06.053036  121338 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/namespaces/kube-system/roles/extension-apiserver-authentication-reader: (1.075187ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38262]
I0110 19:49:06.054802  121338 wrap.go:47] GET /api/v1/namespaces/kube-system: (1.231647ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38262]
I0110 19:49:06.076418  121338 wrap.go:47] POST /apis/rbac.authorization.k8s.io/v1/namespaces/kube-system/roles: (2.428991ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38262]
I0110 19:49:06.077188  121338 storage_rbac.go:246] created role.rbac.authorization.k8s.io/extension-apiserver-authentication-reader in kube-system
I0110 19:49:06.093875  121338 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/namespaces/kube-system/roles/system:controller:bootstrap-signer: (1.501402ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38262]
I0110 19:49:06.095449  121338 wrap.go:47] GET /api/v1/namespaces/kube-system: (1.18483ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38262]
I0110 19:49:06.113828  121338 wrap.go:47] POST /apis/rbac.authorization.k8s.io/v1/namespaces/kube-system/roles: (1.994478ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38262]
I0110 19:49:06.114016  121338 storage_rbac.go:246] created role.rbac.authorization.k8s.io/system:controller:bootstrap-signer in kube-system
I0110 19:49:06.114294  121338 healthz.go:170] healthz check poststarthook/rbac/bootstrap-roles failed: not finished
I0110 19:49:06.114477  121338 wrap.go:47] GET /healthz: (833.579µs) 500
goroutine 28001 [running]: [healthz 500 handler stack identical to the goroutine 28020 trace above; only pointer values differ]
logging error output: [same "[-]poststarthook/rbac/bootstrap-roles failed: reason withheld" healthz output as above]
 [Go-http-client/1.1 127.0.0.1:38238]
I0110 19:49:06.133317  121338 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/namespaces/kube-system/roles/system:controller:cloud-provider: (1.442278ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38238]
I0110 19:49:06.135567  121338 wrap.go:47] GET /api/v1/namespaces/kube-system: (1.711172ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38238]
I0110 19:49:06.153882  121338 wrap.go:47] POST /apis/rbac.authorization.k8s.io/v1/namespaces/kube-system/roles: (2.072888ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38238]
I0110 19:49:06.154165  121338 storage_rbac.go:246] created role.rbac.authorization.k8s.io/system:controller:cloud-provider in kube-system
I0110 19:49:06.172959  121338 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/namespaces/kube-system/roles/system:controller:token-cleaner: (1.162727ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38238]
I0110 19:49:06.174975  121338 wrap.go:47] GET /api/v1/namespaces/kube-system: (1.477787ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38238]
I0110 19:49:06.194027  121338 wrap.go:47] POST /apis/rbac.authorization.k8s.io/v1/namespaces/kube-system/roles: (2.12258ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38238]
I0110 19:49:06.194313  121338 storage_rbac.go:246] created role.rbac.authorization.k8s.io/system:controller:token-cleaner in kube-system
I0110 19:49:06.214864  121338 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/namespaces/kube-system/roles/system::leader-locking-kube-controller-manager: (2.731749ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38238]
I0110 19:49:06.215680  121338 healthz.go:170] healthz check poststarthook/rbac/bootstrap-roles failed: not finished
I0110 19:49:06.215912  121338 wrap.go:47] GET /healthz: (1.889897ms) 500
goroutine 28048 [running]: [healthz 500 handler stack identical to the goroutine 28020 trace above; only pointer values differ]
logging error output: [same "[-]poststarthook/rbac/bootstrap-roles failed: reason withheld" healthz output as above]
 [Go-http-client/1.1 127.0.0.1:38262]
I0110 19:49:06.218923  121338 wrap.go:47] GET /api/v1/namespaces/kube-system: (3.284375ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38238]
I0110 19:49:06.234901  121338 wrap.go:47] POST /apis/rbac.authorization.k8s.io/v1/namespaces/kube-system/roles: (2.89499ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38238]
I0110 19:49:06.235423  121338 storage_rbac.go:246] created role.rbac.authorization.k8s.io/system::leader-locking-kube-controller-manager in kube-system
I0110 19:49:06.253283  121338 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/namespaces/kube-system/roles/system::leader-locking-kube-scheduler: (1.386704ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38238]
I0110 19:49:06.255402  121338 wrap.go:47] GET /api/v1/namespaces/kube-system: (1.648935ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38238]
I0110 19:49:06.274336  121338 wrap.go:47] POST /apis/rbac.authorization.k8s.io/v1/namespaces/kube-system/roles: (2.444882ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38238]
I0110 19:49:06.274671  121338 storage_rbac.go:246] created role.rbac.authorization.k8s.io/system::leader-locking-kube-scheduler in kube-system
I0110 19:49:06.293201  121338 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/namespaces/kube-public/roles/system:controller:bootstrap-signer: (1.275442ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38238]
I0110 19:49:06.295041  121338 wrap.go:47] GET /api/v1/namespaces/kube-public: (1.335001ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38238]
I0110 19:49:06.314226  121338 wrap.go:47] POST /apis/rbac.authorization.k8s.io/v1/namespaces/kube-public/roles: (2.239339ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38238]
I0110 19:49:06.314439  121338 healthz.go:170] healthz check poststarthook/rbac/bootstrap-roles failed: not finished
I0110 19:49:06.314515  121338 storage_rbac.go:246] created role.rbac.authorization.k8s.io/system:controller:bootstrap-signer in kube-public
I0110 19:49:06.314626  121338 wrap.go:47] GET /healthz: (932.628µs) 500
goroutine 28054 [running]: [healthz 500 handler stack identical to the goroutine 28020 trace above; only pointer values differ]
logging error output: [same "[-]poststarthook/rbac/bootstrap-roles failed: reason withheld" healthz output as above]
 [Go-http-client/1.1 127.0.0.1:38262]
I0110 19:49:06.333420  121338 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/namespaces/kube-system/rolebindings/system::leader-locking-kube-controller-manager: (1.428689ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38262]
I0110 19:49:06.335342  121338 wrap.go:47] GET /api/v1/namespaces/kube-system: (1.356282ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38262]
I0110 19:49:06.354258  121338 wrap.go:47] POST /apis/rbac.authorization.k8s.io/v1/namespaces/kube-system/rolebindings: (2.308384ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38262]
I0110 19:49:06.354542  121338 storage_rbac.go:276] created rolebinding.rbac.authorization.k8s.io/system::leader-locking-kube-controller-manager in kube-system
I0110 19:49:06.373203  121338 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/namespaces/kube-system/rolebindings/system::leader-locking-kube-scheduler: (1.322141ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38262]
I0110 19:49:06.374998  121338 wrap.go:47] GET /api/v1/namespaces/kube-system: (1.328825ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38262]
I0110 19:49:06.394261  121338 wrap.go:47] POST /apis/rbac.authorization.k8s.io/v1/namespaces/kube-system/rolebindings: (2.30028ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38262]
I0110 19:49:06.394539  121338 storage_rbac.go:276] created rolebinding.rbac.authorization.k8s.io/system::leader-locking-kube-scheduler in kube-system
I0110 19:49:06.413268  121338 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/namespaces/kube-system/rolebindings/system:controller:bootstrap-signer: (1.387574ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38262]
I0110 19:49:06.414334  121338 healthz.go:170] healthz check poststarthook/rbac/bootstrap-roles failed: not finished
I0110 19:49:06.414531  121338 wrap.go:47] GET /healthz: (891.783µs) 500
goroutine 28016 [running]: [healthz 500 handler stack identical to the goroutine 28020 trace above; only pointer values differ]
logging error output: [same "[-]poststarthook/rbac/bootstrap-roles failed: reason withheld" healthz output as above]
 [Go-http-client/1.1 127.0.0.1:38262]
I0110 19:49:06.415004  121338 wrap.go:47] GET /api/v1/namespaces/kube-system: (1.221896ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38238]
I0110 19:49:06.434071  121338 wrap.go:47] POST /apis/rbac.authorization.k8s.io/v1/namespaces/kube-system/rolebindings: (2.140819ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38238]
I0110 19:49:06.434337  121338 storage_rbac.go:276] created rolebinding.rbac.authorization.k8s.io/system:controller:bootstrap-signer in kube-system
I0110 19:49:06.453586  121338 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/namespaces/kube-system/rolebindings/system:controller:cloud-provider: (1.455486ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38238]
I0110 19:49:06.455358  121338 wrap.go:47] GET /api/v1/namespaces/kube-system: (1.254641ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38238]
I0110 19:49:06.474162  121338 wrap.go:47] POST /apis/rbac.authorization.k8s.io/v1/namespaces/kube-system/rolebindings: (2.248855ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38238]
I0110 19:49:06.474444  121338 storage_rbac.go:276] created rolebinding.rbac.authorization.k8s.io/system:controller:cloud-provider in kube-system
I0110 19:49:06.493372  121338 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/namespaces/kube-system/rolebindings/system:controller:token-cleaner: (1.460999ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38238]
I0110 19:49:06.495225  121338 wrap.go:47] GET /api/v1/namespaces/kube-system: (1.37414ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38238]
I0110 19:49:06.514202  121338 wrap.go:47] POST /apis/rbac.authorization.k8s.io/v1/namespaces/kube-system/rolebindings: (2.274091ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38238]
I0110 19:49:06.514532  121338 storage_rbac.go:276] created rolebinding.rbac.authorization.k8s.io/system:controller:token-cleaner in kube-system
I0110 19:49:06.514623  121338 healthz.go:170] healthz check poststarthook/rbac/bootstrap-roles failed: not finished
I0110 19:49:06.514786  121338 wrap.go:47] GET /healthz: (943.51µs) 500
goroutine 28132 [running]: [healthz 500 handler stack identical to the goroutine 28020 trace above; only pointer values differ]
logging error output: [same "[-]poststarthook/rbac/bootstrap-roles failed: reason withheld" healthz output as above]
 [Go-http-client/1.1 127.0.0.1:38262]
I0110 19:49:06.533211  121338 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/namespaces/kube-public/rolebindings/system:controller:bootstrap-signer: (1.343263ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38262]
I0110 19:49:06.535385  121338 wrap.go:47] GET /api/v1/namespaces/kube-public: (1.743582ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38262]
I0110 19:49:06.554124  121338 wrap.go:47] POST /apis/rbac.authorization.k8s.io/v1/namespaces/kube-public/rolebindings: (2.246345ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38262]
I0110 19:49:06.554388  121338 storage_rbac.go:276] created rolebinding.rbac.authorization.k8s.io/system:controller:bootstrap-signer in kube-public
I0110 19:49:06.614756  121338 wrap.go:47] GET /healthz: (944.203µs) 200 [Go-http-client/1.1 127.0.0.1:38262]
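The 500s above stop once the rbac/bootstrap-roles post-start hook finishes. As a minimal sketch of what the readiness wait amounts to (the helper name and base URL are illustrative, not taken from the test harness), the probing reduces to polling GET /healthz until it returns 200:

package sketch

import (
	"fmt"
	"net/http"
	"time"
)

// waitForHealthz polls GET /healthz until it returns 200 OK, mirroring the
// repeated /healthz probes in the log while the bootstrap hooks run.
func waitForHealthz(base string, timeout time.Duration) error {
	deadline := time.Now().Add(timeout)
	for time.Now().Before(deadline) {
		resp, err := http.Get(base + "/healthz")
		if err == nil {
			resp.Body.Close()
			if resp.StatusCode == http.StatusOK {
				return nil // e.g. the 200 at 19:49:06.614756 above
			}
		}
		// Roughly the ~100ms probe cadence visible in the timestamps above.
		time.Sleep(100 * time.Millisecond)
	}
	return fmt.Errorf("healthz not ready within %v", timeout)
}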
W0110 19:49:06.615629  121338 mutation_detector.go:48] Mutation detector is enabled, this will result in memory leakage.
[the preceding mutation detector warning is repeated 9 more times, once per started informer, between 19:49:06.615693 and 19:49:06.615896]
I0110 19:49:06.616030  121338 factory.go:745] Creating scheduler from algorithm provider 'DefaultProvider'
I0110 19:49:06.616058  121338 factory.go:826] Creating scheduler with fit predicates 'map[MaxGCEPDVolumeCount:{} CheckNodeMemoryPressure:{} CheckNodeDiskPressure:{} CheckNodeCondition:{} CheckVolumeBinding:{} MaxEBSVolumeCount:{} MatchInterPodAffinity:{} GeneralPredicates:{} MaxCSIVolumeCountPred:{} NoDiskConflict:{} PodToleratesNodeTaints:{} NoVolumeZoneConflict:{} MaxAzureDiskVolumeCount:{} CheckNodePIDPressure:{}]' and priority functions 'map[NodeAffinityPriority:{} TaintTolerationPriority:{} ImageLocalityPriority:{} SelectorSpreadPriority:{} InterPodAffinityPriority:{} LeastRequestedPriority:{} BalancedResourceAllocation:{} NodePreferAvoidPodsPriority:{}]'
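For illustration only, a predicate/priority set like the DefaultProvider's logged above can be spelled out as a v1 scheduler Policy document; the names below are copied from the log line, while the selection of entries and the weights are assumptions, not values read from the test:

package main

import (
	"encoding/json"
	"fmt"
)

// An illustrative Policy with a subset of the predicates and priorities named
// in the factory.go:826 line above. Weights are assumed.
const policy = `{
  "kind": "Policy",
  "apiVersion": "v1",
  "predicates": [
    {"name": "GeneralPredicates"},
    {"name": "PodToleratesNodeTaints"},
    {"name": "CheckVolumeBinding"},
    {"name": "MatchInterPodAffinity"},
    {"name": "NoDiskConflict"}
  ],
  "priorities": [
    {"name": "LeastRequestedPriority", "weight": 1},
    {"name": "BalancedResourceAllocation", "weight": 1},
    {"name": "NodeAffinityPriority", "weight": 1},
    {"name": "TaintTolerationPriority", "weight": 1}
  ]
}`

func main() {
	var p struct {
		Predicates []struct{ Name string } `json:"predicates"`
		Priorities []struct {
			Name   string
			Weight int
		} `json:"priorities"`
	}
	if err := json.Unmarshal([]byte(policy), &p); err != nil {
		panic(err)
	}
	fmt.Printf("%d predicates, %d priorities\n", len(p.Predicates), len(p.Priorities))
}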
I0110 19:49:06.616195  121338 controller_utils.go:1021] Waiting for caches to sync for scheduler controller
I0110 19:49:06.616473  121338 reflector.go:131] Starting reflector *v1.Pod (12h0m0s) from k8s.io/kubernetes/test/integration/scheduler/util.go:194
I0110 19:49:06.616516  121338 reflector.go:169] Listing and watching *v1.Pod from k8s.io/kubernetes/test/integration/scheduler/util.go:194
I0110 19:49:06.617586  121338 wrap.go:47] GET /api/v1/pods?fieldSelector=status.phase%21%3DFailed%2Cstatus.phase%21%3DSucceeded&limit=500&resourceVersion=0: (729.967µs) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38262]
I0110 19:49:06.618362  121338 get.go:251] Starting watch for /api/v1/pods, rv=18323 labels= fields=status.phase!=Failed,status.phase!=Succeeded timeout=5m19s
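The pod reflector above lists and watches with fieldSelector status.phase!=Failed,status.phase!=Succeeded and a 12h resync. A self-contained sketch of the equivalent client-go ListWatch, assuming 2019-era client-go signatures (the kubeconfig path and the Add handler are illustrative):

package main

import (
	"fmt"
	"time"

	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/fields"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/cache"
	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	cfg, err := clientcmd.BuildConfigFromFlags("", clientcmd.RecommendedHomeFile)
	if err != nil {
		panic(err)
	}
	client := kubernetes.NewForConfigOrDie(cfg)

	// Same selector as the GET /api/v1/pods above: skip terminal pods.
	sel, err := fields.ParseSelector("status.phase!=Failed,status.phase!=Succeeded")
	if err != nil {
		panic(err)
	}
	lw := cache.NewListWatchFromClient(client.CoreV1().RESTClient(), "pods", metav1.NamespaceAll, sel)

	// 12h matches the reflector period "(12h0m0s)" in the log.
	_, ctrl := cache.NewInformer(lw, &v1.Pod{}, 12*time.Hour, cache.ResourceEventHandlerFuncs{
		AddFunc: func(obj interface{}) {
			fmt.Println("observed pod:", obj.(*v1.Pod).Name)
		},
	})
	stop := make(chan struct{})
	defer close(stop)
	ctrl.Run(stop) // blocks; list once, then watch
}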
I0110 19:49:06.716629  121338 shared_informer.go:123] caches populated
I0110 19:49:06.716697  121338 controller_utils.go:1028] Caches are synced for scheduler controller
I0110 19:49:06.717419  121338 reflector.go:131] Starting reflector *v1.StatefulSet (1s) from k8s.io/client-go/informers/factory.go:132
I0110 19:49:06.717473  121338 reflector.go:169] Listing and watching *v1.StatefulSet from k8s.io/client-go/informers/factory.go:132
I0110 19:49:06.717598  121338 reflector.go:131] Starting reflector *v1.ReplicaSet (1s) from k8s.io/client-go/informers/factory.go:132
I0110 19:49:06.717686  121338 reflector.go:169] Listing and watching *v1.ReplicaSet from k8s.io/client-go/informers/factory.go:132
I0110 19:49:06.718001  121338 reflector.go:131] Starting reflector *v1.ReplicationController (1s) from k8s.io/client-go/informers/factory.go:132
I0110 19:49:06.718028  121338 reflector.go:169] Listing and watching *v1.ReplicationController from k8s.io/client-go/informers/factory.go:132
I0110 19:49:06.718031  121338 reflector.go:131] Starting reflector *v1.PersistentVolumeClaim (1s) from k8s.io/client-go/informers/factory.go:132
I0110 19:49:06.718046  121338 reflector.go:169] Listing and watching *v1.PersistentVolumeClaim from k8s.io/client-go/informers/factory.go:132
I0110 19:49:06.718163  121338 reflector.go:131] Starting reflector *v1.StorageClass (1s) from k8s.io/client-go/informers/factory.go:132
I0110 19:49:06.718191  121338 reflector.go:169] Listing and watching *v1.StorageClass from k8s.io/client-go/informers/factory.go:132
I0110 19:49:06.717601  121338 reflector.go:131] Starting reflector *v1.PersistentVolume (1s) from k8s.io/client-go/informers/factory.go:132
I0110 19:49:06.718351  121338 reflector.go:169] Listing and watching *v1.PersistentVolume from k8s.io/client-go/informers/factory.go:132
I0110 19:49:06.718973  121338 reflector.go:131] Starting reflector *v1.Service (1s) from k8s.io/client-go/informers/factory.go:132
I0110 19:49:06.719012  121338 reflector.go:169] Listing and watching *v1.Service from k8s.io/client-go/informers/factory.go:132
I0110 19:49:06.719170  121338 reflector.go:131] Starting reflector *v1.Node (1s) from k8s.io/client-go/informers/factory.go:132
I0110 19:49:06.719198  121338 reflector.go:131] Starting reflector *v1beta1.PodDisruptionBudget (1s) from k8s.io/client-go/informers/factory.go:132
I0110 19:49:06.719220  121338 reflector.go:169] Listing and watching *v1beta1.PodDisruptionBudget from k8s.io/client-go/informers/factory.go:132
I0110 19:49:06.719210  121338 reflector.go:169] Listing and watching *v1.Node from k8s.io/client-go/informers/factory.go:132
I0110 19:49:06.721033  121338 wrap.go:47] GET /apis/apps/v1/replicasets?limit=500&resourceVersion=0: (978.159µs) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38448]
I0110 19:49:06.721039  121338 wrap.go:47] GET /apis/apps/v1/statefulsets?limit=500&resourceVersion=0: (1.142971ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38238]
I0110 19:49:06.721160  121338 wrap.go:47] GET /apis/storage.k8s.io/v1/storageclasses?limit=500&resourceVersion=0: (718.713µs) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38452]
I0110 19:49:06.721427  121338 wrap.go:47] GET /api/v1/replicationcontrollers?limit=500&resourceVersion=0: (760.675µs) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38454]
I0110 19:49:06.722164  121338 wrap.go:47] GET /api/v1/services?limit=500&resourceVersion=0: (968.516µs) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38458]
I0110 19:49:06.722639  121338 wrap.go:47] GET /api/v1/persistentvolumeclaims?limit=500&resourceVersion=0: (2.247519ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38450]
I0110 19:49:06.722700  121338 get.go:251] Starting watch for /apis/storage.k8s.io/v1/storageclasses, rv=18330 labels= fields= timeout=9m59s
I0110 19:49:06.723005  121338 get.go:251] Starting watch for /apis/apps/v1/statefulsets, rv=18331 labels= fields= timeout=6m52s
I0110 19:49:06.723319  121338 wrap.go:47] GET /apis/policy/v1beta1/poddisruptionbudgets?limit=500&resourceVersion=0: (1.928361ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38460]
I0110 19:49:06.723986  121338 wrap.go:47] GET /api/v1/nodes?limit=500&resourceVersion=0: (814.402µs) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38238]
I0110 19:49:06.724039  121338 get.go:251] Starting watch for /api/v1/replicationcontrollers, rv=18323 labels= fields= timeout=7m18s
I0110 19:49:06.724292  121338 get.go:251] Starting watch for /api/v1/persistentvolumeclaims, rv=18322 labels= fields= timeout=6m7s
I0110 19:49:06.724300  121338 get.go:251] Starting watch for /apis/policy/v1beta1/poddisruptionbudgets, rv=18326 labels= fields= timeout=9m59s
I0110 19:49:06.724970  121338 get.go:251] Starting watch for /api/v1/services, rv=18340 labels= fields= timeout=7m59s
I0110 19:49:06.724992  121338 get.go:251] Starting watch for /api/v1/nodes, rv=18322 labels= fields= timeout=6m55s
I0110 19:49:06.725333  121338 wrap.go:47] GET /api/v1/persistentvolumes?limit=500&resourceVersion=0: (2.93131ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38456]
I0110 19:49:06.726445  121338 get.go:251] Starting watch for /apis/apps/v1/replicasets, rv=18331 labels= fields= timeout=8m17s
I0110 19:49:06.726457  121338 get.go:251] Starting watch for /api/v1/persistentvolumes, rv=18322 labels= fields= timeout=7m45s
I0110 19:49:06.817405  121338 shared_informer.go:123] caches populated
I0110 19:49:06.917801  121338 shared_informer.go:123] caches populated
I0110 19:49:07.018082  121338 shared_informer.go:123] caches populated
I0110 19:49:07.118521  121338 shared_informer.go:123] caches populated
I0110 19:49:07.218810  121338 shared_informer.go:123] caches populated
I0110 19:49:07.319161  121338 shared_informer.go:123] caches populated
I0110 19:49:07.419613  121338 shared_informer.go:123] caches populated
I0110 19:49:07.519866  121338 shared_informer.go:123] caches populated
I0110 19:49:07.620127  121338 shared_informer.go:123] caches populated
I0110 19:49:07.720379  121338 shared_informer.go:123] caches populated
I0110 19:49:07.721904  121338 reflector.go:215] k8s.io/client-go/informers/factory.go:132: forcing resync
I0110 19:49:07.723191  121338 wrap.go:47] POST /api/v1/nodes: (2.055557ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38602]
I0110 19:49:07.723531  121338 reflector.go:215] k8s.io/client-go/informers/factory.go:132: forcing resync
I0110 19:49:07.723973  121338 reflector.go:215] k8s.io/client-go/informers/factory.go:132: forcing resync
I0110 19:49:07.724773  121338 reflector.go:215] k8s.io/client-go/informers/factory.go:132: forcing resync
I0110 19:49:07.725926  121338 reflector.go:215] k8s.io/client-go/informers/factory.go:132: forcing resync
I0110 19:49:07.725944  121338 wrap.go:47] POST /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods: (2.207776ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38602]
I0110 19:49:07.726282  121338 scheduling_queue.go:821] About to try and schedule pod preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/rpod-0
I0110 19:49:07.726299  121338 scheduler.go:454] Attempting to schedule pod: preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/rpod-0
I0110 19:49:07.726436  121338 scheduler_binder.go:211] AssumePodVolumes for pod "preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/rpod-0", node "node1"
I0110 19:49:07.726457  121338 scheduler_binder.go:221] AssumePodVolumes for pod "preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/rpod-0", node "node1": all PVCs bound and nothing to do
I0110 19:49:07.726521  121338 factory.go:1166] Attempting to bind rpod-0 to node1
I0110 19:49:07.728517  121338 wrap.go:47] POST /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods: (2.120484ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38602]
I0110 19:49:07.728709  121338 scheduling_queue.go:821] About to try and schedule pod preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/rpod-1
I0110 19:49:07.728741  121338 scheduler.go:454] Attempting to schedule pod: preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/rpod-1
I0110 19:49:07.728858  121338 scheduler_binder.go:211] AssumePodVolumes for pod "preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/rpod-1", node "node1"
I0110 19:49:07.728875  121338 scheduler_binder.go:221] AssumePodVolumes for pod "preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/rpod-1", node "node1": all PVCs bound and nothing to do
I0110 19:49:07.728913  121338 factory.go:1166] Attempting to bind rpod-1 to node1
I0110 19:49:07.730137  121338 wrap.go:47] POST /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/rpod-0/binding: (3.152854ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38606]
I0110 19:49:07.730332  121338 scheduler.go:569] pod preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/rpod-0 is bound successfully on node node1, 1 nodes evaluated, 1 nodes were found feasible
I0110 19:49:07.731524  121338 wrap.go:47] POST /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/rpod-1/binding: (1.633835ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38610]
I0110 19:49:07.731706  121338 scheduler.go:569] pod preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/rpod-1 is bound successfully on node node1, 1 nodes evaluated, 1 nodes were found feasible
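Both rpod bindings above are POSTs to the pods/<name>/binding subresource. A minimal sketch of that call with 2019-era client-go (the helper name is illustrative):

package sketch

import (
	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
)

// bindPod issues the same pods/<name>/binding POST the scheduler makes when it
// logs "Attempting to bind <pod> to <node>".
func bindPod(client kubernetes.Interface, ns, pod, node string) error {
	return client.CoreV1().Pods(ns).Bind(&v1.Binding{
		ObjectMeta: metav1.ObjectMeta{Namespace: ns, Name: pod},
		Target:     v1.ObjectReference{Kind: "Node", Name: node},
	})
}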
I0110 19:49:07.732154  121338 wrap.go:47] POST /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/events: (1.504734ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38606]
I0110 19:49:07.734759  121338 wrap.go:47] POST /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/events: (1.576868ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38610]
I0110 19:49:07.832657  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/rpod-0: (2.171415ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38610]
I0110 19:49:07.935628  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/rpod-1: (2.069428ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38610]
I0110 19:49:07.936050  121338 preemption_test.go:561] Creating the preemptor pod...
I0110 19:49:07.938790  121338 wrap.go:47] POST /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods: (2.397436ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38610]
I0110 19:49:07.938922  121338 scheduling_queue.go:821] About to try and schedule pod preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/preemptor-pod
I0110 19:49:07.938943  121338 scheduler.go:454] Attempting to schedule pod: preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/preemptor-pod
I0110 19:49:07.939083  121338 factory.go:1070] Unable to schedule preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/preemptor-pod: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0110 19:49:07.939132  121338 preemption_test.go:567] Creating additional pods...
I0110 19:49:07.939137  121338 factory.go:1175] Updating pod condition for preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/preemptor-pod to (PodScheduled==False, Reason=Unschedulable)
I0110 19:49:07.942052  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/preemptor-pod: (2.291851ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38644]
I0110 19:49:07.942050  121338 wrap.go:47] PUT /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/preemptor-pod/status: (2.282419ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38646]
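The PUT .../preemptor-pod/status above carries the condition change logged by factory.go:1175. A sketch of that update, assuming the era's context-free client-go signatures (the helper is hypothetical, not the scheduler's own code):

package sketch

import (
	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
)

// markUnschedulable sets PodScheduled=False/Unschedulable and PUTs /status,
// as in the "(PodScheduled==False, Reason=Unschedulable)" lines above.
func markUnschedulable(client kubernetes.Interface, pod *v1.Pod, msg string) error {
	cond := v1.PodCondition{
		Type:               v1.PodScheduled,
		Status:             v1.ConditionFalse,
		Reason:             v1.PodReasonUnschedulable, // "Unschedulable"
		Message:            msg,
		LastTransitionTime: metav1.Now(),
	}
	// Replace an existing PodScheduled condition or append a new one.
	replaced := false
	for i := range pod.Status.Conditions {
		if pod.Status.Conditions[i].Type == v1.PodScheduled {
			pod.Status.Conditions[i] = cond
			replaced = true
			break
		}
	}
	if !replaced {
		pod.Status.Conditions = append(pod.Status.Conditions, cond)
	}
	_, err := client.CoreV1().Pods(pod.Namespace).UpdateStatus(pod)
	return err
}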
I0110 19:49:07.942344  121338 wrap.go:47] POST /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods: (2.94113ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38610]
I0110 19:49:07.943767  121338 wrap.go:47] POST /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/events: (4.357124ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38602]
I0110 19:49:07.945242  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/preemptor-pod: (1.557936ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38610]
I0110 19:49:07.945485  121338 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
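generic_scheduler.go's real candidate search is considerably more involved (it reruns the predicates against a node snapshot with victims removed); purely as a sketch of the idea behind "potential node for preemption", with CPU as the only resource and all names hypothetical:

package sketch

import "sort"

type pod struct {
	name     string
	priority int32
	cpuMilli int64
}

// victims returns the lowest-priority pods whose eviction frees at least
// needMilli CPU for the preemptor, or nil if the node cannot be made to fit
// even after evicting every lower-priority pod.
func victims(running []pod, preemptorPriority int32, needMilli int64) []pod {
	// Only pods with lower priority than the preemptor may be evicted.
	var cands []pod
	for _, p := range running {
		if p.priority < preemptorPriority {
			cands = append(cands, p)
		}
	}
	// Evict lowest priority first.
	sort.Slice(cands, func(i, j int) bool { return cands[i].priority < cands[j].priority })
	var out []pod
	var freed int64
	for _, p := range cands {
		if freed >= needMilli {
			break
		}
		out = append(out, p)
		freed += p.cpuMilli
	}
	if freed < needMilli {
		return nil
	}
	return out
}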
I0110 19:49:07.945623  121338 wrap.go:47] POST /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods: (2.182155ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38646]
I0110 19:49:07.948171  121338 wrap.go:47] PUT /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/preemptor-pod/status: (2.291907ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38610]
I0110 19:49:07.948605  121338 wrap.go:47] POST /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods: (2.520548ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38602]
I0110 19:49:07.951657  121338 wrap.go:47] POST /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods: (1.951436ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38602]
I0110 19:49:07.954177  121338 wrap.go:47] POST /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods: (2.064891ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38602]
I0110 19:49:07.955756  121338 wrap.go:47] DELETE /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/rpod-1: (6.008942ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38610]
I0110 19:49:07.957002  121338 scheduling_queue.go:821] About to try and schedule pod preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/preemptor-pod
I0110 19:49:07.957019  121338 scheduler.go:454] Attempting to schedule pod: preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/preemptor-pod
I0110 19:49:07.957168  121338 scheduler_binder.go:211] AssumePodVolumes for pod "preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/preemptor-pod", node "node1"
I0110 19:49:07.957180  121338 scheduler_binder.go:221] AssumePodVolumes for pod "preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/preemptor-pod", node "node1": all PVCs bound and nothing to do
I0110 19:49:07.957218  121338 factory.go:1166] Attempting to bind preemptor-pod to node1
I0110 19:49:07.957338  121338 wrap.go:47] POST /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods: (2.607645ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38602]
I0110 19:49:07.958346  121338 scheduling_queue.go:821] About to try and schedule pod preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-4
I0110 19:49:07.958362  121338 scheduler.go:454] Attempting to schedule pod: preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-4
I0110 19:49:07.958453  121338 factory.go:1070] Unable to schedule preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-4: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0110 19:49:07.958523  121338 factory.go:1175] Updating pod condition for preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-4 to (PodScheduled==False, Reason=Unschedulable)
I0110 19:49:07.958833  121338 wrap.go:47] POST /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/events: (1.640201ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38610]
I0110 19:49:07.959335  121338 wrap.go:47] POST /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/preemptor-pod/binding: (1.650091ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38602]
I0110 19:49:07.959523  121338 scheduler.go:569] pod preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/preemptor-pod is bound successfully on node node1, 1 nodes evaluated, 1 nodes were found feasible
I0110 19:49:07.959844  121338 wrap.go:47] POST /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods: (2.129656ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38654]
I0110 19:49:07.961246  121338 wrap.go:47] POST /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/events: (2.027627ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38610]
I0110 19:49:07.961640  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-4: (2.570699ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38658]
I0110 19:49:07.961661  121338 wrap.go:47] PUT /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-4/status: (2.621063ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38656]
I0110 19:49:07.965357  121338 wrap.go:47] POST /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods: (4.155066ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38654]
I0110 19:49:07.965357  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-4: (3.369487ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38656]
I0110 19:49:07.965711  121338 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0110 19:49:07.965876  121338 scheduling_queue.go:821] About to try and schedule pod preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-5
I0110 19:49:07.965899  121338 scheduler.go:454] Attempting to schedule pod: preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-5
I0110 19:49:07.966001  121338 factory.go:1070] Unable to schedule preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-5: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0110 19:49:07.966075  121338 factory.go:1175] Updating pod condition for preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-5 to (PodScheduled==False, Reason=Unschedulable)
I0110 19:49:07.966795  121338 wrap.go:47] POST /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/events: (5.114012ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38610]
I0110 19:49:07.967818  121338 wrap.go:47] POST /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods: (1.929185ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38656]
I0110 19:49:07.968176  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-5: (1.872148ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38602]
I0110 19:49:07.968701  121338 wrap.go:47] PUT /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-5/status: (2.221321ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38660]
I0110 19:49:07.968884  121338 wrap.go:47] POST /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/events: (1.475017ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38610]
I0110 19:49:07.970209  121338 wrap.go:47] POST /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods: (1.688771ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38656]
I0110 19:49:07.971043  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-5: (1.986111ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38660]
I0110 19:49:07.972044  121338 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0110 19:49:07.972186  121338 scheduling_queue.go:821] About to try and schedule pod preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-4
I0110 19:49:07.972205  121338 scheduler.go:454] Attempting to schedule pod: preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-4
I0110 19:49:07.972290  121338 factory.go:1070] Unable to schedule preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-4: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0110 19:49:07.972328  121338 factory.go:1175] Updating pod condition for preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-4 to (PodScheduled==False, Reason=Unschedulable)
I0110 19:49:07.972357  121338 wrap.go:47] POST /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods: (1.609878ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38610]
I0110 19:49:07.974197  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-4: (1.513562ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38610]
I0110 19:49:07.974464  121338 wrap.go:47] PUT /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-4/status: (1.766942ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38660]
I0110 19:49:07.974741  121338 wrap.go:47] POST /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods: (1.720692ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38664]
I0110 19:49:07.976125  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-4: (1.259493ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38660]
I0110 19:49:07.976401  121338 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0110 19:49:07.976584  121338 wrap.go:47] PATCH /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/events/ppod-4.1578947fa566f0c2: (3.762699ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38602]
I0110 19:49:07.976956  121338 scheduling_queue.go:821] About to try and schedule pod preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-9
I0110 19:49:07.976983  121338 scheduler.go:454] Attempting to schedule pod: preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-9
I0110 19:49:07.977086  121338 factory.go:1070] Unable to schedule preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-9: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0110 19:49:07.977138  121338 factory.go:1175] Updating pod condition for preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-9 to (PodScheduled==False, Reason=Unschedulable)
I0110 19:49:07.977181  121338 wrap.go:47] POST /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods: (2.112549ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38664]
I0110 19:49:07.979721  121338 wrap.go:47] POST /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods: (1.644496ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38666]
I0110 19:49:07.980097  121338 wrap.go:47] POST /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/events: (2.605726ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38610]
I0110 19:49:07.980201  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-9: (2.702102ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38664]
I0110 19:49:07.980842  121338 wrap.go:47] PUT /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-9/status: (3.392305ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38660]
I0110 19:49:07.981669  121338 wrap.go:47] POST /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods: (1.593316ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38666]
I0110 19:49:07.982545  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-9: (1.175233ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38664]
I0110 19:49:07.982836  121338 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0110 19:49:07.983001  121338 scheduling_queue.go:821] About to try and schedule pod preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-11
I0110 19:49:07.983022  121338 scheduler.go:454] Attempting to schedule pod: preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-11
I0110 19:49:07.983146  121338 factory.go:1070] Unable to schedule preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-11: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0110 19:49:07.983203  121338 factory.go:1175] Updating pod condition for preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-11 to (PodScheduled==False, Reason=Unschedulable)
I0110 19:49:07.983877  121338 wrap.go:47] POST /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods: (1.811217ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38666]
I0110 19:49:07.984620  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-11: (1.157726ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38610]
I0110 19:49:07.985173  121338 wrap.go:47] PUT /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-11/status: (1.64897ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38664]
I0110 19:49:07.986520  121338 wrap.go:47] POST /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/events: (2.710077ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38668]
I0110 19:49:07.986633  121338 wrap.go:47] POST /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods: (2.369311ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38666]
I0110 19:49:07.986865  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-11: (1.162435ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38664]
I0110 19:49:07.987148  121338 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0110 19:49:07.987341  121338 scheduling_queue.go:821] About to try and schedule pod preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-14
I0110 19:49:07.987357  121338 scheduler.go:454] Attempting to schedule pod: preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-14
I0110 19:49:07.987523  121338 factory.go:1070] Unable to schedule preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-14: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0110 19:49:07.987579  121338 factory.go:1175] Updating pod condition for preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-14 to (PodScheduled==False, Reason=Unschedulable)
I0110 19:49:07.989430  121338 wrap.go:47] POST /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/events: (1.45912ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38672]
I0110 19:49:07.989635  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-14: (1.630748ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38670]
I0110 19:49:07.989700  121338 wrap.go:47] PUT /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-14/status: (1.890572ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38610]
I0110 19:49:07.990135  121338 wrap.go:47] POST /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods: (2.28969ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38664]
I0110 19:49:07.991403  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-14: (1.379664ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38610]
I0110 19:49:07.992013  121338 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0110 19:49:07.992444  121338 scheduling_queue.go:821] About to try and schedule pod preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-16
I0110 19:49:07.992473  121338 scheduler.go:454] Attempting to schedule pod: preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-16
I0110 19:49:07.992621  121338 factory.go:1070] Unable to schedule preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-16: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0110 19:49:07.992685  121338 factory.go:1175] Updating pod condition for preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-16 to (PodScheduled==False, Reason=Unschedulable)
I0110 19:49:07.994474  121338 wrap.go:47] POST /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods: (3.803884ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38664]
I0110 19:49:07.995373  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-16: (1.515218ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38672]
I0110 19:49:07.995683  121338 wrap.go:47] POST /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/events: (1.694699ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38674]
I0110 19:49:07.996365  121338 wrap.go:47] PUT /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-16/status: (2.378437ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38610]
I0110 19:49:07.996475  121338 wrap.go:47] POST /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods: (1.492898ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38664]
I0110 19:49:07.998222  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-16: (1.201017ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38672]
I0110 19:49:07.998554  121338 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0110 19:49:07.998658  121338 wrap.go:47] POST /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods: (1.611263ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38674]
I0110 19:49:07.998840  121338 scheduling_queue.go:821] About to try and schedule pod preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-17
I0110 19:49:07.998861  121338 scheduler.go:454] Attempting to schedule pod: preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-17
I0110 19:49:07.998941  121338 factory.go:1070] Unable to schedule preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-17: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0110 19:49:07.998987  121338 factory.go:1175] Updating pod condition for preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-17 to (PodScheduled==False, Reason=Unschedulable)
I0110 19:49:08.000151  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-17: (1.00069ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38672]
I0110 19:49:08.000976  121338 wrap.go:47] POST /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods: (1.938342ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38674]
I0110 19:49:08.001009  121338 wrap.go:47] POST /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/events: (1.37465ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38678]
I0110 19:49:08.001252  121338 wrap.go:47] PUT /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-17/status: (1.660878ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38676]
I0110 19:49:08.002715  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-17: (1.113179ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38676]
I0110 19:49:08.002989  121338 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0110 19:49:08.003130  121338 wrap.go:47] POST /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods: (1.766438ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38674]
I0110 19:49:08.003277  121338 scheduling_queue.go:821] About to try and schedule pod preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-19
I0110 19:49:08.003294  121338 scheduler.go:454] Attempting to schedule pod: preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-19
I0110 19:49:08.003404  121338 factory.go:1070] Unable to schedule preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-19: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0110 19:49:08.003464  121338 factory.go:1175] Updating pod condition for preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-19 to (PodScheduled==False, Reason=Unschedulable)
I0110 19:49:08.005152  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-19: (1.179999ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38680]
I0110 19:49:08.005607  121338 wrap.go:47] POST /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods: (1.993477ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38676]
I0110 19:49:08.006212  121338 wrap.go:47] POST /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/events: (1.999492ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38682]
I0110 19:49:08.006529  121338 wrap.go:47] PUT /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-19/status: (2.558416ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38672]
I0110 19:49:08.007863  121338 wrap.go:47] POST /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods: (1.56407ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38676]
I0110 19:49:08.008115  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-19: (1.205263ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38672]
I0110 19:49:08.008438  121338 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0110 19:49:08.008642  121338 scheduling_queue.go:821] About to try and schedule pod preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-0
I0110 19:49:08.008671  121338 scheduler.go:454] Attempting to schedule pod: preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-0
I0110 19:49:08.008743  121338 factory.go:1070] Unable to schedule preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-0: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0110 19:49:08.008785  121338 factory.go:1175] Updating pod condition for preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-0 to (PodScheduled==False, Reason=Unschedulable)
I0110 19:49:08.010393  121338 wrap.go:47] POST /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods: (1.716676ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38682]
I0110 19:49:08.010923  121338 wrap.go:47] PUT /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-0/status: (1.58967ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38680]
I0110 19:49:08.010996  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-0: (1.610531ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38684]
I0110 19:49:08.012254  121338 wrap.go:47] POST /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods: (1.421493ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38682]
I0110 19:49:08.012744  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-0: (1.087651ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38680]
I0110 19:49:08.012932  121338 wrap.go:47] POST /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/events: (3.340378ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38686]
I0110 19:49:08.013001  121338 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0110 19:49:08.013187  121338 scheduling_queue.go:821] About to try and schedule pod preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-2
I0110 19:49:08.013216  121338 scheduler.go:454] Attempting to schedule pod: preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-2
I0110 19:49:08.013306  121338 factory.go:1070] Unable to schedule preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-2: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0110 19:49:08.013351  121338 factory.go:1175] Updating pod condition for preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-2 to (PodScheduled==False, Reason=Unschedulable)
I0110 19:49:08.014338  121338 wrap.go:47] POST /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods: (1.664857ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38684]
I0110 19:49:08.015751  121338 wrap.go:47] POST /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/events: (1.771717ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38688]
I0110 19:49:08.015816  121338 wrap.go:47] PUT /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-2/status: (2.151856ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38682]
I0110 19:49:08.015965  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-2: (2.328666ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38680]
I0110 19:49:08.017135  121338 wrap.go:47] POST /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods: (1.576033ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38684]
I0110 19:49:08.017599  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-2: (1.138818ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38682]
I0110 19:49:08.017853  121338 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0110 19:49:08.018058  121338 scheduling_queue.go:821] About to try and schedule pod preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-5
I0110 19:49:08.018081  121338 scheduler.go:454] Attempting to schedule pod: preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-5
I0110 19:49:08.018187  121338 factory.go:1070] Unable to schedule preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-5: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0110 19:49:08.018245  121338 factory.go:1175] Updating pod condition for preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-5 to (PodScheduled==False, Reason=Unschedulable)
I0110 19:49:08.019358  121338 wrap.go:47] POST /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods: (1.725483ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38684]
I0110 19:49:08.019843  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-5: (1.300441ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38680]
I0110 19:49:08.020003  121338 wrap.go:47] PUT /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-5/status: (1.57044ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38682]
I0110 19:49:08.021928  121338 wrap.go:47] POST /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods: (1.999841ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38690]
I0110 19:49:08.021963  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-5: (1.5341ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38692]
I0110 19:49:08.022382  121338 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0110 19:49:08.022635  121338 scheduling_queue.go:821] About to try and schedule pod preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-14
I0110 19:49:08.022657  121338 scheduler.go:454] Attempting to schedule pod: preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-14
I0110 19:49:08.022756  121338 factory.go:1070] Unable to schedule preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-14: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0110 19:49:08.022803  121338 factory.go:1175] Updating pod condition for preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-14 to (PodScheduled==False, Reason=Unschedulable)
I0110 19:49:08.023295  121338 wrap.go:47] PATCH /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/events/ppod-5.1578947fa5da268c: (3.657343ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38684]
I0110 19:49:08.024560  121338 wrap.go:47] PUT /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-14/status: (1.571391ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38680]
I0110 19:49:08.024872  121338 wrap.go:47] POST /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods: (2.54442ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38690]
I0110 19:49:08.025154  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-14: (1.411407ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38684]
I0110 19:49:08.026463  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-14: (1.426914ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38680]
I0110 19:49:08.026692  121338 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0110 19:49:08.026852  121338 scheduling_queue.go:821] About to try and schedule pod preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-18
I0110 19:49:08.026875  121338 scheduler.go:454] Attempting to schedule pod: preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-18
I0110 19:49:08.026881  121338 wrap.go:47] POST /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods: (1.495252ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38690]
I0110 19:49:08.026987  121338 factory.go:1070] Unable to schedule preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-18: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0110 19:49:08.027030  121338 factory.go:1175] Updating pod condition for preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-18 to (PodScheduled==False, Reason=Unschedulable)
I0110 19:49:08.028910  121338 wrap.go:47] PATCH /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/events/ppod-14.1578947fa722507e: (4.609863ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38694]
I0110 19:49:08.029050  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-18: (1.783654ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38684]
I0110 19:49:08.029833  121338 wrap.go:47] PUT /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-18/status: (2.560892ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38680]
I0110 19:49:08.030978  121338 wrap.go:47] POST /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods: (3.316998ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38690]
I0110 19:49:08.031359  121338 wrap.go:47] POST /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/events: (1.878269ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38684]
I0110 19:49:08.031794  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-18: (1.442799ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38680]
I0110 19:49:08.032048  121338 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0110 19:49:08.032317  121338 scheduling_queue.go:821] About to try and schedule pod preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-8
I0110 19:49:08.032335  121338 scheduler.go:454] Attempting to schedule pod: preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-8
I0110 19:49:08.032484  121338 factory.go:1070] Unable to schedule preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-8: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0110 19:49:08.032626  121338 factory.go:1175] Updating pod condition for preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-8 to (PodScheduled==False, Reason=Unschedulable)
I0110 19:49:08.034131  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-8: (1.268583ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38694]
I0110 19:49:08.034261  121338 wrap.go:47] POST /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods: (2.493735ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38690]
I0110 19:49:08.034794  121338 wrap.go:47] POST /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/events: (1.519268ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38696]
I0110 19:49:08.035139  121338 wrap.go:47] PUT /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-8/status: (1.943147ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38680]
I0110 19:49:08.036260  121338 wrap.go:47] POST /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods: (1.566362ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38690]
I0110 19:49:08.036843  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-8: (1.20682ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38696]
I0110 19:49:08.037136  121338 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0110 19:49:08.037342  121338 scheduling_queue.go:821] About to try and schedule pod preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-16
I0110 19:49:08.037362  121338 scheduler.go:454] Attempting to schedule pod: preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-16
I0110 19:49:08.037459  121338 factory.go:1070] Unable to schedule preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-16: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0110 19:49:08.037509  121338 factory.go:1175] Updating pod condition for preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-16 to (PodScheduled==False, Reason=Unschedulable)
I0110 19:49:08.038712  121338 wrap.go:47] POST /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods: (1.793201ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38690]
I0110 19:49:08.039816  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-16: (2.09973ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38696]
I0110 19:49:08.040259  121338 wrap.go:47] PUT /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-16/status: (2.509774ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38694]
I0110 19:49:08.040858  121338 wrap.go:47] POST /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods: (1.72111ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38690]
I0110 19:49:08.041884  121338 wrap.go:47] PATCH /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/events/ppod-16.1578947fa7702016: (3.448026ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38698]
I0110 19:49:08.042211  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-16: (1.454829ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38694]
I0110 19:49:08.042581  121338 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0110 19:49:08.042762  121338 scheduling_queue.go:821] About to try and schedule pod preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-20
I0110 19:49:08.042781  121338 scheduler.go:454] Attempting to schedule pod: preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-20
I0110 19:49:08.042886  121338 factory.go:1070] Unable to schedule preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-20: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0110 19:49:08.042923  121338 factory.go:1175] Updating pod condition for preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-20 to (PodScheduled==False, Reason=Unschedulable)
I0110 19:49:08.043267  121338 wrap.go:47] POST /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods: (1.626218ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38690]
I0110 19:49:08.045469  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-20: (2.194344ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38696]
I0110 19:49:08.045469  121338 wrap.go:47] POST /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods: (1.703627ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38700]
I0110 19:49:08.045473  121338 wrap.go:47] POST /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/events: (1.896053ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38690]
I0110 19:49:08.046189  121338 wrap.go:47] PUT /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-20/status: (2.817797ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38698]
I0110 19:49:08.048318  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-20: (1.69369ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38698]
I0110 19:49:08.048472  121338 wrap.go:47] POST /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods: (2.434535ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38690]
I0110 19:49:08.048626  121338 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0110 19:49:08.048784  121338 scheduling_queue.go:821] About to try and schedule pod preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-3
I0110 19:49:08.048882  121338 scheduler.go:454] Attempting to schedule pod: preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-3
I0110 19:49:08.049019  121338 factory.go:1070] Unable to schedule preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-3: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0110 19:49:08.049088  121338 factory.go:1175] Updating pod condition for preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-3 to (PodScheduled==False, Reason=Unschedulable)
I0110 19:49:08.050973  121338 wrap.go:47] POST /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods: (1.932618ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38698]
I0110 19:49:08.053311  121338 wrap.go:47] PUT /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-3/status: (3.978841ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38702]
I0110 19:49:08.053812  121338 wrap.go:47] POST /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/events: (2.753729ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38706]
I0110 19:49:08.053812  121338 wrap.go:47] POST /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods: (2.213344ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38698]
I0110 19:49:08.054586  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-3: (3.917444ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38704]
I0110 19:49:08.055387  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-3: (1.245301ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38702]
I0110 19:49:08.055713  121338 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0110 19:49:08.055814  121338 wrap.go:47] POST /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods: (1.613622ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38706]
I0110 19:49:08.055913  121338 scheduling_queue.go:821] About to try and schedule pod preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-10
I0110 19:49:08.055924  121338 scheduler.go:454] Attempting to schedule pod: preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-10
I0110 19:49:08.056014  121338 factory.go:1070] Unable to schedule preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-10: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0110 19:49:08.056058  121338 factory.go:1175] Updating pod condition for preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-10 to (PodScheduled==False, Reason=Unschedulable)
I0110 19:49:08.058026  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-10: (1.750792ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38704]
I0110 19:49:08.058177  121338 wrap.go:47] POST /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/events: (1.582037ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38710]
I0110 19:49:08.058269  121338 wrap.go:47] PUT /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-10/status: (1.774987ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38708]
I0110 19:49:08.058314  121338 wrap.go:47] POST /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods: (2.008757ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38698]
I0110 19:49:08.059824  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-10: (1.099264ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38704]
I0110 19:49:08.060068  121338 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0110 19:49:08.060258  121338 scheduling_queue.go:821] About to try and schedule pod preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-17
I0110 19:49:08.060270  121338 scheduler.go:454] Attempting to schedule pod: preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-17
I0110 19:49:08.060380  121338 wrap.go:47] POST /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods: (1.603666ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38710]
I0110 19:49:08.060383  121338 factory.go:1070] Unable to schedule preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-17: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0110 19:49:08.060425  121338 factory.go:1175] Updating pod condition for preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-17 to (PodScheduled==False, Reason=Unschedulable)
I0110 19:49:08.062652  121338 wrap.go:47] PUT /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-17/status: (1.874465ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38710]
I0110 19:49:08.063328  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-17: (1.697159ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38704]
I0110 19:49:08.063448  121338 wrap.go:47] POST /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods: (2.319468ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38712]
I0110 19:49:08.064717  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-17: (1.512194ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38710]
I0110 19:49:08.064967  121338 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0110 19:49:08.065102  121338 scheduling_queue.go:821] About to try and schedule pod preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-21
I0110 19:49:08.065111  121338 scheduler.go:454] Attempting to schedule pod: preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-21
I0110 19:49:08.065187  121338 factory.go:1070] Unable to schedule preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-21: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0110 19:49:08.065242  121338 factory.go:1175] Updating pod condition for preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-21 to (PodScheduled==False, Reason=Unschedulable)
I0110 19:49:08.069086  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-21: (3.480825ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38704]
I0110 19:49:08.069756  121338 wrap.go:47] POST /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods: (5.897243ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38712]
I0110 19:49:08.070256  121338 wrap.go:47] PUT /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-21/status: (4.356826ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38710]
I0110 19:49:08.072585  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-21: (1.522526ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38710]
I0110 19:49:08.072853  121338 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0110 19:49:08.073631  121338 wrap.go:47] POST /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods: (2.932621ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38712]
I0110 19:49:08.078843  121338 wrap.go:47] POST /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods: (4.779471ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38710]
I0110 19:49:08.079190  121338 scheduling_queue.go:821] About to try and schedule pod preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-12
I0110 19:49:08.079211  121338 scheduler.go:454] Attempting to schedule pod: preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-12
I0110 19:49:08.079386  121338 factory.go:1070] Unable to schedule preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-12: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0110 19:49:08.079436  121338 factory.go:1175] Updating pod condition for preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-12 to (PodScheduled==False, Reason=Unschedulable)
I0110 19:49:08.082691  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-12: (2.831803ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38704]
I0110 19:49:08.083812  121338 wrap.go:47] PUT /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-12/status: (4.089626ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38710]
I0110 19:49:08.085118  121338 wrap.go:47] PATCH /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/events/ppod-17.1578947fa7d06098: (23.962501ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38714]
I0110 19:49:08.087521  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-12: (3.18052ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38710]
I0110 19:49:08.087799  121338 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0110 19:49:08.087946  121338 scheduling_queue.go:821] About to try and schedule pod preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-1
I0110 19:49:08.087962  121338 scheduler.go:454] Attempting to schedule pod: preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-1
I0110 19:49:08.088036  121338 factory.go:1070] Unable to schedule preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-1: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0110 19:49:08.088096  121338 factory.go:1175] Updating pod condition for preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-1 to (PodScheduled==False, Reason=Unschedulable)
I0110 19:49:08.089730  121338 wrap.go:47] POST /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/events: (4.024763ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38714]
I0110 19:49:08.090021  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-1: (1.654653ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38704]
I0110 19:49:08.090576  121338 wrap.go:47] PUT /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-1/status: (2.207374ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38710]
I0110 19:49:08.092460  121338 wrap.go:47] POST /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/events: (1.98827ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38704]
I0110 19:49:08.092945  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-1: (1.991246ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38710]
I0110 19:49:08.093216  121338 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0110 19:49:08.093446  121338 scheduling_queue.go:821] About to try and schedule pod preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-6
I0110 19:49:08.093469  121338 scheduler.go:454] Attempting to schedule pod: preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-6
I0110 19:49:08.093641  121338 factory.go:1070] Unable to schedule preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-6: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0110 19:49:08.093704  121338 factory.go:1175] Updating pod condition for preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-6 to (PodScheduled==False, Reason=Unschedulable)
I0110 19:49:08.094660  121338 wrap.go:47] POST /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/events: (1.70366ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38704]
I0110 19:49:08.094968  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-6: (1.024245ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38714]
I0110 19:49:08.095951  121338 wrap.go:47] PUT /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-6/status: (2.023754ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38710]
I0110 19:49:08.096468  121338 wrap.go:47] POST /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/events: (1.444163ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38704]
I0110 19:49:08.097841  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-6: (1.380727ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38710]
I0110 19:49:08.098614  121338 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0110 19:49:08.098826  121338 scheduling_queue.go:821] About to try and schedule pod preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-13
I0110 19:49:08.098843  121338 scheduler.go:454] Attempting to schedule pod: preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-13
I0110 19:49:08.098934  121338 factory.go:1070] Unable to schedule preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-13: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0110 19:49:08.098977  121338 factory.go:1175] Updating pod condition for preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-13 to (PodScheduled==False, Reason=Unschedulable)
I0110 19:49:08.101067  121338 wrap.go:47] POST /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/events: (1.270231ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38746]
I0110 19:49:08.101654  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-13: (2.019459ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38714]
I0110 19:49:08.101698  121338 wrap.go:47] PUT /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-13/status: (2.472768ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38704]
I0110 19:49:08.103346  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-13: (1.276993ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38704]
I0110 19:49:08.103622  121338 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0110 19:49:08.103788  121338 scheduling_queue.go:821] About to try and schedule pod preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-9
I0110 19:49:08.103811  121338 scheduler.go:454] Attempting to schedule pod: preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-9
I0110 19:49:08.103906  121338 factory.go:1070] Unable to schedule preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-9: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0110 19:49:08.103952  121338 factory.go:1175] Updating pod condition for preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-9 to (PodScheduled==False, Reason=Unschedulable)
I0110 19:49:08.108588  121338 wrap.go:47] PUT /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-9/status: (4.31787ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38704]
I0110 19:49:08.108594  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-9: (3.768677ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38746]
I0110 19:49:08.110455  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-9: (1.390053ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38746]
I0110 19:49:08.110796  121338 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0110 19:49:08.111438  121338 scheduling_queue.go:821] About to try and schedule pod preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-7
I0110 19:49:08.111455  121338 scheduler.go:454] Attempting to schedule pod: preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-7
I0110 19:49:08.111621  121338 wrap.go:47] PATCH /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/events/ppod-9.1578947fa682f7d6: (6.811009ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38748]
I0110 19:49:08.111614  121338 factory.go:1070] Unable to schedule preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-7: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0110 19:49:08.111734  121338 factory.go:1175] Updating pod condition for preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-7 to (PodScheduled==False, Reason=Unschedulable)
I0110 19:49:08.113514  121338 wrap.go:47] POST /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/events: (1.400276ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38704]
I0110 19:49:08.113997  121338 wrap.go:47] PUT /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-7/status: (1.98464ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38746]
I0110 19:49:08.115025  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-7: (1.165321ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38704]
I0110 19:49:08.115447  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-7: (1.025641ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38746]
I0110 19:49:08.115740  121338 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0110 19:49:08.115872  121338 scheduling_queue.go:821] About to try and schedule pod preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-11
I0110 19:49:08.115891  121338 scheduler.go:454] Attempting to schedule pod: preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-11
I0110 19:49:08.115995  121338 factory.go:1070] Unable to schedule preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-11: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0110 19:49:08.116051  121338 factory.go:1175] Updating pod condition for preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-11 to (PodScheduled==False, Reason=Unschedulable)
I0110 19:49:08.117530  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-11: (1.23568ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38704]
I0110 19:49:08.118378  121338 wrap.go:47] PUT /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-11/status: (2.040572ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38750]
I0110 19:49:08.119383  121338 wrap.go:47] PATCH /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/events/ppod-11.1578947fa6df884a: (2.673167ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38752]
I0110 19:49:08.119851  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-11: (1.055803ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38750]
I0110 19:49:08.120153  121338 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0110 19:49:08.120372  121338 scheduling_queue.go:821] About to try and schedule pod preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-15
I0110 19:49:08.120391  121338 scheduler.go:454] Attempting to schedule pod: preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-15
I0110 19:49:08.120515  121338 factory.go:1070] Unable to schedule preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-15: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0110 19:49:08.120569  121338 factory.go:1175] Updating pod condition for preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-15 to (PodScheduled==False, Reason=Unschedulable)
I0110 19:49:08.122026  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-15: (1.145359ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38704]
I0110 19:49:08.122657  121338 wrap.go:47] POST /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/events: (1.447255ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38754]
I0110 19:49:08.122933  121338 wrap.go:47] PUT /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-15/status: (2.069979ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38752]
I0110 19:49:08.124675  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-15: (1.298323ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38754]
I0110 19:49:08.124930  121338 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0110 19:49:08.125081  121338 scheduling_queue.go:821] About to try and schedule pod preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-15
I0110 19:49:08.125122  121338 scheduler.go:454] Attempting to schedule pod: preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-15
I0110 19:49:08.125239  121338 factory.go:1070] Unable to schedule preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-15: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0110 19:49:08.125279  121338 factory.go:1175] Updating pod condition for preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-15 to (PodScheduled==False, Reason=Unschedulable)
I0110 19:49:08.126795  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-15: (1.244759ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38754]
I0110 19:49:08.127120  121338 wrap.go:47] PUT /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-15/status: (1.519195ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38704]
I0110 19:49:08.128625  121338 wrap.go:47] PATCH /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/events/ppod-15.1578947faf0f9344: (2.574051ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38756]
I0110 19:49:08.128642  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-15: (1.049844ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38704]
I0110 19:49:08.128938  121338 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0110 19:49:08.129097  121338 scheduling_queue.go:821] About to try and schedule pod preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-23
I0110 19:49:08.129111  121338 scheduler.go:454] Attempting to schedule pod: preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-23
I0110 19:49:08.129202  121338 factory.go:1070] Unable to schedule preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-23: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0110 19:49:08.129250  121338 factory.go:1175] Updating pod condition for preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-23 to (PodScheduled==False, Reason=Unschedulable)
I0110 19:49:08.131323  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-23: (1.699478ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38754]
I0110 19:49:08.132040  121338 wrap.go:47] POST /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/events: (2.195513ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38758]
I0110 19:49:08.132169  121338 wrap.go:47] PUT /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-23/status: (2.306173ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38756]
I0110 19:49:08.133798  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-23: (1.180618ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38758]
I0110 19:49:08.134065  121338 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0110 19:49:08.134265  121338 scheduling_queue.go:821] About to try and schedule pod preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-47
I0110 19:49:08.134283  121338 scheduler.go:454] Attempting to schedule pod: preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-47
I0110 19:49:08.134365  121338 factory.go:1070] Unable to schedule preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-47: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0110 19:49:08.134411  121338 factory.go:1175] Updating pod condition for preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-47 to (PodScheduled==False, Reason=Unschedulable)
I0110 19:49:08.136019  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-47: (1.316334ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38754]
I0110 19:49:08.136930  121338 wrap.go:47] PUT /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-47/status: (2.244639ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38758]
I0110 19:49:08.148191  121338 wrap.go:47] POST /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/events: (13.132971ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38760]
I0110 19:49:08.151452  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-47: (13.99498ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38758]
I0110 19:49:08.151867  121338 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0110 19:49:08.152134  121338 scheduling_queue.go:821] About to try and schedule pod preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-23
I0110 19:49:08.152149  121338 scheduler.go:454] Attempting to schedule pod: preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-23
I0110 19:49:08.152267  121338 factory.go:1070] Unable to schedule preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-23: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0110 19:49:08.152320  121338 factory.go:1175] Updating pod condition for preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-23 to (PodScheduled==False, Reason=Unschedulable)
I0110 19:49:08.154772  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-23: (1.515462ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38754]
I0110 19:49:08.154859  121338 wrap.go:47] PUT /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-23/status: (2.179018ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38760]
I0110 19:49:08.155926  121338 wrap.go:47] PATCH /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/events/ppod-23.1578947faf93dddf: (2.575555ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38762]
I0110 19:49:08.156627  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-23: (1.36145ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38760]
I0110 19:49:08.156913  121338 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0110 19:49:08.157111  121338 scheduling_queue.go:821] About to try and schedule pod preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-47
I0110 19:49:08.157127  121338 scheduler.go:454] Attempting to schedule pod: preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-47
I0110 19:49:08.157218  121338 factory.go:1070] Unable to schedule preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-47: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0110 19:49:08.157269  121338 factory.go:1175] Updating pod condition for preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-47 to (PodScheduled==False, Reason=Unschedulable)
I0110 19:49:08.158779  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-47: (1.167519ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38754]
I0110 19:49:08.158991  121338 wrap.go:47] PUT /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-47/status: (1.507241ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38762]
I0110 19:49:08.160320  121338 wrap.go:47] PATCH /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/events/ppod-47.1578947fafe2c7aa: (2.225323ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38764]
I0110 19:49:08.160460  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-47: (1.081456ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38762]
I0110 19:49:08.160791  121338 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0110 19:49:08.161000  121338 scheduling_queue.go:821] About to try and schedule pod preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-10
I0110 19:49:08.161016  121338 scheduler.go:454] Attempting to schedule pod: preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-10
I0110 19:49:08.161120  121338 factory.go:1070] Unable to schedule preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-10: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0110 19:49:08.161174  121338 factory.go:1175] Updating pod condition for preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-10 to (PodScheduled==False, Reason=Unschedulable)
I0110 19:49:08.162932  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-10: (1.518873ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38764]
I0110 19:49:08.163274  121338 wrap.go:47] PUT /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-10/status: (1.839576ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38754]
I0110 19:49:08.165293  121338 wrap.go:47] PATCH /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/events/ppod-10.1578947fab373b2b: (3.357994ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38766]
I0110 19:49:08.175414  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-10: (11.699796ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38754]
I0110 19:49:08.175895  121338 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0110 19:49:08.176102  121338 scheduling_queue.go:821] About to try and schedule pod preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-46
I0110 19:49:08.176122  121338 scheduler.go:454] Attempting to schedule pod: preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-46
I0110 19:49:08.176237  121338 factory.go:1070] Unable to schedule preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-46: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0110 19:49:08.176295  121338 factory.go:1175] Updating pod condition for preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-46 to (PodScheduled==False, Reason=Unschedulable)
I0110 19:49:08.178067  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-46: (1.430409ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38764]
I0110 19:49:08.178861  121338 wrap.go:47] POST /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/events: (1.599893ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38768]
I0110 19:49:08.178955  121338 wrap.go:47] PUT /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-46/status: (2.318928ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38766]
I0110 19:49:08.180675  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-46: (1.30197ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38766]
I0110 19:49:08.180691  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/preemptor-pod: (1.322145ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38764]
I0110 19:49:08.180969  121338 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0110 19:49:08.181146  121338 scheduling_queue.go:821] About to try and schedule pod preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-44
I0110 19:49:08.181163  121338 scheduler.go:454] Attempting to schedule pod: preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-44
I0110 19:49:08.181260  121338 factory.go:1070] Unable to schedule preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-44: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0110 19:49:08.181307  121338 factory.go:1175] Updating pod condition for preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-44 to (PodScheduled==False, Reason=Unschedulable)
I0110 19:49:08.183270  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-44: (1.586624ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38764]
I0110 19:49:08.183467  121338 wrap.go:47] POST /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/events: (1.498523ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38770]
I0110 19:49:08.183677  121338 wrap.go:47] PUT /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-44/status: (2.128778ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38766]
I0110 19:49:08.185187  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-44: (1.065875ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38770]
I0110 19:49:08.185514  121338 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0110 19:49:08.185730  121338 scheduling_queue.go:821] About to try and schedule pod preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-46
I0110 19:49:08.185744  121338 scheduler.go:454] Attempting to schedule pod: preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-46
I0110 19:49:08.185837  121338 factory.go:1070] Unable to schedule preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-46: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0110 19:49:08.185885  121338 factory.go:1175] Updating pod condition for preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-46 to (PodScheduled==False, Reason=Unschedulable)
I0110 19:49:08.187320  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-46: (1.16746ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38764]
I0110 19:49:08.188037  121338 wrap.go:47] PUT /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-46/status: (1.91783ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38770]
I0110 19:49:08.188997  121338 wrap.go:47] PATCH /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/events/ppod-46.1578947fb261e43f: (2.335339ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38772]
I0110 19:49:08.190410  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-46: (1.290379ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38770]
I0110 19:49:08.190765  121338 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0110 19:49:08.190952  121338 scheduling_queue.go:821] About to try and schedule pod preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-44
I0110 19:49:08.190966  121338 scheduler.go:454] Attempting to schedule pod: preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-44
I0110 19:49:08.191065  121338 factory.go:1070] Unable to schedule preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-44: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0110 19:49:08.191116  121338 factory.go:1175] Updating pod condition for preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-44 to (PodScheduled==False, Reason=Unschedulable)
I0110 19:49:08.192964  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-44: (1.156473ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38764]
I0110 19:49:08.194440  121338 wrap.go:47] PUT /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-44/status: (2.623971ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38772]
I0110 19:49:08.194813  121338 wrap.go:47] PATCH /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/events/ppod-44.1578947fb2ae5b6d: (2.832359ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38774]
I0110 19:49:08.196094  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-44: (1.212787ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38772]
I0110 19:49:08.196426  121338 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0110 19:49:08.196634  121338 scheduling_queue.go:821] About to try and schedule pod preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-43
I0110 19:49:08.196651  121338 scheduler.go:454] Attempting to schedule pod: preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-43
I0110 19:49:08.196783  121338 factory.go:1070] Unable to schedule preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-43: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0110 19:49:08.196839  121338 factory.go:1175] Updating pod condition for preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-43 to (PodScheduled==False, Reason=Unschedulable)
I0110 19:49:08.198393  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-43: (1.248383ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38764]
I0110 19:49:08.198865  121338 wrap.go:47] POST /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/events: (1.394435ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38776]
I0110 19:49:08.199279  121338 wrap.go:47] PUT /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-43/status: (2.121983ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38774]
I0110 19:49:08.201104  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-43: (1.338588ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38776]
I0110 19:49:08.201361  121338 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0110 19:49:08.201480  121338 scheduling_queue.go:821] About to try and schedule pod preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-3
I0110 19:49:08.201509  121338 scheduler.go:454] Attempting to schedule pod: preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-3
I0110 19:49:08.201602  121338 factory.go:1070] Unable to schedule preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-3: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0110 19:49:08.201676  121338 factory.go:1175] Updating pod condition for preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-3 to (PodScheduled==False, Reason=Unschedulable)
I0110 19:49:08.203226  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-3: (1.324929ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38776]
I0110 19:49:08.203749  121338 wrap.go:47] PUT /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-3/status: (1.844379ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38764]
I0110 19:49:08.204903  121338 wrap.go:47] PATCH /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/events/ppod-3.1578947faaccb975: (2.51618ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38778]
I0110 19:49:08.205208  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-3: (1.083924ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38776]
I0110 19:49:08.205552  121338 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0110 19:49:08.205750  121338 scheduling_queue.go:821] About to try and schedule pod preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-43
I0110 19:49:08.205767  121338 scheduler.go:454] Attempting to schedule pod: preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-43
I0110 19:49:08.205882  121338 factory.go:1070] Unable to schedule preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-43: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0110 19:49:08.205944  121338 factory.go:1175] Updating pod condition for preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-43 to (PodScheduled==False, Reason=Unschedulable)
I0110 19:49:08.207458  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-43: (1.195314ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38778]
I0110 19:49:08.208147  121338 wrap.go:47] PUT /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-43/status: (1.879445ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38764]
I0110 19:49:08.208944  121338 wrap.go:47] PATCH /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/events/ppod-43.1578947fb39b5540: (2.300376ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38780]
I0110 19:49:08.209614  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-43: (1.047576ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38764]
I0110 19:49:08.209886  121338 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0110 19:49:08.210009  121338 scheduling_queue.go:821] About to try and schedule pod preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-41
I0110 19:49:08.210025  121338 scheduler.go:454] Attempting to schedule pod: preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-41
I0110 19:49:08.210122  121338 factory.go:1070] Unable to schedule preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-41: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0110 19:49:08.210185  121338 factory.go:1175] Updating pod condition for preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-41 to (PodScheduled==False, Reason=Unschedulable)
I0110 19:49:08.211621  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-41: (1.204806ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38778]
I0110 19:49:08.212639  121338 wrap.go:47] PUT /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-41/status: (2.230042ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38780]
I0110 19:49:08.212680  121338 wrap.go:47] POST /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/events: (1.92792ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38782]
I0110 19:49:08.214304  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-41: (1.24219ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38780]
I0110 19:49:08.214607  121338 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0110 19:49:08.214813  121338 scheduling_queue.go:821] About to try and schedule pod preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-39
I0110 19:49:08.214835  121338 scheduler.go:454] Attempting to schedule pod: preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-39
I0110 19:49:08.214940  121338 factory.go:1070] Unable to schedule preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-39: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0110 19:49:08.214992  121338 factory.go:1175] Updating pod condition for preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-39 to (PodScheduled==False, Reason=Unschedulable)
I0110 19:49:08.216514  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-39: (1.18622ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38778]
I0110 19:49:08.217181  121338 wrap.go:47] POST /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/events: (1.578238ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38784]
I0110 19:49:08.217248  121338 wrap.go:47] PUT /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-39/status: (2.003867ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38780]
I0110 19:49:08.219065  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-39: (1.390766ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38784]
I0110 19:49:08.219408  121338 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0110 19:49:08.219640  121338 scheduling_queue.go:821] About to try and schedule pod preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-41
I0110 19:49:08.219656  121338 scheduler.go:454] Attempting to schedule pod: preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-41
I0110 19:49:08.219754  121338 factory.go:1070] Unable to schedule preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-41: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0110 19:49:08.219807  121338 factory.go:1175] Updating pod condition for preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-41 to (PodScheduled==False, Reason=Unschedulable)
I0110 19:49:08.221290  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-41: (1.225998ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38778]
I0110 19:49:08.221895  121338 wrap.go:47] PUT /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-41/status: (1.836677ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38784]
I0110 19:49:08.223192  121338 wrap.go:47] PATCH /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/events/ppod-41.1578947fb466d288: (2.46638ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38786]
I0110 19:49:08.223436  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-41: (1.094861ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38784]
I0110 19:49:08.223788  121338 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0110 19:49:08.223933  121338 scheduling_queue.go:821] About to try and schedule pod preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-39
I0110 19:49:08.223947  121338 scheduler.go:454] Attempting to schedule pod: preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-39
I0110 19:49:08.224033  121338 factory.go:1070] Unable to schedule preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-39: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0110 19:49:08.224075  121338 factory.go:1175] Updating pod condition for preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-39 to (PodScheduled==False, Reason=Unschedulable)
I0110 19:49:08.225570  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-39: (1.227918ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38778]
I0110 19:49:08.226035  121338 wrap.go:47] PUT /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-39/status: (1.726581ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38786]
I0110 19:49:08.227119  121338 wrap.go:47] PATCH /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/events/ppod-39.1578947fb4b05b51: (2.280976ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38788]
I0110 19:49:08.227645  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-39: (1.227491ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38786]
I0110 19:49:08.228001  121338 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0110 19:49:08.228153  121338 scheduling_queue.go:821] About to try and schedule pod preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-28
I0110 19:49:08.228197  121338 scheduler.go:454] Attempting to schedule pod: preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-28
I0110 19:49:08.228322  121338 factory.go:1070] Unable to schedule preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-28: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0110 19:49:08.228364  121338 factory.go:1175] Updating pod condition for preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-28 to (PodScheduled==False, Reason=Unschedulable)
I0110 19:49:08.229802  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-28: (1.233033ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38778]
I0110 19:49:08.230391  121338 wrap.go:47] PUT /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-28/status: (1.798436ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38788]
I0110 19:49:08.230590  121338 wrap.go:47] POST /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/events: (1.607922ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38790]
I0110 19:49:08.232764  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-28: (1.924727ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38788]
I0110 19:49:08.233107  121338 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0110 19:49:08.233318  121338 scheduling_queue.go:821] About to try and schedule pod preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-38
I0110 19:49:08.233334  121338 scheduler.go:454] Attempting to schedule pod: preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-38
I0110 19:49:08.233434  121338 factory.go:1070] Unable to schedule preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-38: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0110 19:49:08.233483  121338 factory.go:1175] Updating pod condition for preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-38 to (PodScheduled==False, Reason=Unschedulable)
I0110 19:49:08.235106  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-38: (1.354658ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38778]
I0110 19:49:08.235656  121338 wrap.go:47] POST /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/events: (1.646265ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38792]
I0110 19:49:08.235927  121338 wrap.go:47] PUT /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-38/status: (2.176192ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38790]
I0110 19:49:08.237836  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-38: (1.402613ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38792]
I0110 19:49:08.238071  121338 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0110 19:49:08.238308  121338 scheduling_queue.go:821] About to try and schedule pod preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-28
I0110 19:49:08.238324  121338 scheduler.go:454] Attempting to schedule pod: preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-28
I0110 19:49:08.238404  121338 factory.go:1070] Unable to schedule preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-28: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0110 19:49:08.238446  121338 factory.go:1175] Updating pod condition for preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-28 to (PodScheduled==False, Reason=Unschedulable)
I0110 19:49:08.240005  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-28: (1.30027ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38778]
I0110 19:49:08.240340  121338 wrap.go:47] PUT /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-28/status: (1.644425ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38792]
I0110 19:49:08.241562  121338 wrap.go:47] PATCH /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/events/ppod-28.1578947fb57c7547: (2.353968ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38794]
I0110 19:49:08.241856  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-28: (1.066141ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38792]
I0110 19:49:08.242087  121338 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0110 19:49:08.242244  121338 scheduling_queue.go:821] About to try and schedule pod preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-38
I0110 19:49:08.242261  121338 scheduler.go:454] Attempting to schedule pod: preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-38
I0110 19:49:08.242319  121338 factory.go:1070] Unable to schedule preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-38: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0110 19:49:08.242351  121338 factory.go:1175] Updating pod condition for preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-38 to (PodScheduled==False, Reason=Unschedulable)
I0110 19:49:08.243776  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-38: (1.067893ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38778]
I0110 19:49:08.243998  121338 wrap.go:47] PUT /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-38/status: (1.426207ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38794]
I0110 19:49:08.245130  121338 wrap.go:47] PATCH /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/events/ppod-38.1578947fb5ca846c: (2.045678ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38796]
I0110 19:49:08.245573  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-38: (1.04921ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38794]
I0110 19:49:08.245880  121338 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0110 19:49:08.246066  121338 scheduling_queue.go:821] About to try and schedule pod preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-36
I0110 19:49:08.246123  121338 scheduler.go:454] Attempting to schedule pod: preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-36
I0110 19:49:08.246298  121338 factory.go:1070] Unable to schedule preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-36: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0110 19:49:08.246357  121338 factory.go:1175] Updating pod condition for preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-36 to (PodScheduled==False, Reason=Unschedulable)
I0110 19:49:08.247742  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-36: (1.159029ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38796]
I0110 19:49:08.248141  121338 wrap.go:47] POST /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/events: (1.211986ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38798]
I0110 19:49:08.248544  121338 wrap.go:47] PUT /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-36/status: (1.93123ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38778]
I0110 19:49:08.250091  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-36: (1.157986ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38798]
I0110 19:49:08.250309  121338 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0110 19:49:08.250446  121338 scheduling_queue.go:821] About to try and schedule pod preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-22
I0110 19:49:08.250461  121338 scheduler.go:454] Attempting to schedule pod: preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-22
I0110 19:49:08.250551  121338 factory.go:1070] Unable to schedule preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-22: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0110 19:49:08.250592  121338 factory.go:1175] Updating pod condition for preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-22 to (PodScheduled==False, Reason=Unschedulable)
I0110 19:49:08.252267  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-22: (1.189693ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38796]
I0110 19:49:08.252676  121338 wrap.go:47] POST /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/events: (1.511462ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38800]
I0110 19:49:08.252761  121338 wrap.go:47] PUT /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-22/status: (1.958121ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38798]
I0110 19:49:08.254518  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-22: (1.18514ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38800]
I0110 19:49:08.254821  121338 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0110 19:49:08.255105  121338 scheduling_queue.go:821] About to try and schedule pod preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-36
I0110 19:49:08.255122  121338 scheduler.go:454] Attempting to schedule pod: preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-36
I0110 19:49:08.255241  121338 factory.go:1070] Unable to schedule preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-36: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0110 19:49:08.255301  121338 factory.go:1175] Updating pod condition for preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-36 to (PodScheduled==False, Reason=Unschedulable)
I0110 19:49:08.256714  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-36: (1.131604ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38796]
I0110 19:49:08.257566  121338 wrap.go:47] PUT /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-36/status: (1.987876ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38800]
I0110 19:49:08.258866  121338 wrap.go:47] PATCH /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/events/ppod-36.1578947fb68ef0e1: (2.423101ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38802]
I0110 19:49:08.259017  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-36: (1.036001ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38800]
I0110 19:49:08.259345  121338 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0110 19:49:08.259571  121338 scheduling_queue.go:821] About to try and schedule pod preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-22
I0110 19:49:08.259589  121338 scheduler.go:454] Attempting to schedule pod: preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-22
I0110 19:49:08.259683  121338 factory.go:1070] Unable to schedule preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-22: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0110 19:49:08.259770  121338 factory.go:1175] Updating pod condition for preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-22 to (PodScheduled==False, Reason=Unschedulable)
I0110 19:49:08.261454  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-22: (1.424651ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38796]
I0110 19:49:08.261670  121338 wrap.go:47] PUT /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-22/status: (1.592882ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38802]
I0110 19:49:08.263297  121338 wrap.go:47] PATCH /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/events/ppod-22.1578947fb6cf91ca: (2.740255ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38804]
I0110 19:49:08.263472  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-22: (1.094673ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38802]
I0110 19:49:08.263793  121338 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0110 19:49:08.263988  121338 scheduling_queue.go:821] About to try and schedule pod preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-34
I0110 19:49:08.264004  121338 scheduler.go:454] Attempting to schedule pod: preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-34
I0110 19:49:08.264096  121338 factory.go:1070] Unable to schedule preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-34: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0110 19:49:08.264142  121338 factory.go:1175] Updating pod condition for preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-34 to (PodScheduled==False, Reason=Unschedulable)
I0110 19:49:08.265606  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-34: (1.007852ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38804]
I0110 19:49:08.266200  121338 wrap.go:47] POST /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/events: (1.503781ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38806]
I0110 19:49:08.266478  121338 wrap.go:47] PUT /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-34/status: (1.884845ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38796]
I0110 19:49:08.268245  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-34: (1.216252ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38806]
I0110 19:49:08.268474  121338 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0110 19:49:08.268679  121338 scheduling_queue.go:821] About to try and schedule pod preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-18
I0110 19:49:08.268698  121338 scheduler.go:454] Attempting to schedule pod: preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-18
I0110 19:49:08.268813  121338 factory.go:1070] Unable to schedule preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-18: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0110 19:49:08.268865  121338 factory.go:1175] Updating pod condition for preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-18 to (PodScheduled==False, Reason=Unschedulable)
I0110 19:49:08.270842  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-18: (1.606362ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38804]
I0110 19:49:08.271221  121338 wrap.go:47] PUT /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-18/status: (2.151092ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38806]
I0110 19:49:08.273030  121338 wrap.go:47] PATCH /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/events/ppod-18.1578947fa97c4ac8: (3.383419ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38808]
I0110 19:49:08.273169  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-18: (1.327466ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38806]
I0110 19:49:08.273414  121338 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0110 19:49:08.273635  121338 scheduling_queue.go:821] About to try and schedule pod preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-34
I0110 19:49:08.273660  121338 scheduler.go:454] Attempting to schedule pod: preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-34
I0110 19:49:08.273805  121338 factory.go:1070] Unable to schedule preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-34: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0110 19:49:08.273881  121338 factory.go:1175] Updating pod condition for preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-34 to (PodScheduled==False, Reason=Unschedulable)
I0110 19:49:08.275378  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-34: (1.23097ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38804]
I0110 19:49:08.275974  121338 wrap.go:47] PUT /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-34/status: (1.836422ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38808]
I0110 19:49:08.277132  121338 wrap.go:47] PATCH /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/events/ppod-34.1578947fb79e54d9: (2.462586ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38810]
I0110 19:49:08.277484  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-34: (1.01947ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38808]
I0110 19:49:08.277862  121338 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0110 19:49:08.278056  121338 scheduling_queue.go:821] About to try and schedule pod preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-32
I0110 19:49:08.278076  121338 scheduler.go:454] Attempting to schedule pod: preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-32
I0110 19:49:08.278260  121338 factory.go:1070] Unable to schedule preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-32: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0110 19:49:08.278315  121338 factory.go:1175] Updating pod condition for preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-32 to (PodScheduled==False, Reason=Unschedulable)
I0110 19:49:08.279719  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-32: (1.169955ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38810]
I0110 19:49:08.282148  121338 wrap.go:47] POST /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/events: (3.319839ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38812]
I0110 19:49:08.282394  121338 wrap.go:47] PUT /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-32/status: (3.795872ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38804]
I0110 19:49:08.282933  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/preemptor-pod: (1.202649ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38810]
I0110 19:49:08.283260  121338 preemption_test.go:583] Check unschedulable pods still exists and were never scheduled...
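At this point the test (preemption_test.go:583) switches from scheduling churn to verification: the interleaved GET /pods/ppod-0, ppod-1, ppod-2, ... requests below are it walking every low-priority pod and confirming none was ever bound. A minimal sketch of such a check, assuming a client-go clientset as provided by the integration-test fixture (illustrative only, not the test's actual body; the context-free Get signature matches the client-go vintage of this 2019 log):

package scheduler

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
)

// checkNeverScheduled fetches each pending pod and asserts it is both
// unassigned (empty NodeName) and still marked Unschedulable.
func checkNeverScheduled(cs kubernetes.Interface, ns string, numPods int) error {
	for i := 0; i < numPods; i++ {
		pod, err := cs.CoreV1().Pods(ns).Get(fmt.Sprintf("ppod-%d", i), metav1.GetOptions{})
		if err != nil {
			return err
		}
		if pod.Spec.NodeName != "" {
			return fmt.Errorf("pod %s was unexpectedly scheduled to %s", pod.Name, pod.Spec.NodeName)
		}
		for _, cond := range pod.Status.Conditions {
			if cond.Type == v1.PodScheduled && cond.Status != v1.ConditionFalse {
				return fmt.Errorf("pod %s is not marked Unschedulable", pod.Name)
			}
		}
	}
	return nil
}

Because the scheduler keeps retrying the same pods concurrently (the ppod-31/ppod-32 cycles continue below), these verification GETs interleave with the scheduler's own status traffic on separate client connections.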
I0110 19:49:08.284085  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-32: (1.203275ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38804]
I0110 19:49:08.284411  121338 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0110 19:49:08.284581  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-0: (1.121116ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38810]
I0110 19:49:08.284715  121338 scheduling_queue.go:821] About to try and schedule pod preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-31
I0110 19:49:08.284738  121338 scheduler.go:454] Attempting to schedule pod: preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-31
I0110 19:49:08.284858  121338 factory.go:1070] Unable to schedule preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-31: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0110 19:49:08.284907  121338 factory.go:1175] Updating pod condition for preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-31 to (PodScheduled==False, Reason=Unschedulable)
I0110 19:49:08.286533  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-31: (1.146992ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38814]
I0110 19:49:08.286607  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-1: (1.642701ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38804]
I0110 19:49:08.287846  121338 wrap.go:47] POST /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/events: (1.91644ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38816]
I0110 19:49:08.287935  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-2: (975.68µs) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38804]
I0110 19:49:08.288388  121338 wrap.go:47] PUT /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-31/status: (3.071101ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38812]
I0110 19:49:08.289461  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-3: (1.073092ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38804]
I0110 19:49:08.289936  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-31: (1.088787ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38812]
I0110 19:49:08.290172  121338 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0110 19:49:08.290359  121338 scheduling_queue.go:821] About to try and schedule pod preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-32
I0110 19:49:08.290372  121338 scheduler.go:454] Attempting to schedule pod: preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-32
I0110 19:49:08.290458  121338 factory.go:1070] Unable to schedule preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-32: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0110 19:49:08.290528  121338 factory.go:1175] Updating pod condition for preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-32 to (PodScheduled==False, Reason=Unschedulable)
I0110 19:49:08.290912  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-4: (1.064154ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38804]
I0110 19:49:08.292248  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-32: (1.153011ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38814]
I0110 19:49:08.293273  121338 wrap.go:47] PUT /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-32/status: (2.001428ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38812]
I0110 19:49:08.293768  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-5: (2.035353ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38804]
I0110 19:49:08.293814  121338 wrap.go:47] PATCH /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/events/ppod-32.1578947fb8769a8e: (2.378768ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38818]
I0110 19:49:08.295035  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-32: (1.317ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38812]
I0110 19:49:08.295112  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-6: (1.028415ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38804]
I0110 19:49:08.295349  121338 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0110 19:49:08.295530  121338 scheduling_queue.go:821] About to try and schedule pod preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-31
I0110 19:49:08.295547  121338 scheduler.go:454] Attempting to schedule pod: preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-31
I0110 19:49:08.295645  121338 factory.go:1070] Unable to schedule preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-31: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0110 19:49:08.295688  121338 factory.go:1175] Updating pod condition for preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-31 to (PodScheduled==False, Reason=Unschedulable)
I0110 19:49:08.296563  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-7: (1.10896ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38812]
I0110 19:49:08.296835  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-31: (948.024µs) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38814]
I0110 19:49:08.297763  121338 wrap.go:47] PUT /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-31/status: (1.673879ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38820]
I0110 19:49:08.298382  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-8: (1.127019ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38812]
I0110 19:49:08.298861  121338 wrap.go:47] PATCH /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/events/ppod-31.1578947fb8db2cd9: (2.437654ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38822]
I0110 19:49:08.299530  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-31: (1.402557ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38820]
I0110 19:49:08.299824  121338 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0110 19:49:08.299972  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-9: (1.140115ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38812]
I0110 19:49:08.299983  121338 scheduling_queue.go:821] About to try and schedule pod preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-30
I0110 19:49:08.300073  121338 scheduler.go:454] Attempting to schedule pod: preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-30
I0110 19:49:08.300196  121338 factory.go:1070] Unable to schedule preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-30: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0110 19:49:08.300257  121338 factory.go:1175] Updating pod condition for preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-30 to (PodScheduled==False, Reason=Unschedulable)
I0110 19:49:08.301723  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-30: (1.277387ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38814]
I0110 19:49:08.301809  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-10: (1.348377ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38822]
I0110 19:49:08.302309  121338 wrap.go:47] POST /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/events: (1.522627ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38826]
I0110 19:49:08.302984  121338 wrap.go:47] PUT /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-30/status: (2.240331ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38824]
I0110 19:49:08.303771  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-11: (1.292075ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38814]
I0110 19:49:08.304514  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-30: (1.088547ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38826]
I0110 19:49:08.304790  121338 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0110 19:49:08.304988  121338 scheduling_queue.go:821] About to try and schedule pod preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-2
I0110 19:49:08.305003  121338 scheduler.go:454] Attempting to schedule pod: preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-2
I0110 19:49:08.305081  121338 factory.go:1070] Unable to schedule preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-2: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0110 19:49:08.305151  121338 factory.go:1175] Updating pod condition for preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-2 to (PodScheduled==False, Reason=Unschedulable)
I0110 19:49:08.305564  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-12: (1.279601ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38814]
I0110 19:49:08.307265  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-13: (1.062847ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38828]
I0110 19:49:08.307292  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-2: (1.649591ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38822]
I0110 19:49:08.307601  121338 wrap.go:47] PUT /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-2/status: (1.96952ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38826]
I0110 19:49:08.308342  121338 wrap.go:47] PATCH /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/events/ppod-2.1578947fa8ab8cb6: (2.591237ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38814]
I0110 19:49:08.308863  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-14: (1.083647ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38822]
I0110 19:49:08.309013  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-2: (973.155µs) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38826]
I0110 19:49:08.309323  121338 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0110 19:49:08.309540  121338 scheduling_queue.go:821] About to try and schedule pod preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-30
I0110 19:49:08.309562  121338 scheduler.go:454] Attempting to schedule pod: preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-30
I0110 19:49:08.309740  121338 factory.go:1070] Unable to schedule preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-30: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0110 19:49:08.309802  121338 factory.go:1175] Updating pod condition for preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-30 to (PodScheduled==False, Reason=Unschedulable)
I0110 19:49:08.310625  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-15: (1.346592ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38814]
I0110 19:49:08.311960  121338 wrap.go:47] PUT /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-30/status: (1.830153ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38828]
I0110 19:49:08.312475  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-30: (2.256672ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38830]
I0110 19:49:08.313642  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-16: (1.77572ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38832]
I0110 19:49:08.313921  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-30: (1.364357ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38828]
I0110 19:49:08.314212  121338 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0110 19:49:08.314355  121338 scheduling_queue.go:821] About to try and schedule pod preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-27
I0110 19:49:08.314364  121338 scheduler.go:454] Attempting to schedule pod: preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-27
I0110 19:49:08.314453  121338 factory.go:1070] Unable to schedule preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-27: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0110 19:49:08.314515  121338 factory.go:1175] Updating pod condition for preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-27 to (PodScheduled==False, Reason=Unschedulable)
I0110 19:49:08.314524  121338 wrap.go:47] PATCH /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/events/ppod-30.1578947fb9c5553f: (2.942645ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38814]
I0110 19:49:08.315199  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-17: (1.121868ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38832]
I0110 19:49:08.315987  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-27: (1.212558ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38828]
I0110 19:49:08.316862  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-18: (1.196216ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38832]
I0110 19:49:08.316891  121338 wrap.go:47] PUT /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-27/status: (2.141882ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38830]
I0110 19:49:08.317854  121338 wrap.go:47] POST /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/events: (2.69743ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38814]
I0110 19:49:08.318827  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-19: (1.481028ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38832]
I0110 19:49:08.318845  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-27: (1.496955ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38828]
I0110 19:49:08.319106  121338 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0110 19:49:08.319282  121338 scheduling_queue.go:821] About to try and schedule pod preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-26
I0110 19:49:08.319301  121338 scheduler.go:454] Attempting to schedule pod: preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-26
I0110 19:49:08.319401  121338 factory.go:1070] Unable to schedule preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-26: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0110 19:49:08.319446  121338 factory.go:1175] Updating pod condition for preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-26 to (PodScheduled==False, Reason=Unschedulable)
I0110 19:49:08.320639  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-20: (1.29933ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38828]
I0110 19:49:08.321682  121338 wrap.go:47] POST /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/events: (1.329616ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38836]
I0110 19:49:08.321714  121338 wrap.go:47] PUT /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-26/status: (1.959514ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38814]
I0110 19:49:08.322559  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-26: (2.102909ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38834]
I0110 19:49:08.322690  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-21: (1.696223ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38828]
I0110 19:49:08.323311  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-26: (1.082589ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38814]
I0110 19:49:08.323574  121338 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0110 19:49:08.323800  121338 scheduling_queue.go:821] About to try and schedule pod preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-27
I0110 19:49:08.323817  121338 scheduler.go:454] Attempting to schedule pod: preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-27
I0110 19:49:08.323937  121338 factory.go:1070] Unable to schedule preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-27: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0110 19:49:08.324021  121338 factory.go:1175] Updating pod condition for preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-27 to (PodScheduled==False, Reason=Unschedulable)
I0110 19:49:08.324592  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-22: (1.320758ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38828]
I0110 19:49:08.326066  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-27: (1.317726ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38836]
I0110 19:49:08.326342  121338 wrap.go:47] PUT /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-27/status: (1.576831ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38814]
I0110 19:49:08.327169  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-23: (1.415089ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38828]
I0110 19:49:08.327398  121338 wrap.go:47] PATCH /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/events/ppod-27.1578947fba9eab83: (2.222627ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38838]
I0110 19:49:08.328557  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-27: (1.496293ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38836]
I0110 19:49:08.328834  121338 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0110 19:49:08.329014  121338 scheduling_queue.go:821] About to try and schedule pod preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-26
I0110 19:49:08.329024  121338 scheduler.go:454] Attempting to schedule pod: preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-26
I0110 19:49:08.329112  121338 factory.go:1070] Unable to schedule preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-26: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0110 19:49:08.329156  121338 factory.go:1175] Updating pod condition for preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-26 to (PodScheduled==False, Reason=Unschedulable)
I0110 19:49:08.330650  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-26: (1.189097ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38838]
I0110 19:49:08.330891  121338 wrap.go:47] PUT /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-26/status: (1.420546ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38814]
I0110 19:49:08.331511  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-24: (3.262262ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38828]
I0110 19:49:08.332357  121338 wrap.go:47] PATCH /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/events/ppod-26.1578947fbaea32e7: (2.360651ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38840]
I0110 19:49:08.332368  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-26: (1.086263ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38814]
I0110 19:49:08.332733  121338 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0110 19:49:08.332951  121338 scheduling_queue.go:821] About to try and schedule pod preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-25
I0110 19:49:08.332971  121338 scheduler.go:454] Attempting to schedule pod: preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-25
I0110 19:49:08.333085  121338 factory.go:1070] Unable to schedule preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-25: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0110 19:49:08.333178  121338 factory.go:1175] Updating pod condition for preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-25 to (PodScheduled==False, Reason=Unschedulable)
I0110 19:49:08.333221  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-25: (1.223844ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38828]
I0110 19:49:08.334958  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-25: (1.408653ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38838]
I0110 19:49:08.335333  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-26: (1.261725ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38842]
I0110 19:49:08.335567  121338 wrap.go:47] PUT /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-25/status: (2.164703ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38840]
I0110 19:49:08.336087  121338 wrap.go:47] POST /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/events: (2.481791ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38828]
I0110 19:49:08.336988  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-27: (1.296767ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38842]
I0110 19:49:08.337338  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-25: (1.061643ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38840]
I0110 19:49:08.337681  121338 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0110 19:49:08.337828  121338 scheduling_queue.go:821] About to try and schedule pod preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-40
I0110 19:49:08.337857  121338 scheduler.go:454] Attempting to schedule pod: preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-40
I0110 19:49:08.337932  121338 factory.go:1070] Unable to schedule preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-40: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0110 19:49:08.337977  121338 factory.go:1175] Updating pod condition for preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-40 to (PodScheduled==False, Reason=Unschedulable)
I0110 19:49:08.338439  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-28: (1.040373ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38828]
I0110 19:49:08.339589  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-40: (1.372854ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38838]
I0110 19:49:08.340305  121338 wrap.go:47] POST /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/events: (1.518479ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38844]
I0110 19:49:08.340751  121338 wrap.go:47] PUT /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-40/status: (2.562582ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38840]
I0110 19:49:08.340784  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-29: (1.976877ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38828]
I0110 19:49:08.342444  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-30: (1.291425ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38844]
I0110 19:49:08.342580  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-40: (1.394408ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38838]
I0110 19:49:08.342855  121338 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0110 19:49:08.343016  121338 scheduling_queue.go:821] About to try and schedule pod preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-25
I0110 19:49:08.343028  121338 scheduler.go:454] Attempting to schedule pod: preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-25
I0110 19:49:08.343133  121338 factory.go:1070] Unable to schedule preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-25: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0110 19:49:08.343190  121338 factory.go:1175] Updating pod condition for preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-25 to (PodScheduled==False, Reason=Unschedulable)
I0110 19:49:08.344003  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-31: (1.179383ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38844]
I0110 19:49:08.345120  121338 wrap.go:47] PUT /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-25/status: (1.71485ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38838]
I0110 19:49:08.345120  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-25: (1.218179ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38846]
I0110 19:49:08.345694  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-32: (1.283187ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38844]
I0110 19:49:08.346939  121338 wrap.go:47] PATCH /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/events/ppod-25.1578947fbbbb8c0d: (2.862526ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38848]
I0110 19:49:08.346942  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-25: (1.164553ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38838]
I0110 19:49:08.347516  121338 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0110 19:49:08.347683  121338 scheduling_queue.go:821] About to try and schedule pod preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-40
I0110 19:49:08.347701  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-33: (1.109854ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38844]
I0110 19:49:08.347706  121338 scheduler.go:454] Attempting to schedule pod: preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-40
I0110 19:49:08.347828  121338 factory.go:1070] Unable to schedule preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-40: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0110 19:49:08.347882  121338 factory.go:1175] Updating pod condition for preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-40 to (PodScheduled==False, Reason=Unschedulable)
I0110 19:49:08.349382  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-34: (1.290723ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38848]
I0110 19:49:08.349688  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-40: (1.1415ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38850]
I0110 19:49:08.349796  121338 wrap.go:47] PUT /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-40/status: (1.633998ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38846]
I0110 19:49:08.351469  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-35: (1.674524ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38848]
I0110 19:49:08.351857  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-40: (1.499359ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38850]
I0110 19:49:08.352078  121338 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0110 19:49:08.352287  121338 scheduling_queue.go:821] About to try and schedule pod preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-21
I0110 19:49:08.352300  121338 scheduler.go:454] Attempting to schedule pod: preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-21
I0110 19:49:08.352371  121338 factory.go:1070] Unable to schedule preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-21: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0110 19:49:08.352417  121338 factory.go:1175] Updating pod condition for preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-21 to (PodScheduled==False, Reason=Unschedulable)
I0110 19:49:08.353948  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-36: (2.024258ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38848]
I0110 19:49:08.354334  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-21: (1.590855ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38852]
I0110 19:49:08.355088  121338 wrap.go:47] PUT /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-21/status: (2.435502ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38850]
I0110 19:49:08.356111  121338 wrap.go:47] PATCH /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/events/ppod-40.1578947fbc04fbce: (5.264567ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38846]
I0110 19:49:08.356557  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-37: (1.612169ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38848]
I0110 19:49:08.357367  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-21: (1.543783ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38850]
I0110 19:49:08.357840  121338 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0110 19:49:08.358010  121338 scheduling_queue.go:821] About to try and schedule pod preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-37
I0110 19:49:08.358028  121338 scheduler.go:454] Attempting to schedule pod: preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-37
I0110 19:49:08.358113  121338 factory.go:1070] Unable to schedule preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-37: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0110 19:49:08.358158  121338 factory.go:1175] Updating pod condition for preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-37 to (PodScheduled==False, Reason=Unschedulable)
I0110 19:49:08.358213  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-38: (1.237257ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38848]
I0110 19:49:08.359002  121338 wrap.go:47] PATCH /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/events/ppod-21.1578947fabc321dd: (2.246285ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38846]
I0110 19:49:08.359956  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-37: (1.398942ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38848]
I0110 19:49:08.360588  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-39: (1.867057ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38852]
I0110 19:49:08.370508  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-40: (8.920394ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38852]
I0110 19:49:08.371318  121338 wrap.go:47] PUT /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-37/status: (12.768958ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38850]
I0110 19:49:08.372702  121338 wrap.go:47] POST /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/events: (13.09162ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38846]
I0110 19:49:08.374269  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-37: (1.409387ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38850]
I0110 19:49:08.374579  121338 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0110 19:49:08.375049  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-41: (2.686495ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38852]
I0110 19:49:08.375301  121338 scheduling_queue.go:821] About to try and schedule pod preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-35
I0110 19:49:08.375315  121338 scheduler.go:454] Attempting to schedule pod: preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-35
I0110 19:49:08.375406  121338 factory.go:1070] Unable to schedule preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-35: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0110 19:49:08.375449  121338 factory.go:1175] Updating pod condition for preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-35 to (PodScheduled==False, Reason=Unschedulable)
I0110 19:49:08.377060  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-42: (1.506183ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38846]
I0110 19:49:08.377386  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-35: (1.278416ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38854]
I0110 19:49:08.378077  121338 wrap.go:47] POST /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/events: (1.791507ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38856]
I0110 19:49:08.378347  121338 wrap.go:47] PUT /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-35/status: (2.204901ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38848]
I0110 19:49:08.378558  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-43: (1.131798ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38846]
I0110 19:49:08.380220  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-44: (1.262679ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38854]
I0110 19:49:08.380281  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-35: (1.419989ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38856]
I0110 19:49:08.380623  121338 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0110 19:49:08.380770  121338 scheduling_queue.go:821] About to try and schedule pod preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-37
I0110 19:49:08.380786  121338 scheduler.go:454] Attempting to schedule pod: preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-37
I0110 19:49:08.380897  121338 factory.go:1070] Unable to schedule preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-37: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0110 19:49:08.380943  121338 factory.go:1175] Updating pod condition for preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-37 to (PodScheduled==False, Reason=Unschedulable)
I0110 19:49:08.381911  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-45: (1.219523ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38856]
I0110 19:49:08.382739  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-37: (1.429212ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38854]
I0110 19:49:08.383547  121338 wrap.go:47] PUT /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-37/status: (2.074687ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38858]
I0110 19:49:08.383797  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-46: (1.19735ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38856]
I0110 19:49:08.385006  121338 wrap.go:47] PATCH /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/events/ppod-37.1578947fbd38e7cc: (3.433149ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38860]
I0110 19:49:08.385383  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-47: (1.170178ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38856]
I0110 19:49:08.385610  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-37: (1.594602ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38858]
I0110 19:49:08.385840  121338 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0110 19:49:08.385997  121338 scheduling_queue.go:821] About to try and schedule pod preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-35
I0110 19:49:08.386036  121338 scheduler.go:454] Attempting to schedule pod: preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-35
I0110 19:49:08.386128  121338 factory.go:1070] Unable to schedule preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-35: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0110 19:49:08.386193  121338 factory.go:1175] Updating pod condition for preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-35 to (PodScheduled==False, Reason=Unschedulable)
I0110 19:49:08.386864  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-48: (1.032262ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38860]
I0110 19:49:08.387461  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-35: (1.104587ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38858]
I0110 19:49:08.387912  121338 wrap.go:47] PUT /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-35/status: (1.473507ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38854]
I0110 19:49:08.388358  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-49: (1.089236ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38862]
I0110 19:49:08.388596  121338 preemption_test.go:598] Cleaning up all pods...
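
This preemption_test.go line is the pivot of the log: the test body is done and teardown begins, so from here on DELETE calls for ppod-0, ppod-1, ... interleave with schedule retries still in flight; that deliberate overlap is exactly the race the test exercises. A sketch of such a cleanup loop against the context-free client-go API of early 2019 (the real helper also waits for each pod to actually disappear; cleanupPods is an illustrative name):

package sketch

import (
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	clientset "k8s.io/client-go/kubernetes"
)

// Delete every test pod with grace period 0 so teardown overlaps the
// scheduler's in-flight retries.
func cleanupPods(cs clientset.Interface, ns string, names []string) error {
	grace := int64(0)
	opts := &metav1.DeleteOptions{GracePeriodSeconds: &grace}
	for _, name := range names {
		if err := cs.CoreV1().Pods(ns).Delete(name, opts); err != nil {
			return err
		}
	}
	return nil
}
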
I0110 19:49:08.389556  121338 wrap.go:47] PATCH /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/events/ppod-35.1578947fbe40a9e5: (2.302398ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38860]
I0110 19:49:08.389733  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-35: (1.28237ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38854]
I0110 19:49:08.390181  121338 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0110 19:49:08.390405  121338 scheduling_queue.go:821] About to try and schedule pod preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-29
I0110 19:49:08.390422  121338 scheduler.go:454] Attempting to schedule pod: preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-29
I0110 19:49:08.390567  121338 factory.go:1070] Unable to schedule preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-29: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0110 19:49:08.390620  121338 factory.go:1175] Updating pod condition for preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-29 to (PodScheduled==False, Reason=Unschedulable)
I0110 19:49:08.392301  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-29: (1.408461ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38858]
I0110 19:49:08.393541  121338 wrap.go:47] PUT /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-29/status: (2.658459ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38860]
I0110 19:49:08.393871  121338 wrap.go:47] POST /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/events: (2.536117ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38864]
I0110 19:49:08.394667  121338 wrap.go:47] DELETE /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-0: (5.874436ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38862]
I0110 19:49:08.395141  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-29: (1.185805ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38860]
I0110 19:49:08.395346  121338 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0110 19:49:08.395562  121338 scheduling_queue.go:821] About to try and schedule pod preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-14
I0110 19:49:08.395581  121338 scheduler.go:454] Attempting to schedule pod: preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-14
I0110 19:49:08.395693  121338 factory.go:1070] Unable to schedule preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-14: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0110 19:49:08.395750  121338 factory.go:1175] Updating pod condition for preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-14 to (PodScheduled==False, Reason=Unschedulable)
I0110 19:49:08.397933  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-14: (1.919402ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38858]
I0110 19:49:08.397938  121338 wrap.go:47] PUT /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-14/status: (1.973374ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38860]
I0110 19:49:08.398809  121338 wrap.go:47] PATCH /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/events/ppod-14.1578947fa722507e: (2.247079ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38866]
I0110 19:49:08.399744  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-14: (1.219782ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38860]
I0110 19:49:08.399996  121338 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0110 19:49:08.400011  121338 wrap.go:47] DELETE /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-1: (4.956223ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38864]
I0110 19:49:08.400200  121338 scheduling_queue.go:821] About to try and schedule pod preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-24
I0110 19:49:08.400215  121338 scheduler.go:454] Attempting to schedule pod: preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-24
I0110 19:49:08.400353  121338 factory.go:1070] Unable to schedule preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-24: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0110 19:49:08.400405  121338 factory.go:1175] Updating pod condition for preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-24 to (PodScheduled==False, Reason=Unschedulable)
I0110 19:49:08.401763  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-24: (912.792µs) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38868]
I0110 19:49:08.402172  121338 wrap.go:47] POST /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/events: (1.201874ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38870]
I0110 19:49:08.402734  121338 wrap.go:47] PUT /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-24/status: (2.066655ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38858]
I0110 19:49:08.404158  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-24: (1.018569ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38870]
I0110 19:49:08.404433  121338 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0110 19:49:08.404458  121338 wrap.go:47] DELETE /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-2: (4.029607ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38866]
I0110 19:49:08.404621  121338 scheduling_queue.go:821] About to try and schedule pod preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-13
I0110 19:49:08.404640  121338 scheduler.go:454] Attempting to schedule pod: preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-13
I0110 19:49:08.404711  121338 factory.go:1070] Unable to schedule preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-13: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0110 19:49:08.404791  121338 factory.go:1175] Updating pod condition for preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-13 to (PodScheduled==False, Reason=Unschedulable)
I0110 19:49:08.422054  121338 wrap.go:47] PUT /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-13/status: (17.045495ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38868]
I0110 19:49:08.423322  121338 wrap.go:47] PATCH /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/events/ppod-13.1578947fadc61a75: (17.802234ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38874]
I0110 19:49:08.430384  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-13: (25.223875ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38872]
I0110 19:49:08.430930  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-13: (7.376131ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38868]
I0110 19:49:08.434740  121338 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0110 19:49:08.434882  121338 wrap.go:47] DELETE /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-3: (30.076207ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38870]
I0110 19:49:08.434996  121338 scheduling_queue.go:821] About to try and schedule pod preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-24
I0110 19:49:08.435008  121338 scheduler.go:454] Attempting to schedule pod: preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-24
I0110 19:49:08.435140  121338 factory.go:1070] Unable to schedule preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-24: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0110 19:49:08.435192  121338 factory.go:1175] Updating pod condition for preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-24 to (PodScheduled==False, Reason=Unschedulable)
I0110 19:49:08.447001  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-24: (1.855545ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38870]
I0110 19:49:08.448362  121338 wrap.go:47] PUT /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-24/status: (3.664499ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38868]
I0110 19:49:08.449975  121338 wrap.go:47] PATCH /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/events/ppod-24.1578947fbfbd7c2d: (5.337523ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38874]
I0110 19:49:08.452375  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-24: (3.159707ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38870]
I0110 19:49:08.452708  121338 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0110 19:49:08.453339  121338 scheduling_queue.go:821] About to try and schedule pod preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-4
I0110 19:49:08.453385  121338 scheduler.go:450] Skip schedule deleting pod: preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-4
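
Here the race resolves safely: cleanup has already begun deleting ppod-4 (its DELETE completes a few lines below), so when the pod's queued schedule attempt pops, scheduler.go drops it rather than scheduling a dying pod. The guard is essentially a deletion-timestamp check, sketched with an illustrative name:

package sketch

import v1 "k8s.io/api/core/v1"

// A pod popped from the scheduling queue is ignored once deletion has begun
// (its DeletionTimestamp is set); the real loop logs "Skip schedule deleting
// pod" and moves on to the next pod.
func shouldSkipPodSchedule(p *v1.Pod) bool {
	return p.DeletionTimestamp != nil
}
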
I0110 19:49:08.454257  121338 scheduling_queue.go:821] About to try and schedule pod preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-9
I0110 19:49:08.454283  121338 scheduler.go:454] Attempting to schedule pod: preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-9
I0110 19:49:08.454398  121338 factory.go:1070] Unable to schedule preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-9: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0110 19:49:08.454436  121338 factory.go:1175] Updating pod condition for preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-9 to (PodScheduled==False, Reason=Unschedulable)
I0110 19:49:08.459306  121338 wrap.go:47] DELETE /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-4: (13.904273ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38878]
I0110 19:49:08.459383  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-9: (4.173819ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38880]
I0110 19:49:08.460579  121338 wrap.go:47] POST /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/events: (2.591792ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38874]
I0110 19:49:08.466964  121338 wrap.go:47] PATCH /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/events/ppod-9.1578947fa682f7d6: (3.608556ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38874]
I0110 19:49:08.467041  121338 wrap.go:47] DELETE /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-5: (7.292479ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38878]
I0110 19:49:08.474427  121338 wrap.go:47] DELETE /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-6: (6.243093ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38874]
I0110 19:49:08.480027  121338 wrap.go:47] DELETE /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-7: (4.612356ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38874]
I0110 19:49:08.482722  121338 wrap.go:47] PUT /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-9/status: (27.027348ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38876]
I0110 19:49:08.503178  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-9: (14.526497ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38876]
I0110 19:49:08.503698  121338 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0110 19:49:08.503896  121338 scheduling_queue.go:821] About to try and schedule pod preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-49
I0110 19:49:08.503911  121338 scheduler.go:454] Attempting to schedule pod: preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-49
I0110 19:49:08.504036  121338 factory.go:1070] Unable to schedule preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-49: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0110 19:49:08.504079  121338 factory.go:1175] Updating pod condition for preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-49 to (PodScheduled==False, Reason=Unschedulable)
I0110 19:49:08.507472  121338 wrap.go:47] POST /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/events: (2.335543ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38882]
I0110 19:49:08.528423  121338 wrap.go:47] PUT /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-49/status: (21.916138ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38876]
I0110 19:49:08.528648  121338 wrap.go:47] DELETE /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-8: (48.241325ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38874]
I0110 19:49:08.528896  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-49: (22.984355ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38880]
I0110 19:49:08.531103  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-49: (1.836607ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38876]
I0110 19:49:08.531407  121338 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0110 19:49:08.531575  121338 scheduling_queue.go:821] About to try and schedule pod preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-45
I0110 19:49:08.531599  121338 scheduler.go:454] Attempting to schedule pod: preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-45
I0110 19:49:08.531687  121338 factory.go:1070] Unable to schedule preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-45: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0110 19:49:08.531736  121338 factory.go:1175] Updating pod condition for preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-45 to (PodScheduled==False, Reason=Unschedulable)
I0110 19:49:08.535646  121338 wrap.go:47] POST /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/events: (2.164496ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38886]
I0110 19:49:08.536639  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-45: (2.468891ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38882]
I0110 19:49:08.536867  121338 wrap.go:47] PUT /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-45/status: (2.328413ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38880]
I0110 19:49:08.539660  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-45: (2.334916ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38882]
I0110 19:49:08.539805  121338 wrap.go:47] DELETE /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-9: (9.130484ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38874]
I0110 19:49:08.540146  121338 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0110 19:49:08.540477  121338 cacher.go:598] cacher (*core.Pod): 1 objects queued in incoming channel.
I0110 19:49:08.540518  121338 cacher.go:598] cacher (*core.Pod): 2 objects queued in incoming channel.
I0110 19:49:08.540530  121338 cacher.go:598] cacher (*core.Pod): 3 objects queued in incoming channel.
I0110 19:49:08.540827  121338 scheduling_queue.go:821] About to try and schedule pod preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-49
I0110 19:49:08.540839  121338 scheduler.go:454] Attempting to schedule pod: preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-49
I0110 19:49:08.540932  121338 factory.go:1070] Unable to schedule preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-49: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0110 19:49:08.540971  121338 factory.go:1175] Updating pod condition for preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-49 to (PodScheduled==False, Reason=Unschedulable)
I0110 19:49:08.545867  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-49: (3.11554ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38886]
I0110 19:49:08.546390  121338 wrap.go:47] PUT /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-49/status: (4.216336ms) 409 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38888]
I0110 19:49:08.546756  121338 wrap.go:47] PATCH /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/events/ppod-49.1578947fc5eb77d7: (4.944192ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38890]
I0110 19:49:08.548342  121338 wrap.go:47] DELETE /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-10: (8.038231ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38874]
I0110 19:49:08.548427  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-49: (1.635316ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38888]
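
The 409 three lines up is the only non-2xx response in this stretch: two writers raced to update ppod-49's status, and the later PUT lost on ResourceVersion. The GET just above re-reads the pod, and the retried PUT at 19:49:08.559 succeeds, i.e. the standard read-modify-retry loop, which client-go packages as a helper (updatePodStatus and mutate are illustrative names):

package sketch

import (
	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	clientset "k8s.io/client-go/kubernetes"
	"k8s.io/client-go/util/retry"
)

// Re-read and reapply a status change whenever the API server answers
// 409 Conflict. retry.RetryOnConflict is the stock client-go helper.
func updatePodStatus(cs clientset.Interface, ns, name string, mutate func(*v1.Pod)) error {
	return retry.RetryOnConflict(retry.DefaultRetry, func() error {
		p, err := cs.CoreV1().Pods(ns).Get(name, metav1.GetOptions{})
		if err != nil {
			return err
		}
		mutate(p)
		_, err = cs.CoreV1().Pods(ns).UpdateStatus(p)
		return err
	})
}
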
I0110 19:49:08.548742  121338 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0110 19:49:08.548936  121338 scheduling_queue.go:821] About to try and schedule pod preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-33
I0110 19:49:08.548978  121338 scheduler.go:454] Attempting to schedule pod: preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-33
I0110 19:49:08.549099  121338 factory.go:1070] Unable to schedule preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-33: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0110 19:49:08.549250  121338 factory.go:1175] Updating pod condition for preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-33 to (PodScheduled==False, Reason=Unschedulable)
I0110 19:49:08.551641  121338 wrap.go:47] POST /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/events: (1.765241ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38894]
I0110 19:49:08.553331  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-33: (3.981075ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38886]
I0110 19:49:08.553422  121338 wrap.go:47] PUT /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-33/status: (3.572435ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38892]
I0110 19:49:08.555608  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-33: (1.794756ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38886]
I0110 19:49:08.555894  121338 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0110 19:49:08.556085  121338 scheduling_queue.go:821] About to try and schedule pod preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-49
I0110 19:49:08.556103  121338 scheduler.go:454] Attempting to schedule pod: preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-49
I0110 19:49:08.556194  121338 factory.go:1070] Unable to schedule preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-49: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0110 19:49:08.556242  121338 factory.go:1175] Updating pod condition for preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-49 to (PodScheduled==False, Reason=Unschedulable)
I0110 19:49:08.559413  121338 wrap.go:47] PUT /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-49/status: (2.942156ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38886]
I0110 19:49:08.559974  121338 wrap.go:47] DELETE /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-11: (11.269526ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38890]
I0110 19:49:08.560057  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-49: (3.42328ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38894]
I0110 19:49:08.561024  121338 wrap.go:47] PATCH /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/events/ppod-49.1578947fc5eb77d7: (3.963868ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38896]
I0110 19:49:08.561063  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-49: (1.20916ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38886]
I0110 19:49:08.561661  121338 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0110 19:49:08.561869  121338 scheduling_queue.go:821] About to try and schedule pod preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-33
I0110 19:49:08.561911  121338 scheduler.go:454] Attempting to schedule pod: preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-33
I0110 19:49:08.562045  121338 factory.go:1070] Unable to schedule preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-33: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0110 19:49:08.562107  121338 factory.go:1175] Updating pod condition for preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-33 to (PodScheduled==False, Reason=Unschedulable)
I0110 19:49:08.564583  121338 wrap.go:47] PUT /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-33/status: (2.047717ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38896]
I0110 19:49:08.565292  121338 wrap.go:47] DELETE /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-12: (4.874545ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38894]
I0110 19:49:08.566447  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-33: (1.396026ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38896]
I0110 19:49:08.566472  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-33: (4.019294ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38890]
I0110 19:49:08.566734  121338 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0110 19:49:08.566887  121338 scheduling_queue.go:821] About to try and schedule pod preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-29
I0110 19:49:08.566901  121338 scheduler.go:454] Attempting to schedule pod: preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-29
I0110 19:49:08.567013  121338 factory.go:1070] Unable to schedule preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-29: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0110 19:49:08.567074  121338 factory.go:1175] Updating pod condition for preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-29 to (PodScheduled==False, Reason=Unschedulable)
I0110 19:49:08.567144  121338 wrap.go:47] PATCH /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/events/ppod-33.1578947fc89b5193: (4.277725ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38898]
I0110 19:49:08.568470  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-29: (1.164401ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38896]
I0110 19:49:08.569440  121338 wrap.go:47] PUT /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-29/status: (2.138497ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38890]
I0110 19:49:08.569929  121338 wrap.go:47] PATCH /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/events/ppod-29.1578947fbf283ce1: (2.157016ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38898]
I0110 19:49:08.571360  121338 wrap.go:47] DELETE /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-13: (5.681732ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38894]
I0110 19:49:08.578843  121338 wrap.go:47] DELETE /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-14: (7.022987ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38898]
I0110 19:49:08.586181  121338 wrap.go:47] DELETE /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-15: (6.481643ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38898]
I0110 19:49:08.591009  121338 wrap.go:47] DELETE /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-16: (4.330574ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38898]
I0110 19:49:08.595299  121338 wrap.go:47] DELETE /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-17: (3.760336ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38898]
I0110 19:49:08.595639  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-29: (25.544354ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38890]
I0110 19:49:08.595895  121338 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0110 19:49:08.596121  121338 scheduling_queue.go:821] About to try and schedule pod preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-19
I0110 19:49:08.596142  121338 scheduler.go:454] Attempting to schedule pod: preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-19
I0110 19:49:08.596262  121338 factory.go:1070] Unable to schedule preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-19: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0110 19:49:08.596342  121338 factory.go:1175] Updating pod condition for preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-19 to (PodScheduled==False, Reason=Unschedulable)
I0110 19:49:08.597642  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-19: (1.00626ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38896]
I0110 19:49:08.598552  121338 wrap.go:47] PUT /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-19/status: (1.969344ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38890]
I0110 19:49:08.599719  121338 wrap.go:47] PATCH /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/events/ppod-19.1578947fa8146e94: (2.632077ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38900]
I0110 19:49:08.600102  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-19: (1.139192ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38890]
I0110 19:49:08.600370  121338 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0110 19:49:08.600518  121338 wrap.go:47] DELETE /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-18: (4.834098ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38898]
I0110 19:49:08.600558  121338 scheduling_queue.go:821] About to try and schedule pod preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-20
I0110 19:49:08.600570  121338 scheduler.go:454] Attempting to schedule pod: preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-20
I0110 19:49:08.600680  121338 factory.go:1070] Unable to schedule preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-20: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0110 19:49:08.600723  121338 factory.go:1175] Updating pod condition for preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-20 to (PodScheduled==False, Reason=Unschedulable)
I0110 19:49:08.602392  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-20: (1.153694ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38902]
I0110 19:49:08.603054  121338 wrap.go:47] PUT /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-20/status: (2.089596ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38896]
I0110 19:49:08.604028  121338 wrap.go:47] PATCH /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/events/ppod-20.1578947faa6eceb8: (2.687053ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38904]
I0110 19:49:08.604571  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-20: (1.009161ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38896]
I0110 19:49:08.604851  121338 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0110 19:49:08.605007  121338 scheduling_queue.go:821] About to try and schedule pod preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-42
I0110 19:49:08.605024  121338 scheduler.go:454] Attempting to schedule pod: preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-42
I0110 19:49:08.605169  121338 wrap.go:47] DELETE /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-19: (4.311942ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38900]
I0110 19:49:08.605158  121338 factory.go:1070] Unable to schedule preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-42: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0110 19:49:08.605224  121338 factory.go:1175] Updating pod condition for preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-42 to (PodScheduled==False, Reason=Unschedulable)
I0110 19:49:08.607311  121338 wrap.go:47] POST /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/events: (1.421659ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38908]
I0110 19:49:08.607379  121338 wrap.go:47] PUT /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-42/status: (1.878948ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38904]
I0110 19:49:08.607859  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-42: (2.002612ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38906]
I0110 19:49:08.609116  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-42: (1.206644ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38904]
I0110 19:49:08.609427  121338 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0110 19:49:08.609641  121338 scheduling_queue.go:821] About to try and schedule pod preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-48
I0110 19:49:08.609658  121338 scheduler.go:454] Attempting to schedule pod: preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-48
I0110 19:49:08.609738  121338 factory.go:1070] Unable to schedule preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-48: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0110 19:49:08.609824  121338 factory.go:1175] Updating pod condition for preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-48 to (PodScheduled==False, Reason=Unschedulable)
I0110 19:49:08.609875  121338 wrap.go:47] DELETE /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-20: (4.382297ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38902]
I0110 19:49:08.612526  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-48: (2.524379ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38906]
I0110 19:49:08.612855  121338 wrap.go:47] PUT /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-48/status: (2.736438ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38908]
I0110 19:49:08.612980  121338 wrap.go:47] POST /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/events: (2.423551ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38910]
I0110 19:49:08.614193  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-48: (1.010131ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38908]
I0110 19:49:08.614419  121338 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0110 19:49:08.614578  121338 scheduling_queue.go:821] About to try and schedule pod preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-48
I0110 19:49:08.614607  121338 scheduler.go:454] Attempting to schedule pod: preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-48
I0110 19:49:08.614697  121338 factory.go:1070] Unable to schedule preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-48: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0110 19:49:08.614748  121338 factory.go:1175] Updating pod condition for preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-48 to (PodScheduled==False, Reason=Unschedulable)
I0110 19:49:08.614983  121338 wrap.go:47] DELETE /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-21: (4.829632ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38902]
I0110 19:49:08.616160  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-48: (1.152068ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38906]
I0110 19:49:08.616659  121338 wrap.go:47] PUT /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-48/status: (1.703845ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38908]
I0110 19:49:08.617684  121338 wrap.go:47] PATCH /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/events/ppod-48.1578947fcc387394: (2.388592ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38902]
I0110 19:49:08.618864  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-48: (1.504203ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38908]
I0110 19:49:08.619197  121338 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
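
generic_scheduler.go:1108 keeps reporting node1 as "a potential node for preemption": with a single node, every pod that fails the fit predicates lands in the same candidate set, because evicting enough lower-priority pods could free the missing resources. A deliberately simplified, self-contained sketch of that feasibility test (CPU only; the real generic_scheduler re-runs the predicates, accounts for memory and PDBs, and picks a minimal victim set; all names and numbers below are toy values):

    package main

    import "fmt"

    // podInfo is a toy stand-in for the scheduler's per-pod bookkeeping.
    type podInfo struct {
        name     string
        priority int32
        cpuMilli int64 // requested CPU in millicores
    }

    // preemptionMightHelp captures the idea behind "Node nodeX is a
    // potential node for preemption": could evicting every pod with lower
    // priority than the preemptor free enough CPU for it to fit?
    func preemptionMightHelp(allocatableMilli int64, running []podInfo, preemptor podInfo) bool {
        used := int64(0)
        reclaimable := int64(0)
        for _, p := range running {
            used += p.cpuMilli
            if p.priority < preemptor.priority {
                reclaimable += p.cpuMilli
            }
        }
        free := allocatableMilli - used
        return free+reclaimable >= preemptor.cpuMilli
    }

    func main() {
        running := []podInfo{{"rpod-0", 0, 400}, {"rpod-1", 0, 400}}
        preemptor := podInfo{"preemptor-pod", 100, 600}
        fmt.Println(preemptionMightHelp(1000, running, preemptor)) // true
    }
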
I0110 19:49:08.619687  121338 wrap.go:47] DELETE /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-22: (4.156291ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38912]
I0110 19:49:08.624039  121338 wrap.go:47] DELETE /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-23: (3.958602ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38902]
I0110 19:49:08.624979  121338 scheduling_queue.go:821] About to try and schedule pod preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-22
I0110 19:49:08.625015  121338 scheduler.go:450] Skip schedule deleting pod: preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-22
I0110 19:49:08.627311  121338 wrap.go:47] POST /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/events: (1.911075ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38906]
I0110 19:49:08.627585  121338 scheduling_queue.go:821] About to try and schedule pod preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-24
I0110 19:49:08.627623  121338 scheduler.go:450] Skip schedule deleting pod: preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-24
I0110 19:49:08.628913  121338 wrap.go:47] DELETE /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-24: (4.469359ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38902]
I0110 19:49:08.629371  121338 wrap.go:47] POST /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/events: (1.491431ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38906]
I0110 19:49:08.632274  121338 scheduling_queue.go:821] About to try and schedule pod preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-25
I0110 19:49:08.632311  121338 scheduler.go:450] Skip schedule deleting pod: preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-25
I0110 19:49:08.633430  121338 wrap.go:47] DELETE /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-25: (4.125209ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38902]
I0110 19:49:08.634599  121338 wrap.go:47] POST /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/events: (1.676149ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38906]
I0110 19:49:08.636376  121338 scheduling_queue.go:821] About to try and schedule pod preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-26
I0110 19:49:08.636412  121338 scheduler.go:450] Skip schedule deleting pod: preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-26
I0110 19:49:08.637659  121338 wrap.go:47] DELETE /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-26: (3.867006ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38902]
I0110 19:49:08.637996  121338 wrap.go:47] POST /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/events: (1.220834ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38906]
I0110 19:49:08.640796  121338 scheduling_queue.go:821] About to try and schedule pod preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-27
I0110 19:49:08.640841  121338 scheduler.go:450] Skip schedule deleting pod: preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-27
I0110 19:49:08.642458  121338 wrap.go:47] DELETE /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-27: (4.483119ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38902]
I0110 19:49:08.642764  121338 wrap.go:47] POST /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/events: (1.586195ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38906]
I0110 19:49:08.645725  121338 scheduling_queue.go:821] About to try and schedule pod preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-28
I0110 19:49:08.645789  121338 scheduler.go:450] Skip schedule deleting pod: preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-28
I0110 19:49:08.646991  121338 wrap.go:47] DELETE /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-28: (4.143394ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38902]
I0110 19:49:08.647753  121338 wrap.go:47] POST /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/events: (1.671553ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38906]
I0110 19:49:08.649912  121338 scheduling_queue.go:821] About to try and schedule pod preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-29
I0110 19:49:08.649949  121338 scheduler.go:450] Skip schedule deleting pod: preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-29
I0110 19:49:08.651062  121338 wrap.go:47] DELETE /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-29: (3.628064ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38902]
I0110 19:49:08.653364  121338 wrap.go:47] POST /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/events: (3.162015ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38906]
I0110 19:49:08.654617  121338 scheduling_queue.go:821] About to try and schedule pod preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-30
I0110 19:49:08.654659  121338 scheduler.go:450] Skip schedule deleting pod: preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-30
I0110 19:49:08.656053  121338 wrap.go:47] DELETE /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-30: (4.600177ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38902]
I0110 19:49:08.656865  121338 wrap.go:47] POST /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/events: (1.956955ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38906]
I0110 19:49:08.659215  121338 scheduling_queue.go:821] About to try and schedule pod preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-31
I0110 19:49:08.659261  121338 scheduler.go:450] Skip schedule deleting pod: preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-31
I0110 19:49:08.660696  121338 wrap.go:47] DELETE /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-31: (3.949869ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38902]
I0110 19:49:08.661214  121338 wrap.go:47] POST /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/events: (1.532901ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38906]
I0110 19:49:08.664077  121338 scheduling_queue.go:821] About to try and schedule pod preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-32
I0110 19:49:08.664139  121338 scheduler.go:450] Skip schedule deleting pod: preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-32
I0110 19:49:08.665431  121338 wrap.go:47] DELETE /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-32: (4.072822ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38902]
I0110 19:49:08.666364  121338 wrap.go:47] POST /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/events: (1.911371ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38906]
I0110 19:49:08.668994  121338 scheduling_queue.go:821] About to try and schedule pod preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-33
I0110 19:49:08.669036  121338 scheduler.go:450] Skip schedule deleting pod: preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-33
I0110 19:49:08.670693  121338 wrap.go:47] DELETE /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-33: (4.809204ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38902]
I0110 19:49:08.670951  121338 wrap.go:47] POST /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/events: (1.597367ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38906]
I0110 19:49:08.673902  121338 scheduling_queue.go:821] About to try and schedule pod preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-34
I0110 19:49:08.673938  121338 scheduler.go:450] Skip schedule deleting pod: preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-34
I0110 19:49:08.675475  121338 wrap.go:47] DELETE /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-34: (4.369239ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38902]
I0110 19:49:08.675906  121338 wrap.go:47] POST /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/events: (1.627313ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38906]
I0110 19:49:08.680438  121338 scheduling_queue.go:821] About to try and schedule pod preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-35
I0110 19:49:08.680518  121338 scheduler.go:450] Skip schedule deleting pod: preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-35
I0110 19:49:08.682033  121338 wrap.go:47] DELETE /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-35: (6.192232ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38902]
I0110 19:49:08.683439  121338 wrap.go:47] POST /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/events: (1.893123ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38906]
I0110 19:49:08.685458  121338 scheduling_queue.go:821] About to try and schedule pod preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-36
I0110 19:49:08.685528  121338 scheduler.go:450] Skip schedule deleting pod: preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-36
I0110 19:49:08.686920  121338 wrap.go:47] DELETE /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-36: (4.532404ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38902]
I0110 19:49:08.687283  121338 wrap.go:47] POST /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/events: (1.420518ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38906]
I0110 19:49:08.690099  121338 scheduling_queue.go:821] About to try and schedule pod preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-37
I0110 19:49:08.690176  121338 scheduler.go:450] Skip schedule deleting pod: preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-37
I0110 19:49:08.691677  121338 wrap.go:47] DELETE /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-37: (4.365496ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38902]
I0110 19:49:08.692196  121338 wrap.go:47] POST /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/events: (1.647674ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38906]
I0110 19:49:08.695041  121338 scheduling_queue.go:821] About to try and schedule pod preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-38
I0110 19:49:08.695125  121338 scheduler.go:450] Skip schedule deleting pod: preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-38
I0110 19:49:08.696301  121338 wrap.go:47] DELETE /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-38: (4.154063ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38902]
I0110 19:49:08.698045  121338 wrap.go:47] POST /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/events: (1.604002ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38906]
I0110 19:49:08.699425  121338 scheduling_queue.go:821] About to try and schedule pod preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-39
I0110 19:49:08.699463  121338 scheduler.go:450] Skip schedule deleting pod: preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-39
I0110 19:49:08.700856  121338 wrap.go:47] DELETE /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-39: (4.151836ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38902]
I0110 19:49:08.701261  121338 wrap.go:47] POST /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/events: (1.524854ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38906]
I0110 19:49:08.704221  121338 scheduling_queue.go:821] About to try and schedule pod preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-40
I0110 19:49:08.704295  121338 scheduler.go:450] Skip schedule deleting pod: preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-40
I0110 19:49:08.706067  121338 wrap.go:47] POST /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/events: (1.443368ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38906]
I0110 19:49:08.706734  121338 wrap.go:47] DELETE /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-40: (5.3373ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38902]
I0110 19:49:08.711447  121338 scheduling_queue.go:821] About to try and schedule pod preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-41
I0110 19:49:08.711553  121338 wrap.go:47] DELETE /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-41: (4.334747ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38902]
I0110 19:49:08.711658  121338 scheduler.go:450] Skip schedule deleting pod: preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-41
I0110 19:49:08.714679  121338 wrap.go:47] POST /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/events: (2.337779ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38902]
I0110 19:49:08.716782  121338 scheduling_queue.go:821] About to try and schedule pod preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-42
I0110 19:49:08.716857  121338 scheduler.go:450] Skip schedule deleting pod: preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-42
I0110 19:49:08.717997  121338 wrap.go:47] DELETE /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-42: (4.892991ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38906]
I0110 19:49:08.719034  121338 wrap.go:47] POST /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/events: (1.795878ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38902]
I0110 19:49:08.721307  121338 scheduling_queue.go:821] About to try and schedule pod preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-43
I0110 19:49:08.721346  121338 scheduler.go:450] Skip schedule deleting pod: preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-43
I0110 19:49:08.722040  121338 reflector.go:215] k8s.io/client-go/informers/factory.go:132: forcing resync
I0110 19:49:08.723457  121338 wrap.go:47] POST /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/events: (1.82781ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38902]
I0110 19:49:08.723673  121338 reflector.go:215] k8s.io/client-go/informers/factory.go:132: forcing resync
I0110 19:49:08.724125  121338 reflector.go:215] k8s.io/client-go/informers/factory.go:132: forcing resync
I0110 19:49:08.724607  121338 wrap.go:47] DELETE /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-43: (6.051067ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38906]
I0110 19:49:08.724941  121338 reflector.go:215] k8s.io/client-go/informers/factory.go:132: forcing resync
I0110 19:49:08.726069  121338 reflector.go:215] k8s.io/client-go/informers/factory.go:132: forcing resync
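
The interleaved reflector.go:215 "forcing resync" lines are not part of the preemption traffic: they come from shared informers built with a non-zero resync period, which periodically re-deliver the cached objects to their event handlers. A sketch of where that knob lives, assuming the standard client-go informer factory (the 30s value is a placeholder, not what this harness configured):

    package sketch

    import (
        "time"

        "k8s.io/client-go/informers"
        "k8s.io/client-go/kubernetes"
    )

    // newFactory shows the origin of the "forcing resync" lines: a shared
    // informer factory built with a non-zero defaultResync re-delivers its
    // cached objects to registered handlers on that period.
    func newFactory(cs kubernetes.Interface) informers.SharedInformerFactory {
        return informers.NewSharedInformerFactory(cs, 30*time.Second)
    }
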
I0110 19:49:08.728331  121338 scheduling_queue.go:821] About to try and schedule pod preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-44
I0110 19:49:08.728916  121338 scheduler.go:450] Skip schedule deleting pod: preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-44
I0110 19:49:08.730222  121338 wrap.go:47] DELETE /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-44: (5.342858ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38906]
I0110 19:49:08.733015  121338 wrap.go:47] POST /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/events: (2.994875ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38902]
I0110 19:49:08.734316  121338 scheduling_queue.go:821] About to try and schedule pod preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-45
I0110 19:49:08.734460  121338 scheduler.go:450] Skip schedule deleting pod: preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-45
I0110 19:49:08.735942  121338 wrap.go:47] DELETE /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-45: (5.352922ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38906]
I0110 19:49:08.736821  121338 wrap.go:47] POST /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/events: (1.857064ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38902]
I0110 19:49:08.739811  121338 scheduling_queue.go:821] About to try and schedule pod preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-46
I0110 19:49:08.739849  121338 scheduler.go:450] Skip schedule deleting pod: preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-46
I0110 19:49:08.741980  121338 wrap.go:47] POST /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/events: (1.586374ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38902]
I0110 19:49:08.742322  121338 wrap.go:47] DELETE /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-46: (5.570799ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38906]
I0110 19:49:08.746180  121338 scheduling_queue.go:821] About to try and schedule pod preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-47
I0110 19:49:08.746220  121338 scheduler.go:450] Skip schedule deleting pod: preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-47
I0110 19:49:08.747725  121338 wrap.go:47] DELETE /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-47: (4.746775ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38906]
I0110 19:49:08.748697  121338 wrap.go:47] POST /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/events: (1.908048ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38902]
I0110 19:49:08.752618  121338 scheduling_queue.go:821] About to try and schedule pod preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-48
I0110 19:49:08.752655  121338 scheduler.go:450] Skip schedule deleting pod: preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-48
I0110 19:49:08.754646  121338 wrap.go:47] POST /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/events: (1.697126ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38902]
I0110 19:49:08.754912  121338 wrap.go:47] DELETE /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-48: (6.5997ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38906]
I0110 19:49:08.758404  121338 scheduling_queue.go:821] About to try and schedule pod preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-49
I0110 19:49:08.758452  121338 scheduler.go:450] Skip schedule deleting pod: preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-49
I0110 19:49:08.759653  121338 wrap.go:47] DELETE /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-49: (4.342001ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38906]
I0110 19:49:08.760132  121338 wrap.go:47] POST /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/events: (1.417781ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38902]
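
From ppod-22 onward the pattern changes: the test's cleanup DELETEs race with the scheduler's queue, and every pod that pops out of the queue already carrying a deletion timestamp is dropped with "Skip schedule deleting pod" plus a recorded event, instead of another scheduling attempt. The guard itself is small; a sketch with a runnable toy demo (the surrounding event recording and queue plumbing are omitted):

    package main

    import (
        "fmt"

        v1 "k8s.io/api/core/v1"
        metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    )

    // skipPodSchedule mirrors the check behind "Skip schedule deleting pod":
    // once a DELETE has been issued, the pod carries a deletion timestamp,
    // and scheduling it would only race with its removal.
    func skipPodSchedule(pod *v1.Pod) bool {
        return pod.DeletionTimestamp != nil
    }

    func main() {
        now := metav1.Now()
        doomed := &v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "ppod-22", DeletionTimestamp: &now}}
        fmt.Println(skipPodSchedule(doomed)) // true
    }
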
I0110 19:49:08.764752  121338 wrap.go:47] DELETE /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/rpod-0: (4.696437ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38906]
I0110 19:49:08.766328  121338 wrap.go:47] DELETE /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/rpod-1: (1.112672ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38906]
I0110 19:49:08.770948  121338 wrap.go:47] DELETE /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/preemptor-pod: (4.216594ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38906]
I0110 19:49:08.773985  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-0: (1.207131ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38906]
I0110 19:49:08.776765  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-1: (1.189631ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38906]
I0110 19:49:08.779533  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-2: (1.178015ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38906]
I0110 19:49:08.782610  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-3: (1.315362ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38906]
I0110 19:49:08.785427  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-4: (1.093623ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38906]
I0110 19:49:08.788171  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-5: (1.131471ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38906]
I0110 19:49:08.791136  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-6: (1.216967ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38906]
I0110 19:49:08.794210  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-7: (1.177091ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38906]
I0110 19:49:08.797021  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-8: (1.185881ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38906]
I0110 19:49:08.800237  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-9: (1.473414ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38906]
I0110 19:49:08.803143  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-10: (1.267134ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38906]
I0110 19:49:08.806147  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-11: (1.393277ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38906]
I0110 19:49:08.809146  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-12: (1.315615ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38906]
I0110 19:49:08.811707  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-13: (906.403µs) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38906]
I0110 19:49:08.814691  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-14: (1.335871ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38906]
I0110 19:49:08.817782  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-15: (1.361549ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38906]
I0110 19:49:08.820796  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-16: (1.401785ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38906]
I0110 19:49:08.823804  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-17: (1.413093ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38906]
I0110 19:49:08.826522  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-18: (1.181621ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38906]
I0110 19:49:08.829380  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-19: (1.198585ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38906]
I0110 19:49:08.833025  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-20: (1.858711ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38906]
I0110 19:49:08.835885  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-21: (1.078196ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38906]
I0110 19:49:08.838984  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-22: (1.293396ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38906]
I0110 19:49:08.842061  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-23: (1.399608ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38906]
I0110 19:49:08.845062  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-24: (1.271149ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38906]
I0110 19:49:08.848201  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-25: (1.489217ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38906]
I0110 19:49:08.851857  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-26: (1.914883ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38906]
I0110 19:49:08.855248  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-27: (1.360841ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38906]
I0110 19:49:08.858819  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-28: (1.505029ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38906]
I0110 19:49:08.863335  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-29: (2.674155ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38906]
I0110 19:49:08.868727  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-30: (1.405143ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38906]
I0110 19:49:08.872253  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-31: (1.558195ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38906]
I0110 19:49:08.875872  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-32: (1.847549ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38906]
I0110 19:49:08.879314  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-33: (1.563667ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38906]
I0110 19:49:08.882710  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-34: (1.474017ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38906]
I0110 19:49:08.885841  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-35: (1.419318ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38906]
I0110 19:49:08.889225  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-36: (1.521232ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38906]
I0110 19:49:08.893357  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-37: (1.994672ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38906]
I0110 19:49:08.896825  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-38: (1.31548ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38906]
I0110 19:49:08.899966  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-39: (1.469753ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38906]
I0110 19:49:08.903561  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-40: (1.973181ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38906]
I0110 19:49:08.906715  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-41: (1.434811ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38906]
I0110 19:49:08.909570  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-42: (1.211437ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38906]
I0110 19:49:08.912633  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-43: (1.420221ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38906]
I0110 19:49:08.915517  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-44: (1.087844ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38906]
I0110 19:49:08.918238  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-45: (1.079295ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38906]
I0110 19:49:08.921142  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-46: (1.343127ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38906]
I0110 19:49:08.923956  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-47: (1.167884ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38906]
I0110 19:49:08.934901  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-48: (9.303757ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38906]
I0110 19:49:08.941832  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-49: (1.18354ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38906]
I0110 19:49:08.944476  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/rpod-0: (1.04409ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38906]
I0110 19:49:08.947584  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/rpod-1: (1.388103ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38906]
I0110 19:49:08.950315  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/preemptor-pod: (1.043628ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38906]
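
The block of 404s above is the harness verifying teardown: it GETs each pod until the apiserver answers NotFound, the only reliable signal that the previous round's objects are really gone before it recreates rpod-0, rpod-1, and the preemptor. A sketch of such a wait loop, assuming client-go's wait helpers (the function name, interval, and timeout are illustrative, not the harness's actual values):

    package sketch

    import (
        "context"
        "time"

        apierrors "k8s.io/apimachinery/pkg/api/errors"
        metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
        "k8s.io/apimachinery/pkg/util/wait"
        "k8s.io/client-go/kubernetes"
    )

    // waitForPodsGone mirrors the GET-until-404 sweep in the log: poll each
    // pod until the apiserver reports NotFound, bailing out on other errors.
    func waitForPodsGone(ctx context.Context, cs kubernetes.Interface, ns string, names []string) error {
        for _, name := range names {
            err := wait.PollImmediate(100*time.Millisecond, 30*time.Second, func() (bool, error) {
                _, err := cs.CoreV1().Pods(ns).Get(ctx, name, metav1.GetOptions{})
                if apierrors.IsNotFound(err) {
                    return true, nil // pod is gone
                }
                return false, err // keep polling while it exists; abort on other errors
            })
            if err != nil {
                return err
            }
        }
        return nil
    }
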
I0110 19:49:08.952885  121338 wrap.go:47] POST /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods: (1.986034ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38906]
I0110 19:49:08.953540  121338 scheduling_queue.go:821] About to try and schedule pod preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/rpod-0
I0110 19:49:08.953558  121338 scheduler.go:454] Attempting to schedule pod: preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/rpod-0
I0110 19:49:08.953718  121338 scheduler_binder.go:211] AssumePodVolumes for pod "preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/rpod-0", node "node1"
I0110 19:49:08.953736  121338 scheduler_binder.go:221] AssumePodVolumes for pod "preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/rpod-0", node "node1": all PVCs bound and nothing to do
I0110 19:49:08.953781  121338 factory.go:1166] Attempting to bind rpod-0 to node1
I0110 19:49:08.955700  121338 wrap.go:47] POST /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/rpod-0/binding: (1.656458ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38902]
I0110 19:49:08.955873  121338 wrap.go:47] POST /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods: (2.073686ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38906]
I0110 19:49:08.955935  121338 scheduler.go:569] pod preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/rpod-0 is bound successfully on node node1, 1 nodes evaluated, 1 nodes were found feasible
I0110 19:49:08.956213  121338 scheduling_queue.go:821] About to try and schedule pod preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/rpod-1
I0110 19:49:08.956278  121338 scheduler.go:454] Attempting to schedule pod: preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/rpod-1
I0110 19:49:08.956436  121338 scheduler_binder.go:211] AssumePodVolumes for pod "preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/rpod-1", node "node1"
I0110 19:49:08.956473  121338 scheduler_binder.go:221] AssumePodVolumes for pod "preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/rpod-1", node "node1": all PVCs bound and nothing to do
I0110 19:49:08.956553  121338 factory.go:1166] Attempting to bind rpod-1 to node1
I0110 19:49:08.958066  121338 wrap.go:47] POST /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/events: (1.769115ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38906]
I0110 19:49:08.958397  121338 wrap.go:47] POST /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/rpod-1/binding: (1.593364ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38902]
I0110 19:49:08.958627  121338 scheduler.go:569] pod preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/rpod-1 is bound successfully on node node1, 1 nodes evaluated, 1 nodes were found feasible
I0110 19:49:08.960335  121338 wrap.go:47] POST /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/events: (1.437491ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38902]
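
rpod-0 and rpod-1 each go through the full happy path above: AssumePodVolumes finds all PVCs bound and nothing to do, factory.go:1166 attempts the bind, and the POST to the pod's /binding subresource is what actually pins the pod to node1. A sketch of that subresource call, assuming the current client-go Bind helper (the scheduler's factory goes through an internal binder rather than this public method):

    package sketch

    import (
        "context"

        v1 "k8s.io/api/core/v1"
        metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
        "k8s.io/client-go/kubernetes"
    )

    // bindPod issues the POST .../pods/{name}/binding seen in the log,
    // targeting the chosen node.
    func bindPod(ctx context.Context, cs kubernetes.Interface, ns, pod, node string) error {
        binding := &v1.Binding{
            ObjectMeta: metav1.ObjectMeta{Namespace: ns, Name: pod},
            Target:     v1.ObjectReference{Kind: "Node", Name: node},
        }
        return cs.CoreV1().Pods(ns).Bind(ctx, binding, metav1.CreateOptions{})
    }
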
I0110 19:49:09.058456  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/rpod-0: (1.817936ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38902]
I0110 19:49:09.161313  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/rpod-1: (1.89538ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38902]
I0110 19:49:09.161721  121338 preemption_test.go:561] Creating the preemptor pod...
I0110 19:49:09.164203  121338 wrap.go:47] POST /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods: (2.215516ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38902]
I0110 19:49:09.164445  121338 scheduling_queue.go:821] About to try and schedule pod preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/preemptor-pod
I0110 19:49:09.164477  121338 scheduler.go:454] Attempting to schedule pod: preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/preemptor-pod
I0110 19:49:09.164632  121338 preemption_test.go:567] Creating additional pods...
I0110 19:49:09.164738  121338 factory.go:1070] Unable to schedule preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/preemptor-pod: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0110 19:49:09.164794  121338 factory.go:1175] Updating pod condition for preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/preemptor-pod to (PodScheduled==False, Reason=Unschedulable)
I0110 19:49:09.166782  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/preemptor-pod: (1.444357ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38924]
I0110 19:49:09.168674  121338 wrap.go:47] POST /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods: (3.317123ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38902]
I0110 19:49:09.168677  121338 wrap.go:47] PUT /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/preemptor-pod/status: (3.622838ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38906]
I0110 19:49:09.169193  121338 wrap.go:47] POST /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/events: (3.783442ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38926]
I0110 19:49:09.173765  121338 wrap.go:47] POST /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods: (4.429376ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38924]
I0110 19:49:09.173781  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/preemptor-pod: (4.570497ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38906]
I0110 19:49:09.174253  121338 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0110 19:49:09.176273  121338 wrap.go:47] POST /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods: (1.909085ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38924]
I0110 19:49:09.177081  121338 wrap.go:47] PUT /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/preemptor-pod/status: (2.379558ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38926]
I0110 19:49:09.178645  121338 wrap.go:47] POST /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods: (1.865026ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38924]
I0110 19:49:09.181245  121338 wrap.go:47] POST /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods: (2.095477ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38924]
I0110 19:49:09.182095  121338 wrap.go:47] DELETE /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/rpod-1: (4.509674ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38926]
I0110 19:49:09.182398  121338 scheduling_queue.go:821] About to try and schedule pod preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/preemptor-pod
I0110 19:49:09.182413  121338 scheduler.go:454] Attempting to schedule pod: preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/preemptor-pod
I0110 19:49:09.182623  121338 scheduler_binder.go:211] AssumePodVolumes for pod "preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/preemptor-pod", node "node1"
I0110 19:49:09.182643  121338 scheduler_binder.go:221] AssumePodVolumes for pod "preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/preemptor-pod", node "node1": all PVCs bound and nothing to do
I0110 19:49:09.182676  121338 factory.go:1166] Attempting to bind preemptor-pod to node1
I0110 19:49:09.182900  121338 scheduling_queue.go:821] About to try and schedule pod preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-4
I0110 19:49:09.182915  121338 scheduler.go:454] Attempting to schedule pod: preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-4
I0110 19:49:09.183059  121338 factory.go:1070] Unable to schedule preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-4: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0110 19:49:09.183095  121338 factory.go:1175] Updating pod condition for preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-4 to (PodScheduled==False, Reason=Unschedulable)
I0110 19:49:09.184727  121338 wrap.go:47] POST /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/events: (2.140738ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38926]
I0110 19:49:09.185043  121338 wrap.go:47] POST /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/preemptor-pod/binding: (1.725301ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38928]
I0110 19:49:09.185131  121338 wrap.go:47] POST /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods: (3.393291ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38924]
I0110 19:49:09.185407  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-4: (1.779451ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38932]
I0110 19:49:09.185555  121338 wrap.go:47] PUT /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-4/status: (2.007802ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38930]
I0110 19:49:09.185587  121338 scheduler.go:569] pod preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/preemptor-pod is bound successfully on node node1, 1 nodes evaluated, 1 nodes were found feasible
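
This is the race the test exercises in miniature: preemptor-pod is created at 19:49:09.164 and immediately fails with the same no-fit message as the filler pods, node1 is flagged as a preemption candidate, and once the DELETE of rpod-1 returns at 19:49:09.182 the retried preemptor assumes its volumes and binds to node1 about three milliseconds later, while the freshly created ppod-* pods keep cycling through the unschedulable path.
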
I0110 19:49:09.186669  121338 wrap.go:47] POST /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/events: (1.493492ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38926]
I0110 19:49:09.187171  121338 wrap.go:47] POST /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods: (1.600177ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38924]
I0110 19:49:09.187381  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-4: (1.323857ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38932]
I0110 19:49:09.187798  121338 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0110 19:49:09.187921  121338 scheduling_queue.go:821] About to try and schedule pod preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-3
I0110 19:49:09.187941  121338 scheduler.go:454] Attempting to schedule pod: preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-3
I0110 19:49:09.188039  121338 factory.go:1070] Unable to schedule preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-3: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0110 19:49:09.188092  121338 factory.go:1175] Updating pod condition for preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-3 to (PodScheduled==False, Reason=Unschedulable)
I0110 19:49:09.188925  121338 wrap.go:47] POST /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/events: (1.812638ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38926]
I0110 19:49:09.189036  121338 wrap.go:47] POST /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods: (1.509595ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38924]
I0110 19:49:09.192020  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-3: (3.629755ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38928]
I0110 19:49:09.192041  121338 wrap.go:47] PUT /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-3/status: (3.680385ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38932]
I0110 19:49:09.193006  121338 wrap.go:47] POST /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods: (2.274771ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38926]
I0110 19:49:09.194089  121338 wrap.go:47] POST /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/events: (3.319874ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38924]
I0110 19:49:09.194247  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-3: (1.277031ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38928]
I0110 19:49:09.194508  121338 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0110 19:49:09.194683  121338 scheduling_queue.go:821] About to try and schedule pod preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-6
I0110 19:49:09.194703  121338 scheduler.go:454] Attempting to schedule pod: preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-6
I0110 19:49:09.194803  121338 factory.go:1070] Unable to schedule preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-6: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0110 19:49:09.194855  121338 factory.go:1175] Updating pod condition for preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-6 to (PodScheduled==False, Reason=Unschedulable)
I0110 19:49:09.195599  121338 wrap.go:47] POST /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods: (2.029418ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38926]
I0110 19:49:09.196537  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-6: (1.44491ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38932]
I0110 19:49:09.197575  121338 wrap.go:47] PUT /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-6/status: (2.269705ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38924]
I0110 19:49:09.197584  121338 wrap.go:47] POST /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/events: (1.866436ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38934]
I0110 19:49:09.198375  121338 wrap.go:47] POST /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods: (1.896539ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38926]
I0110 19:49:09.199040  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-6: (1.021609ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38924]
I0110 19:49:09.199333  121338 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0110 19:49:09.199524  121338 scheduling_queue.go:821] About to try and schedule pod preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-8
I0110 19:49:09.199539  121338 scheduler.go:454] Attempting to schedule pod: preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-8
I0110 19:49:09.199645  121338 factory.go:1070] Unable to schedule preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-8: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0110 19:49:09.199695  121338 factory.go:1175] Updating pod condition for preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-8 to (PodScheduled==False, Reason=Unschedulable)
I0110 19:49:09.200638  121338 wrap.go:47] POST /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods: (1.839393ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38926]
I0110 19:49:09.201265  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-8: (1.278778ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38932]
I0110 19:49:09.201837  121338 wrap.go:47] PUT /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-8/status: (1.93024ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38924]
I0110 19:49:09.201922  121338 wrap.go:47] POST /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/events: (1.577673ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38936]
I0110 19:49:09.202656  121338 wrap.go:47] POST /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods: (1.516055ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38926]
I0110 19:49:09.203355  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-8: (1.084549ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38924]
I0110 19:49:09.203614  121338 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0110 19:49:09.203791  121338 scheduling_queue.go:821] About to try and schedule pod preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-10
I0110 19:49:09.203808  121338 scheduler.go:454] Attempting to schedule pod: preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-10
I0110 19:49:09.203884  121338 factory.go:1070] Unable to schedule preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-10: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0110 19:49:09.203947  121338 factory.go:1175] Updating pod condition for preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-10 to (PodScheduled==False, Reason=Unschedulable)
I0110 19:49:09.205173  121338 wrap.go:47] POST /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods: (1.903323ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38926]
I0110 19:49:09.205415  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-10: (1.19058ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38932]
I0110 19:49:09.206114  121338 wrap.go:47] POST /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/events: (1.570102ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38938]
I0110 19:49:09.207222  121338 wrap.go:47] POST /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods: (1.535414ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38926]
I0110 19:49:09.207295  121338 wrap.go:47] PUT /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-10/status: (3.002163ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38924]
I0110 19:49:09.209293  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-10: (1.316906ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38938]
I0110 19:49:09.209593  121338 wrap.go:47] POST /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods: (1.505093ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38932]
I0110 19:49:09.209660  121338 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0110 19:49:09.209846  121338 scheduling_queue.go:821] About to try and schedule pod preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-12
I0110 19:49:09.209864  121338 scheduler.go:454] Attempting to schedule pod: preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-12
I0110 19:49:09.210056  121338 factory.go:1070] Unable to schedule preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-12: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0110 19:49:09.210112  121338 factory.go:1175] Updating pod condition for preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-12 to (PodScheduled==False, Reason=Unschedulable)
I0110 19:49:09.211894  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-12: (1.657844ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38938]
I0110 19:49:09.212243  121338 wrap.go:47] POST /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/events: (1.426745ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38942]
I0110 19:49:09.212651  121338 wrap.go:47] POST /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods: (2.449903ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38932]
I0110 19:49:09.213052  121338 wrap.go:47] PUT /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-12/status: (2.407564ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38940]
I0110 19:49:09.215287  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-12: (1.236852ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38940]
I0110 19:49:09.215417  121338 wrap.go:47] POST /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods: (2.223317ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38942]
I0110 19:49:09.215609  121338 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0110 19:49:09.215795  121338 scheduling_queue.go:821] About to try and schedule pod preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-15
I0110 19:49:09.215819  121338 scheduler.go:454] Attempting to schedule pod: preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-15
I0110 19:49:09.215948  121338 factory.go:1070] Unable to schedule preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-15: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0110 19:49:09.216025  121338 factory.go:1175] Updating pod condition for preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-15 to (PodScheduled==False, Reason=Unschedulable)
I0110 19:49:09.218040  121338 wrap.go:47] POST /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/events: (1.719481ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38938]
I0110 19:49:09.218173  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-15: (1.48238ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38944]
I0110 19:49:09.218184  121338 wrap.go:47] POST /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods: (2.15518ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38940]
I0110 19:49:09.218685  121338 wrap.go:47] PUT /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-15/status: (1.947418ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38946]
I0110 19:49:09.220320  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-15: (1.191936ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38944]
I0110 19:49:09.220689  121338 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0110 19:49:09.220903  121338 scheduling_queue.go:821] About to try and schedule pod preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-17
I0110 19:49:09.220922  121338 scheduler.go:454] Attempting to schedule pod: preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-17
I0110 19:49:09.221063  121338 factory.go:1070] Unable to schedule preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-17: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0110 19:49:09.221111  121338 factory.go:1175] Updating pod condition for preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-17 to (PodScheduled==False, Reason=Unschedulable)
I0110 19:49:09.221712  121338 wrap.go:47] POST /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods: (2.066463ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38938]
I0110 19:49:09.222949  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-17: (1.160228ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38948]
I0110 19:49:09.223317  121338 wrap.go:47] POST /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/events: (1.339005ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38950]
I0110 19:49:09.224185  121338 wrap.go:47] POST /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods: (1.900732ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38938]
I0110 19:49:09.224459  121338 wrap.go:47] PUT /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-17/status: (2.87853ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38944]
I0110 19:49:09.226171  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-17: (1.326044ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38950]
I0110 19:49:09.226480  121338 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0110 19:49:09.226837  121338 scheduling_queue.go:821] About to try and schedule pod preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-18
I0110 19:49:09.226862  121338 wrap.go:47] POST /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods: (1.714427ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38948]
I0110 19:49:09.226862  121338 scheduler.go:454] Attempting to schedule pod: preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-18
I0110 19:49:09.226973  121338 factory.go:1070] Unable to schedule preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-18: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0110 19:49:09.227168  121338 factory.go:1175] Updating pod condition for preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-18 to (PodScheduled==False, Reason=Unschedulable)
I0110 19:49:09.229674  121338 wrap.go:47] POST /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/events: (1.917229ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38954]
I0110 19:49:09.229842  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-18: (2.196606ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38952]
I0110 19:49:09.230345  121338 wrap.go:47] PUT /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-18/status: (2.94347ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38950]
I0110 19:49:09.230454  121338 wrap.go:47] POST /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods: (3.116664ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38948]
I0110 19:49:09.232273  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-18: (1.427924ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38952]
I0110 19:49:09.232584  121338 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0110 19:49:09.232752  121338 scheduling_queue.go:821] About to try and schedule pod preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-20
I0110 19:49:09.232801  121338 scheduler.go:454] Attempting to schedule pod: preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-20
I0110 19:49:09.232945  121338 factory.go:1070] Unable to schedule preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-20: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0110 19:49:09.232998  121338 factory.go:1175] Updating pod condition for preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-20 to (PodScheduled==False, Reason=Unschedulable)
I0110 19:49:09.233440  121338 wrap.go:47] POST /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods: (2.507164ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38954]
I0110 19:49:09.234838  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-20: (1.212804ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38956]
I0110 19:49:09.235313  121338 wrap.go:47] POST /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/events: (1.863564ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38952]
I0110 19:49:09.235806  121338 wrap.go:47] POST /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods: (1.992105ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38954]
I0110 19:49:09.235916  121338 wrap.go:47] PUT /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-20/status: (2.253169ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38958]
I0110 19:49:09.237720  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-20: (1.336736ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38956]
I0110 19:49:09.237879  121338 wrap.go:47] POST /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods: (1.525904ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38952]
I0110 19:49:09.238003  121338 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0110 19:49:09.238252  121338 scheduling_queue.go:821] About to try and schedule pod preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-22
I0110 19:49:09.238269  121338 scheduler.go:454] Attempting to schedule pod: preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-22
I0110 19:49:09.238355  121338 factory.go:1070] Unable to schedule preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-22: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0110 19:49:09.238413  121338 factory.go:1175] Updating pod condition for preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-22 to (PodScheduled==False, Reason=Unschedulable)
I0110 19:49:09.240019  121338 wrap.go:47] POST /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods: (1.614305ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38952]
I0110 19:49:09.240678  121338 wrap.go:47] POST /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/events: (1.707687ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38962]
I0110 19:49:09.241317  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-22: (2.361511ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38960]
I0110 19:49:09.241450  121338 wrap.go:47] PUT /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-22/status: (2.781117ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38956]
I0110 19:49:09.242086  121338 wrap.go:47] POST /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods: (1.537999ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38952]
I0110 19:49:09.243179  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-22: (1.31471ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38960]
I0110 19:49:09.243481  121338 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0110 19:49:09.243663  121338 scheduling_queue.go:821] About to try and schedule pod preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-25
I0110 19:49:09.243684  121338 scheduler.go:454] Attempting to schedule pod: preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-25
I0110 19:49:09.243815  121338 factory.go:1070] Unable to schedule preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-25: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0110 19:49:09.243886  121338 factory.go:1175] Updating pod condition for preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-25 to (PodScheduled==False, Reason=Unschedulable)
I0110 19:49:09.243985  121338 wrap.go:47] POST /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods: (1.450722ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38952]
I0110 19:49:09.245569  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-25: (1.15161ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38962]
I0110 19:49:09.246106  121338 wrap.go:47] POST /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/events: (1.50519ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38952]
I0110 19:49:09.246712  121338 wrap.go:47] PUT /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-25/status: (2.266401ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38960]
I0110 19:49:09.247667  121338 wrap.go:47] POST /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods: (2.480662ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38964]
I0110 19:49:09.248838  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-25: (1.276197ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38962]
I0110 19:49:09.249222  121338 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0110 19:49:09.249396  121338 scheduling_queue.go:821] About to try and schedule pod preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-27
I0110 19:49:09.249412  121338 scheduler.go:454] Attempting to schedule pod: preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-27
I0110 19:49:09.249508  121338 factory.go:1070] Unable to schedule preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-27: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0110 19:49:09.249554  121338 factory.go:1175] Updating pod condition for preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-27 to (PodScheduled==False, Reason=Unschedulable)
I0110 19:49:09.251962  121338 wrap.go:47] POST /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/events: (1.810889ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38968]
I0110 19:49:09.252179  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-27: (2.350977ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38962]
I0110 19:49:09.252446  121338 wrap.go:47] PUT /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-27/status: (2.57655ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38966]
I0110 19:49:09.253026  121338 wrap.go:47] POST /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods: (4.889293ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38964]
I0110 19:49:09.255246  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-27: (2.245728ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38962]
I0110 19:49:09.255562  121338 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0110 19:49:09.255687  121338 scheduling_queue.go:821] About to try and schedule pod preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-29
I0110 19:49:09.255726  121338 scheduler.go:454] Attempting to schedule pod: preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-29
I0110 19:49:09.255812  121338 factory.go:1070] Unable to schedule preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-29: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0110 19:49:09.255861  121338 factory.go:1175] Updating pod condition for preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-29 to (PodScheduled==False, Reason=Unschedulable)
I0110 19:49:09.256515  121338 wrap.go:47] POST /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods: (2.964651ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38964]
I0110 19:49:09.257580  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-29: (1.378538ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38968]
I0110 19:49:09.258551  121338 wrap.go:47] PUT /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-29/status: (2.391245ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38962]
I0110 19:49:09.259206  121338 wrap.go:47] POST /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/events: (2.610333ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38970]
I0110 19:49:09.259371  121338 wrap.go:47] POST /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods: (2.362485ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38964]
I0110 19:49:09.260757  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-29: (1.307733ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38962]
I0110 19:49:09.261049  121338 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0110 19:49:09.261258  121338 scheduling_queue.go:821] About to try and schedule pod preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-27
I0110 19:49:09.261279  121338 scheduler.go:454] Attempting to schedule pod: preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-27
I0110 19:49:09.261407  121338 factory.go:1070] Unable to schedule preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-27: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0110 19:49:09.261454  121338 factory.go:1175] Updating pod condition for preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-27 to (PodScheduled==False, Reason=Unschedulable)
I0110 19:49:09.261890  121338 wrap.go:47] POST /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods: (1.885493ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38970]
I0110 19:49:09.263552  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-27: (1.606023ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38968]
I0110 19:49:09.263646  121338 wrap.go:47] PUT /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-27/status: (1.928006ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38962]
I0110 19:49:09.264449  121338 wrap.go:47] PATCH /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/events/ppod-27.1578947ff25a8513: (2.263325ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38970]
I0110 19:49:09.264852  121338 wrap.go:47] POST /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods: (1.843693ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38972]
I0110 19:49:09.265207  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-27: (1.169415ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38962]
I0110 19:49:09.265440  121338 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
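
Two details distinguish the ppod-27 pass above from the first-attempt passes before it. First, ppod-27 is being retried (its first attempt appears earlier in this log), which is exactly the churn TestPreemptionRaces drives: each new pod changes cluster state, so previously unschedulable pods are moved back to the active queue and tried again. Second, instead of a POST /events returning 201, the recorder issues PATCH /events/ppod-27.1578947ff25a8513: client-go deduplicates a repeated FailedScheduling event for the same object and increments the count on the stored Event rather than creating a new one. A minimal sketch of wiring up such a recorder, with the component name and package name as assumptions rather than values taken from the test:

package schedlog

import (
	v1 "k8s.io/api/core/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/kubernetes/scheme"
	typedcorev1 "k8s.io/client-go/kubernetes/typed/core/v1"
	"k8s.io/client-go/tools/record"
)

// newRecorder builds an event recorder backed by the events API. The first
// FailedScheduling event for a pod is created (the POST ... 201 lines); an
// identical repeat is correlated client-side and sent as a PATCH that bumps
// the existing Event's count (the PATCH ... 200 lines).
func newRecorder(client kubernetes.Interface) record.EventRecorder {
	broadcaster := record.NewBroadcaster()
	broadcaster.StartRecordingToSink(&typedcorev1.EventSinkImpl{
		Interface: client.CoreV1().Events(""),
	})
	return broadcaster.NewRecorder(scheme.Scheme, v1.EventSource{Component: "default-scheduler"})
}
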
I0110 19:49:09.265614  121338 scheduling_queue.go:821] About to try and schedule pod preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-32
I0110 19:49:09.265675  121338 scheduler.go:454] Attempting to schedule pod: preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-32
I0110 19:49:09.265834  121338 factory.go:1070] Unable to schedule preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-32: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0110 19:49:09.265896  121338 factory.go:1175] Updating pod condition for preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-32 to (PodScheduled==False, Reason=Unschedulable)
I0110 19:49:09.267049  121338 wrap.go:47] POST /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods: (1.712138ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38970]
I0110 19:49:09.267900  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-32: (1.420624ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38968]
I0110 19:49:09.269251  121338 wrap.go:47] POST /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/events: (1.90779ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38970]
I0110 19:49:09.269730  121338 wrap.go:47] POST /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods: (1.910913ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38976]
I0110 19:49:09.269904  121338 wrap.go:47] PUT /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-32/status: (3.405701ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38962]
I0110 19:49:09.273097  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-32: (1.902964ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38968]
I0110 19:49:09.273308  121338 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0110 19:49:09.273526  121338 scheduling_queue.go:821] About to try and schedule pod preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-34
I0110 19:49:09.273544  121338 scheduler.go:454] Attempting to schedule pod: preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-34
I0110 19:49:09.273647  121338 wrap.go:47] POST /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods: (2.454151ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38974]
I0110 19:49:09.273660  121338 factory.go:1070] Unable to schedule preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-34: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0110 19:49:09.273702  121338 factory.go:1175] Updating pod condition for preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-34 to (PodScheduled==False, Reason=Unschedulable)
I0110 19:49:09.275702  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-34: (1.409777ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38974]
I0110 19:49:09.276882  121338 wrap.go:47] POST /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/events: (2.344272ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38980]
I0110 19:49:09.277059  121338 wrap.go:47] PUT /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-34/status: (2.782906ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38968]
I0110 19:49:09.277060  121338 wrap.go:47] POST /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods: (2.519413ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38978]
I0110 19:49:09.281159  121338 wrap.go:47] POST /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods: (2.199566ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38980]
I0110 19:49:09.281218  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-34: (2.066726ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38974]
I0110 19:49:09.281468  121338 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0110 19:49:09.281687  121338 scheduling_queue.go:821] About to try and schedule pod preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-36
I0110 19:49:09.281703  121338 scheduler.go:454] Attempting to schedule pod: preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-36
I0110 19:49:09.281865  121338 factory.go:1070] Unable to schedule preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-36: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0110 19:49:09.281918  121338 factory.go:1175] Updating pod condition for preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-36 to (PodScheduled==False, Reason=Unschedulable)
I0110 19:49:09.284242  121338 wrap.go:47] POST /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods: (2.489736ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38974]
I0110 19:49:09.284357  121338 wrap.go:47] POST /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/events: (1.703878ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38986]
I0110 19:49:09.285616  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-36: (3.036061ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38984]
I0110 19:49:09.285927  121338 wrap.go:47] PUT /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-36/status: (3.391208ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38980]
I0110 19:49:09.286627  121338 wrap.go:47] POST /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods: (1.785759ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38974]
I0110 19:49:09.287594  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-36: (1.1359ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38984]
I0110 19:49:09.287887  121338 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0110 19:49:09.288058  121338 scheduling_queue.go:821] About to try and schedule pod preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-39
I0110 19:49:09.288076  121338 scheduler.go:454] Attempting to schedule pod: preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-39
I0110 19:49:09.288201  121338 factory.go:1070] Unable to schedule preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-39: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0110 19:49:09.288262  121338 factory.go:1175] Updating pod condition for preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-39 to (PodScheduled==False, Reason=Unschedulable)
I0110 19:49:09.289934  121338 wrap.go:47] POST /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods: (2.714572ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38974]
I0110 19:49:09.290345  121338 wrap.go:47] POST /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/events: (1.347208ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38988]
I0110 19:49:09.290457  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-39: (1.625884ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38986]
I0110 19:49:09.290527  121338 wrap.go:47] PUT /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-39/status: (1.649538ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38984]
I0110 19:49:09.293303  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-39: (2.07555ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38986]
I0110 19:49:09.293416  121338 wrap.go:47] POST /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods: (2.622758ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38988]
I0110 19:49:09.293846  121338 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0110 19:49:09.294017  121338 scheduling_queue.go:821] About to try and schedule pod preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-41
I0110 19:49:09.294029  121338 scheduler.go:454] Attempting to schedule pod: preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-41
I0110 19:49:09.294154  121338 factory.go:1070] Unable to schedule preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-41: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0110 19:49:09.294207  121338 factory.go:1175] Updating pod condition for preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-41 to (PodScheduled==False, Reason=Unschedulable)
I0110 19:49:09.295906  121338 wrap.go:47] POST /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods: (1.910944ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38988]
I0110 19:49:09.296376  121338 wrap.go:47] POST /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/events: (1.5236ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38992]
I0110 19:49:09.297187  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-41: (2.340131ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38990]
I0110 19:49:09.297651  121338 wrap.go:47] PUT /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-41/status: (2.885694ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38974]
I0110 19:49:09.298077  121338 wrap.go:47] POST /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods: (1.651869ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38988]
I0110 19:49:09.299268  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-41: (1.196653ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38990]
I0110 19:49:09.299630  121338 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0110 19:49:09.299780  121338 wrap.go:47] POST /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods: (1.382448ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38988]
I0110 19:49:09.299816  121338 scheduling_queue.go:821] About to try and schedule pod preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-43
I0110 19:49:09.299856  121338 scheduler.go:454] Attempting to schedule pod: preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-43
I0110 19:49:09.299945  121338 factory.go:1070] Unable to schedule preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-43: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0110 19:49:09.300004  121338 factory.go:1175] Updating pod condition for preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-43 to (PodScheduled==False, Reason=Unschedulable)
I0110 19:49:09.301707  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-43: (1.357538ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38992]
I0110 19:49:09.302593  121338 wrap.go:47] POST /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/events: (1.892042ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38988]
I0110 19:49:09.302696  121338 wrap.go:47] POST /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods: (1.670796ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38994]
I0110 19:49:09.302887  121338 wrap.go:47] PUT /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-43/status: (2.561339ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38990]
I0110 19:49:09.305085  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-43: (1.688815ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38990]
I0110 19:49:09.305429  121338 wrap.go:47] POST /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods: (2.319656ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38988]
I0110 19:49:09.305834  121338 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0110 19:49:09.306162  121338 scheduling_queue.go:821] About to try and schedule pod preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-45
I0110 19:49:09.306178  121338 scheduler.go:454] Attempting to schedule pod: preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-45
I0110 19:49:09.306566  121338 factory.go:1070] Unable to schedule preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-45: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0110 19:49:09.306626  121338 factory.go:1175] Updating pod condition for preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-45 to (PodScheduled==False, Reason=Unschedulable)
I0110 19:49:09.307912  121338 wrap.go:47] POST /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods: (2.041299ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38988]
I0110 19:49:09.308722  121338 wrap.go:47] POST /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/events: (1.514782ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38998]
I0110 19:49:09.309216  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-45: (2.006313ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38996]
I0110 19:49:09.309319  121338 wrap.go:47] PUT /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-45/status: (2.276801ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38992]
I0110 19:49:09.310943  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-45: (1.14467ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38998]
I0110 19:49:09.311281  121338 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0110 19:49:09.311425  121338 scheduling_queue.go:821] About to try and schedule pod preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-48
I0110 19:49:09.311441  121338 scheduler.go:454] Attempting to schedule pod: preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-48
I0110 19:49:09.311573  121338 factory.go:1070] Unable to schedule preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-48: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0110 19:49:09.311655  121338 factory.go:1175] Updating pod condition for preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-48 to (PodScheduled==False, Reason=Unschedulable)
I0110 19:49:09.313695  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-48: (1.822221ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38998]
I0110 19:49:09.314194  121338 wrap.go:47] POST /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/events: (1.720321ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39000]
I0110 19:49:09.314648  121338 wrap.go:47] PUT /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-48/status: (2.506299ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38988]
I0110 19:49:09.316183  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-48: (1.072547ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39000]
I0110 19:49:09.316459  121338 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0110 19:49:09.316722  121338 scheduling_queue.go:821] About to try and schedule pod preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-49
I0110 19:49:09.316737  121338 scheduler.go:454] Attempting to schedule pod: preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-49
I0110 19:49:09.316857  121338 factory.go:1070] Unable to schedule preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-49: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0110 19:49:09.316912  121338 factory.go:1175] Updating pod condition for preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-49 to (PodScheduled==False, Reason=Unschedulable)
I0110 19:49:09.318329  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-49: (1.182838ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38998]
I0110 19:49:09.319249  121338 wrap.go:47] POST /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/events: (1.616149ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39002]
I0110 19:49:09.319436  121338 wrap.go:47] PUT /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-49/status: (2.290166ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39000]
I0110 19:49:09.321242  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-49: (1.2032ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39002]
I0110 19:49:09.321512  121338 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0110 19:49:09.321667  121338 scheduling_queue.go:821] About to try and schedule pod preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-48
I0110 19:49:09.321681  121338 scheduler.go:454] Attempting to schedule pod: preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-48
I0110 19:49:09.321761  121338 factory.go:1070] Unable to schedule preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-48: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0110 19:49:09.321808  121338 factory.go:1175] Updating pod condition for preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-48 to (PodScheduled==False, Reason=Unschedulable)
I0110 19:49:09.324313  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-48: (1.73809ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38998]
I0110 19:49:09.324477  121338 wrap.go:47] PUT /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-48/status: (1.843179ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39002]
I0110 19:49:09.325613  121338 wrap.go:47] PATCH /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/events/ppod-48.1578947ff60e1ae9: (2.883765ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39004]
I0110 19:49:09.326348  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-48: (1.360214ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39002]
I0110 19:49:09.326614  121338 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0110 19:49:09.326800  121338 scheduling_queue.go:821] About to try and schedule pod preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-49
I0110 19:49:09.326819  121338 scheduler.go:454] Attempting to schedule pod: preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-49
I0110 19:49:09.326940  121338 factory.go:1070] Unable to schedule preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-49: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0110 19:49:09.327005  121338 factory.go:1175] Updating pod condition for preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-49 to (PodScheduled==False, Reason=Unschedulable)
I0110 19:49:09.330559  121338 wrap.go:47] PUT /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-49/status: (2.872861ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39004]
I0110 19:49:09.330809  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-49: (2.619965ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38998]
I0110 19:49:09.331710  121338 wrap.go:47] PATCH /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/events/ppod-49.1578947ff65e4087: (3.941779ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39006]
I0110 19:49:09.333820  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-49: (2.481933ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38998]
I0110 19:49:09.334126  121338 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0110 19:49:09.334292  121338 scheduling_queue.go:821] About to try and schedule pod preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-47
I0110 19:49:09.334311  121338 scheduler.go:454] Attempting to schedule pod: preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-47
I0110 19:49:09.334447  121338 factory.go:1070] Unable to schedule preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-47: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0110 19:49:09.334519  121338 factory.go:1175] Updating pod condition for preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-47 to (PodScheduled==False, Reason=Unschedulable)
I0110 19:49:09.336485  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-47: (1.70949ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39006]
I0110 19:49:09.336955  121338 wrap.go:47] POST /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/events: (1.859827ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39008]
I0110 19:49:09.337094  121338 wrap.go:47] PUT /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-47/status: (2.315857ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39004]
I0110 19:49:09.338948  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-47: (1.298601ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39008]
I0110 19:49:09.339213  121338 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0110 19:49:09.339423  121338 scheduling_queue.go:821] About to try and schedule pod preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-43
I0110 19:49:09.339438  121338 scheduler.go:454] Attempting to schedule pod: preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-43
I0110 19:49:09.339545  121338 factory.go:1070] Unable to schedule preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-43: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0110 19:49:09.339611  121338 factory.go:1175] Updating pod condition for preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-43 to (PodScheduled==False, Reason=Unschedulable)
I0110 19:49:09.341281  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-43: (1.313766ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39006]
I0110 19:49:09.342035  121338 wrap.go:47] PUT /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-43/status: (2.16768ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39008]
I0110 19:49:09.343284  121338 wrap.go:47] PATCH /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/events/ppod-43.1578947ff55c5629: (2.871841ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39010]
I0110 19:49:09.343721  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-43: (1.160189ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39008]
I0110 19:49:09.344000  121338 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0110 19:49:09.344175  121338 scheduling_queue.go:821] About to try and schedule pod preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-47
I0110 19:49:09.344195  121338 scheduler.go:454] Attempting to schedule pod: preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-47
I0110 19:49:09.344314  121338 factory.go:1070] Unable to schedule preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-47: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0110 19:49:09.344367  121338 factory.go:1175] Updating pod condition for preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-47 to (PodScheduled==False, Reason=Unschedulable)
I0110 19:49:09.345885  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-47: (1.241047ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39006]
I0110 19:49:09.346257  121338 wrap.go:47] PUT /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-47/status: (1.623417ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39010]
I0110 19:49:09.348197  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-47: (1.303149ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39010]
I0110 19:49:09.348287  121338 wrap.go:47] PATCH /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/events/ppod-47.1578947ff76afb0e: (2.648969ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39012]
I0110 19:49:09.348483  121338 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0110 19:49:09.348692  121338 scheduling_queue.go:821] About to try and schedule pod preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-46
I0110 19:49:09.348709  121338 scheduler.go:454] Attempting to schedule pod: preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-46
I0110 19:49:09.348802  121338 factory.go:1070] Unable to schedule preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-46: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0110 19:49:09.348862  121338 factory.go:1175] Updating pod condition for preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-46 to (PodScheduled==False, Reason=Unschedulable)
I0110 19:49:09.350875  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-46: (1.683674ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39006]
I0110 19:49:09.352102  121338 wrap.go:47] POST /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/events: (2.562452ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39014]
I0110 19:49:09.353626  121338 wrap.go:47] PUT /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-46/status: (4.502911ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39010]
I0110 19:49:09.355659  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-46: (1.549505ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39014]
I0110 19:49:09.356004  121338 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0110 19:49:09.356169  121338 scheduling_queue.go:821] About to try and schedule pod preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-41
I0110 19:49:09.356183  121338 scheduler.go:454] Attempting to schedule pod: preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-41
I0110 19:49:09.356335  121338 factory.go:1070] Unable to schedule preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-41: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0110 19:49:09.356384  121338 factory.go:1175] Updating pod condition for preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-41 to (PodScheduled==False, Reason=Unschedulable)
I0110 19:49:09.358602  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-41: (1.828846ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39014]
I0110 19:49:09.359313  121338 wrap.go:47] PUT /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-41/status: (2.423311ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39006]
I0110 19:49:09.360335  121338 wrap.go:47] PATCH /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/events/ppod-41.1578947ff503e435: (3.128346ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39016]
I0110 19:49:09.361469  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-41: (1.460581ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39006]
I0110 19:49:09.361837  121338 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0110 19:49:09.362023  121338 scheduling_queue.go:821] About to try and schedule pod preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-46
I0110 19:49:09.362038  121338 scheduler.go:454] Attempting to schedule pod: preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-46
I0110 19:49:09.362147  121338 factory.go:1070] Unable to schedule preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-46: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0110 19:49:09.362205  121338 factory.go:1175] Updating pod condition for preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-46 to (PodScheduled==False, Reason=Unschedulable)
I0110 19:49:09.363953  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-46: (1.513732ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39014]
I0110 19:49:09.364169  121338 wrap.go:47] PUT /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-46/status: (1.726509ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39016]
I0110 19:49:09.366271  121338 wrap.go:47] PATCH /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/events/ppod-46.1578947ff845b3af: (2.615285ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39018]
I0110 19:49:09.366478  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-46: (1.400224ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39016]
I0110 19:49:09.366786  121338 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0110 19:49:09.366953  121338 scheduling_queue.go:821] About to try and schedule pod preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-44
I0110 19:49:09.366969  121338 scheduler.go:454] Attempting to schedule pod: preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-44
I0110 19:49:09.367048  121338 factory.go:1070] Unable to schedule preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-44: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0110 19:49:09.367097  121338 factory.go:1175] Updating pod condition for preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-44 to (PodScheduled==False, Reason=Unschedulable)
I0110 19:49:09.369310  121338 wrap.go:47] PUT /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-44/status: (1.964851ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39018]
I0110 19:49:09.369460  121338 wrap.go:47] POST /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/events: (1.663933ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39020]
I0110 19:49:09.369482  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-44: (1.760336ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39014]
I0110 19:49:09.371253  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-44: (1.512847ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39018]
I0110 19:49:09.371508  121338 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0110 19:49:09.371687  121338 scheduling_queue.go:821] About to try and schedule pod preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-39
I0110 19:49:09.371702  121338 scheduler.go:454] Attempting to schedule pod: preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-39
I0110 19:49:09.371854  121338 factory.go:1070] Unable to schedule preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-39: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0110 19:49:09.371936  121338 factory.go:1175] Updating pod condition for preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-39 to (PodScheduled==False, Reason=Unschedulable)
I0110 19:49:09.374076  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-39: (1.913814ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39020]
I0110 19:49:09.374303  121338 wrap.go:47] PUT /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-39/status: (1.612979ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39014]
I0110 19:49:09.375211  121338 wrap.go:47] PATCH /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/events/ppod-39.1578947ff4a90edf: (2.489896ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39022]
I0110 19:49:09.375889  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-39: (1.125812ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39014]
I0110 19:49:09.376221  121338 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0110 19:49:09.376409  121338 scheduling_queue.go:821] About to try and schedule pod preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-44
I0110 19:49:09.376425  121338 scheduler.go:454] Attempting to schedule pod: preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-44
I0110 19:49:09.376527  121338 factory.go:1070] Unable to schedule preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-44: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0110 19:49:09.376578  121338 factory.go:1175] Updating pod condition for preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-44 to (PodScheduled==False, Reason=Unschedulable)
I0110 19:49:09.378138  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-44: (1.26931ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39020]
I0110 19:49:09.378857  121338 wrap.go:47] PUT /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-44/status: (2.055735ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39022]
I0110 19:49:09.379623  121338 wrap.go:47] PATCH /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/events/ppod-44.1578947ff95c132d: (2.395818ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39024]
I0110 19:49:09.380461  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-44: (1.181678ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39022]
I0110 19:49:09.380724  121338 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0110 19:49:09.380882  121338 scheduling_queue.go:821] About to try and schedule pod preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-42
I0110 19:49:09.380894  121338 scheduler.go:454] Attempting to schedule pod: preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-42
I0110 19:49:09.380979  121338 factory.go:1070] Unable to schedule preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-42: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0110 19:49:09.381024  121338 factory.go:1175] Updating pod condition for preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-42 to (PodScheduled==False, Reason=Unschedulable)
I0110 19:49:09.383152  121338 wrap.go:47] POST /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/events: (1.419646ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39026]
I0110 19:49:09.383338  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-42: (1.702547ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39020]
I0110 19:49:09.383472  121338 wrap.go:47] PUT /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-42/status: (2.194294ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39024]
I0110 19:49:09.385258  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-42: (1.392ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39020]
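The event traffic in this log mixes POST /events returning 201 (first occurrence of a FailedScheduling event) with PATCH /events/<pod>.<id> returning 200 (a deduplicated repeat that bumps the existing event's count). That is the client-side event correlator at work. A hedged sketch of the wiring, modeled on client-go's tools/record API rather than copied from the scheduler (the helper names are invented for illustration):

    package main

    import (
        v1 "k8s.io/api/core/v1"
        metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
        "k8s.io/client-go/kubernetes"
        "k8s.io/client-go/kubernetes/fake"
        "k8s.io/client-go/kubernetes/scheme"
        typedcorev1 "k8s.io/client-go/kubernetes/typed/core/v1"
        "k8s.io/client-go/tools/record"
    )

    // newSchedulerRecorder wires an EventRecorder to the API the way a
    // scheduler-like component typically does. Events pass through a
    // correlator before the sink, so repeats of the same (object, reason,
    // message) become PATCHes instead of fresh POSTs.
    func newSchedulerRecorder(client kubernetes.Interface) record.EventRecorder {
        b := record.NewBroadcaster()
        b.StartRecordingToSink(&typedcorev1.EventSinkImpl{Interface: client.CoreV1().Events("")})
        return b.NewRecorder(scheme.Scheme, v1.EventSource{Component: "default-scheduler"})
    }

    func main() {
        client := fake.NewSimpleClientset()
        r := newSchedulerRecorder(client)
        pod := &v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "ppod-44", Namespace: "demo"}}
        // Emitting the same event twice: the first becomes a create, the
        // second a correlated update. Delivery is asynchronous, so a real
        // process would keep running while the sink flushes.
        r.Eventf(pod, v1.EventTypeWarning, "FailedScheduling",
            "0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.")
        r.Eventf(pod, v1.EventTypeWarning, "FailedScheduling",
            "0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.")
    }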
I0110 19:49:09.385517  121338 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0110 19:49:09.385724  121338 scheduling_queue.go:821] About to try and schedule pod preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-36
I0110 19:49:09.385746  121338 scheduler.go:454] Attempting to schedule pod: preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-36
I0110 19:49:09.385835  121338 factory.go:1070] Unable to schedule preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-36: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0110 19:49:09.385878  121338 factory.go:1175] Updating pod condition for preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-36 to (PodScheduled==False, Reason=Unschedulable)
I0110 19:49:09.388004  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-36: (1.325809ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39026]
I0110 19:49:09.388107  121338 wrap.go:47] PUT /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-36/status: (1.935891ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39020]
I0110 19:49:09.389731  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-36: (1.157511ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39020]
I0110 19:49:09.390051  121338 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0110 19:49:09.390199  121338 scheduling_queue.go:821] About to try and schedule pod preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-40
I0110 19:49:09.390250  121338 scheduler.go:454] Attempting to schedule pod: preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-40
I0110 19:49:09.390397  121338 wrap.go:47] PATCH /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/events/ppod-36.1578947ff448585f: (2.755169ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39028]
I0110 19:49:09.390574  121338 factory.go:1070] Unable to schedule preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-40: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0110 19:49:09.390669  121338 factory.go:1175] Updating pod condition for preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-40 to (PodScheduled==False, Reason=Unschedulable)
I0110 19:49:09.392631  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-40: (1.700446ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39020]
I0110 19:49:09.393983  121338 wrap.go:47] POST /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/events: (1.878282ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39030]
I0110 19:49:09.394053  121338 wrap.go:47] PUT /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-40/status: (3.06812ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39026]
I0110 19:49:09.395743  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-40: (1.32553ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39030]
I0110 19:49:09.395992  121338 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0110 19:49:09.396154  121338 scheduling_queue.go:821] About to try and schedule pod preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-38
I0110 19:49:09.396170  121338 scheduler.go:454] Attempting to schedule pod: preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-38
I0110 19:49:09.396274  121338 factory.go:1070] Unable to schedule preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-38: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0110 19:49:09.396313  121338 factory.go:1175] Updating pod condition for preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-38 to (PodScheduled==False, Reason=Unschedulable)
I0110 19:49:09.398353  121338 wrap.go:47] POST /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/events: (1.426037ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39032]
I0110 19:49:09.398402  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-38: (1.77119ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39020]
I0110 19:49:09.398965  121338 wrap.go:47] PUT /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-38/status: (2.410755ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39030]
I0110 19:49:09.400545  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-38: (1.176145ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39020]
I0110 19:49:09.400836  121338 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0110 19:49:09.401027  121338 scheduling_queue.go:821] About to try and schedule pod preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-40
I0110 19:49:09.401043  121338 scheduler.go:454] Attempting to schedule pod: preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-40
I0110 19:49:09.401173  121338 factory.go:1070] Unable to schedule preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-40: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0110 19:49:09.401242  121338 factory.go:1175] Updating pod condition for preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-40 to (PodScheduled==False, Reason=Unschedulable)
I0110 19:49:09.402750  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-40: (1.26713ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39032]
I0110 19:49:09.403340  121338 wrap.go:47] PUT /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-40/status: (1.874148ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39020]
I0110 19:49:09.404685  121338 wrap.go:47] PATCH /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/events/ppod-40.1578947ffac3bf4b: (2.565928ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39034]
I0110 19:49:09.404836  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-40: (1.072998ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39020]
I0110 19:49:09.405083  121338 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0110 19:49:09.405249  121338 scheduling_queue.go:821] About to try and schedule pod preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-38
I0110 19:49:09.405265  121338 scheduler.go:454] Attempting to schedule pod: preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-38
I0110 19:49:09.405363  121338 factory.go:1070] Unable to schedule preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-38: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0110 19:49:09.405414  121338 factory.go:1175] Updating pod condition for preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-38 to (PodScheduled==False, Reason=Unschedulable)
I0110 19:49:09.406982  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-38: (1.289387ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39032]
I0110 19:49:09.411991  121338 wrap.go:47] PUT /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-38/status: (6.260444ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39034]
I0110 19:49:09.417541  121338 wrap.go:47] PATCH /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/events/ppod-38.1578947ffb19f0a2: (10.787603ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39036]
I0110 19:49:09.425302  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-38: (10.234141ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39034]
I0110 19:49:09.426455  121338 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0110 19:49:09.426967  121338 scheduling_queue.go:821] About to try and schedule pod preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-34
I0110 19:49:09.427009  121338 scheduler.go:454] Attempting to schedule pod: preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-34
I0110 19:49:09.427333  121338 factory.go:1070] Unable to schedule preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-34: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0110 19:49:09.427411  121338 factory.go:1175] Updating pod condition for preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-34 to (PodScheduled==False, Reason=Unschedulable)
I0110 19:49:09.434141  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-34: (5.432724ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39036]
I0110 19:49:09.435634  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/preemptor-pod: (3.8503ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39032]
I0110 19:49:09.437135  121338 wrap.go:47] PUT /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-34/status: (9.014651ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39034]
I0110 19:49:09.438891  121338 wrap.go:47] PATCH /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/events/ppod-34.1578947ff3cafc8d: (5.806534ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39038]
I0110 19:49:09.443241  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-34: (5.22136ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39032]
I0110 19:49:09.443722  121338 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0110 19:49:09.443886  121338 scheduling_queue.go:821] About to try and schedule pod preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-37
I0110 19:49:09.443908  121338 scheduler.go:454] Attempting to schedule pod: preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-37
I0110 19:49:09.444338  121338 factory.go:1070] Unable to schedule preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-37: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0110 19:49:09.444432  121338 factory.go:1175] Updating pod condition for preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-37 to (PodScheduled==False, Reason=Unschedulable)
I0110 19:49:09.454469  121338 wrap.go:47] PUT /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-37/status: (9.387541ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39038]
I0110 19:49:09.455276  121338 wrap.go:47] POST /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/events: (6.39651ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39040]
I0110 19:49:09.458949  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-37: (2.187022ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39038]
I0110 19:49:09.459323  121338 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0110 19:49:09.459917  121338 scheduling_queue.go:821] About to try and schedule pod preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-32
I0110 19:49:09.459937  121338 scheduler.go:454] Attempting to schedule pod: preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-32
I0110 19:49:09.460166  121338 factory.go:1070] Unable to schedule preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-32: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0110 19:49:09.460261  121338 factory.go:1175] Updating pod condition for preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-32 to (PodScheduled==False, Reason=Unschedulable)
I0110 19:49:09.462911  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-37: (16.83916ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39036]
I0110 19:49:09.463070  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-32: (1.919405ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39040]
I0110 19:49:09.463867  121338 wrap.go:47] PUT /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-32/status: (2.995962ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39038]
I0110 19:49:09.469301  121338 wrap.go:47] PATCH /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/events/ppod-32.1578947ff353ca5b: (4.412292ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39046]
I0110 19:49:09.476241  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-32: (11.75232ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39040]
I0110 19:49:09.476847  121338 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0110 19:49:09.477285  121338 scheduling_queue.go:821] About to try and schedule pod preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-37
I0110 19:49:09.477304  121338 scheduler.go:454] Attempting to schedule pod: preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-37
I0110 19:49:09.477469  121338 factory.go:1070] Unable to schedule preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-37: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0110 19:49:09.477639  121338 factory.go:1175] Updating pod condition for preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-37 to (PodScheduled==False, Reason=Unschedulable)
I0110 19:49:09.480509  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-37: (2.084173ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39036]
I0110 19:49:09.481297  121338 wrap.go:47] PUT /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-37/status: (3.201864ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39046]
I0110 19:49:09.482222  121338 wrap.go:47] PATCH /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/events/ppod-37.1578947ffdf7b78d: (3.214731ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39048]
I0110 19:49:09.483789  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-37: (1.80264ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39046]
I0110 19:49:09.484130  121338 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0110 19:49:09.491681  121338 scheduling_queue.go:821] About to try and schedule pod preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-35
I0110 19:49:09.491764  121338 scheduler.go:454] Attempting to schedule pod: preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-35
I0110 19:49:09.492207  121338 factory.go:1070] Unable to schedule preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-35: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0110 19:49:09.492364  121338 factory.go:1175] Updating pod condition for preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-35 to (PodScheduled==False, Reason=Unschedulable)
I0110 19:49:09.495172  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-35: (1.869204ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39036]
I0110 19:49:09.497563  121338 wrap.go:47] PUT /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-35/status: (4.477322ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39048]
I0110 19:49:09.498377  121338 wrap.go:47] POST /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/events: (3.978655ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39050]
I0110 19:49:09.499939  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-35: (1.816296ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39048]
I0110 19:49:09.500347  121338 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0110 19:49:09.500655  121338 scheduling_queue.go:821] About to try and schedule pod preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-33
I0110 19:49:09.500724  121338 scheduler.go:454] Attempting to schedule pod: preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-33
I0110 19:49:09.500890  121338 factory.go:1070] Unable to schedule preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-33: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0110 19:49:09.500950  121338 factory.go:1175] Updating pod condition for preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-33 to (PodScheduled==False, Reason=Unschedulable)
I0110 19:49:09.504043  121338 wrap.go:47] POST /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/events: (2.021439ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39052]
I0110 19:49:09.504305  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-33: (2.825361ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39036]
I0110 19:49:09.508947  121338 wrap.go:47] PUT /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-33/status: (7.497954ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39050]
I0110 19:49:09.510868  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-33: (1.424693ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39036]
I0110 19:49:09.511173  121338 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0110 19:49:09.511344  121338 scheduling_queue.go:821] About to try and schedule pod preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-35
I0110 19:49:09.511361  121338 scheduler.go:454] Attempting to schedule pod: preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-35
I0110 19:49:09.511434  121338 factory.go:1070] Unable to schedule preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-35: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0110 19:49:09.511481  121338 factory.go:1175] Updating pod condition for preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-35 to (PodScheduled==False, Reason=Unschedulable)
I0110 19:49:09.513630  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-35: (1.316495ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39052]
I0110 19:49:09.514519  121338 wrap.go:47] PUT /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-35/status: (2.26647ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39036]
I0110 19:49:09.516773  121338 wrap.go:47] PATCH /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/events/ppod-35.1578948000d2b047: (2.914383ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39054]
I0110 19:49:09.519976  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-35: (1.505604ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39036]
I0110 19:49:09.520377  121338 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0110 19:49:09.520575  121338 scheduling_queue.go:821] About to try and schedule pod preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-33
I0110 19:49:09.520599  121338 scheduler.go:454] Attempting to schedule pod: preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-33
I0110 19:49:09.520696  121338 factory.go:1070] Unable to schedule preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-33: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0110 19:49:09.520836  121338 factory.go:1175] Updating pod condition for preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-33 to (PodScheduled==False, Reason=Unschedulable)
I0110 19:49:09.529869  121338 wrap.go:47] PUT /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-33/status: (3.108669ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39052]
I0110 19:49:09.529886  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-33: (3.116377ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39036]
I0110 19:49:09.531849  121338 wrap.go:47] PATCH /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/events/ppod-33.1578948001566c8b: (3.693622ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39056]
I0110 19:49:09.532599  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-33: (1.617506ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39052]
I0110 19:49:09.533095  121338 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0110 19:49:09.533299  121338 scheduling_queue.go:821] About to try and schedule pod preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-29
I0110 19:49:09.533315  121338 scheduler.go:454] Attempting to schedule pod: preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-29
I0110 19:49:09.533437  121338 factory.go:1070] Unable to schedule preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-29: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0110 19:49:09.533523  121338 factory.go:1175] Updating pod condition for preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-29 to (PodScheduled==False, Reason=Unschedulable)
I0110 19:49:09.535352  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-29: (1.575675ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39056]
I0110 19:49:09.536156  121338 wrap.go:47] PUT /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-29/status: (2.250449ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39036]
I0110 19:49:09.536604  121338 wrap.go:47] PATCH /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/events/ppod-29.1578947ff2bab027: (2.185828ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39058]
I0110 19:49:09.538080  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-29: (1.197201ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39036]
I0110 19:49:09.538352  121338 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0110 19:49:09.538534  121338 scheduling_queue.go:821] About to try and schedule pod preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-31
I0110 19:49:09.538553  121338 scheduler.go:454] Attempting to schedule pod: preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-31
I0110 19:49:09.538556  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/preemptor-pod: (1.017048ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39056]
I0110 19:49:09.538665  121338 factory.go:1070] Unable to schedule preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-31: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0110 19:49:09.538705  121338 factory.go:1175] Updating pod condition for preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-31 to (PodScheduled==False, Reason=Unschedulable)
I0110 19:49:09.538753  121338 preemption_test.go:583] Check unschedulable pods still exist and were never scheduled...
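The preemption_test.go:583 line marks the test's switch from churn to verification: interleaved with the still-running scheduler loop, the test now GETs ppod-0, ppod-1, ... in order (the lines that follow) to confirm each low-priority pod still exists and was never bound to a node. A plausible shape for that loop, offered as an assumption for illustration rather than the test's verbatim code (checkUnschedulablePods and the fake-client usage are invented here; the Get signature without a context matches client-go of this era, newer versions take a context first):

    package main

    import (
        "fmt"

        v1 "k8s.io/api/core/v1"
        metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
        "k8s.io/client-go/kubernetes"
        "k8s.io/client-go/kubernetes/fake"
    )

    // checkUnschedulablePods fails if any ppod-N is gone or has been bound,
    // mirroring the GET /pods/ppod-0, ppod-1, ... sequence in the log.
    func checkUnschedulablePods(client kubernetes.Interface, ns string, count int) error {
        for i := 0; i < count; i++ {
            name := fmt.Sprintf("ppod-%d", i)
            pod, err := client.CoreV1().Pods(ns).Get(name, metav1.GetOptions{})
            if err != nil {
                return fmt.Errorf("pod %s should still exist: %v", name, err)
            }
            if pod.Spec.NodeName != "" {
                return fmt.Errorf("pod %s was bound to %s but should be unschedulable", name, pod.Spec.NodeName)
            }
        }
        return nil
    }

    func main() {
        // A fake clientset seeded with one pending pod stands in for the
        // real apiserver the integration test talks to.
        client := fake.NewSimpleClientset(&v1.Pod{
            ObjectMeta: metav1.ObjectMeta{Name: "ppod-0", Namespace: "demo"},
        })
        if err := checkUnschedulablePods(client, "demo", 1); err != nil {
            fmt.Println("check failed:", err)
            return
        }
        fmt.Println("all pods still pending")
    }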
I0110 19:49:09.540193  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-0: (1.288108ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39056]
I0110 19:49:09.540573  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-31: (1.659461ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39036]
I0110 19:49:09.541063  121338 wrap.go:47] POST /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/events: (1.752326ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39062]
I0110 19:49:09.541159  121338 wrap.go:47] PUT /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-31/status: (2.068416ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39060]
I0110 19:49:09.542519  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-1: (1.895949ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39056]
I0110 19:49:09.542767  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-31: (1.177892ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39062]
I0110 19:49:09.543030  121338 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0110 19:49:09.543206  121338 scheduling_queue.go:821] About to try and schedule pod preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-30
I0110 19:49:09.543255  121338 scheduler.go:454] Attempting to schedule pod: preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-30
I0110 19:49:09.543351  121338 factory.go:1070] Unable to schedule preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-30: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0110 19:49:09.543396  121338 factory.go:1175] Updating pod condition for preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-30 to (PodScheduled==False, Reason=Unschedulable)
I0110 19:49:09.545253  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-30: (1.241532ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39036]
I0110 19:49:09.545323  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-2: (2.405861ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39056]
I0110 19:49:09.545777  121338 wrap.go:47] POST /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/events: (1.672777ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39064]
I0110 19:49:09.545808  121338 wrap.go:47] PUT /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-30/status: (2.110899ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39062]
I0110 19:49:09.547570  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-3: (1.07838ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39064]
I0110 19:49:09.547742  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-30: (1.218555ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39036]
I0110 19:49:09.548007  121338 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0110 19:49:09.548182  121338 scheduling_queue.go:821] About to try and schedule pod preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-31
I0110 19:49:09.548197  121338 scheduler.go:454] Attempting to schedule pod: preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-31
I0110 19:49:09.548318  121338 factory.go:1070] Unable to schedule preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-31: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0110 19:49:09.548367  121338 factory.go:1175] Updating pod condition for preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-31 to (PodScheduled==False, Reason=Unschedulable)
I0110 19:49:09.549310  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-4: (1.343893ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39064]
I0110 19:49:09.549641  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-31: (1.035534ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39036]
I0110 19:49:09.550896  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-5: (1.065148ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39064]
I0110 19:49:09.551141  121338 wrap.go:47] PUT /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-31/status: (2.22897ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39066]
I0110 19:49:09.551475  121338 wrap.go:47] PATCH /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/events/ppod-31.1578948003969c39: (2.470914ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39068]
I0110 19:49:09.552331  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-6: (1.113208ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39064]
I0110 19:49:09.552615  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-31: (1.14112ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39066]
I0110 19:49:09.552854  121338 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0110 19:49:09.553000  121338 scheduling_queue.go:821] About to try and schedule pod preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-30
I0110 19:49:09.553017  121338 scheduler.go:454] Attempting to schedule pod: preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-30
I0110 19:49:09.553102  121338 factory.go:1070] Unable to schedule preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-30: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0110 19:49:09.553200  121338 factory.go:1175] Updating pod condition for preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-30 to (PodScheduled==False, Reason=Unschedulable)
I0110 19:49:09.553865  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-7: (1.029502ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39068]
I0110 19:49:09.555238  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-8: (1.015666ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39068]
I0110 19:49:09.555338  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-30: (1.633878ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39070]
I0110 19:49:09.556064  121338 wrap.go:47] PUT /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-30/status: (2.359874ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39036]
I0110 19:49:09.556781  121338 wrap.go:47] PATCH /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/events/ppod-30.1578948003de33e4: (2.752519ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39072]
I0110 19:49:09.556882  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-9: (1.037658ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39070]
I0110 19:49:09.557959  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-30: (1.225137ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39036]
I0110 19:49:09.558258  121338 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0110 19:49:09.558278  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-10: (1.062088ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39072]
I0110 19:49:09.558443  121338 scheduling_queue.go:821] About to try and schedule pod preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-25
I0110 19:49:09.558535  121338 scheduler.go:454] Attempting to schedule pod: preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-25
I0110 19:49:09.558781  121338 factory.go:1070] Unable to schedule preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-25: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0110 19:49:09.558897  121338 factory.go:1175] Updating pod condition for preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-25 to (PodScheduled==False, Reason=Unschedulable)
I0110 19:49:09.560307  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-11: (1.287051ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39036]
I0110 19:49:09.560398  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-25: (1.357935ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39068]
I0110 19:49:09.561903  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-12: (1.057718ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39068]
I0110 19:49:09.562373  121338 wrap.go:47] PUT /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-25/status: (2.909342ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39074]
I0110 19:49:09.564193  121338 wrap.go:47] PATCH /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/events/ppod-25.1578947ff203f13d: (3.442899ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39036]
I0110 19:49:09.565264  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-13: (1.708508ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39068]
I0110 19:49:09.567537  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-14: (1.897836ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39068]
I0110 19:49:09.569299  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-15: (1.298698ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39068]
I0110 19:49:09.571680  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-16: (1.97321ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39068]
I0110 19:49:09.573696  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-25: (10.144738ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39074]
I0110 19:49:09.573999  121338 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0110 19:49:09.574200  121338 scheduling_queue.go:821] About to try and schedule pod preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-28
I0110 19:49:09.574210  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-17: (2.12841ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39068]
I0110 19:49:09.574221  121338 scheduler.go:454] Attempting to schedule pod: preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-28
I0110 19:49:09.574345  121338 factory.go:1070] Unable to schedule preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-28: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0110 19:49:09.574399  121338 factory.go:1175] Updating pod condition for preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-28 to (PodScheduled==False, Reason=Unschedulable)
I0110 19:49:09.576703  121338 wrap.go:47] POST /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/events: (1.837358ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39084]
I0110 19:49:09.577341  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-18: (2.313375ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39086]
I0110 19:49:09.577345  121338 wrap.go:47] PUT /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-28/status: (2.481564ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39036]
I0110 19:49:09.578063  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-28: (3.45195ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39074]
I0110 19:49:09.578910  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-19: (1.183384ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39084]
I0110 19:49:09.581087  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-20: (1.733332ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39084]
I0110 19:49:09.581320  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-28: (3.270787ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39076]
I0110 19:49:09.581582  121338 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0110 19:49:09.581752  121338 scheduling_queue.go:821] About to try and schedule pod preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-22
I0110 19:49:09.581768  121338 scheduler.go:454] Attempting to schedule pod: preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-22
I0110 19:49:09.581862  121338 factory.go:1070] Unable to schedule preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-22: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0110 19:49:09.581919  121338 factory.go:1175] Updating pod condition for preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-22 to (PodScheduled==False, Reason=Unschedulable)
I0110 19:49:09.582670  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-21: (1.239054ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39084]
I0110 19:49:09.583996  121338 wrap.go:47] PUT /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-22/status: (1.73096ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39076]
I0110 19:49:09.584026  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-22: (1.030663ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39084]
I0110 19:49:09.585022  121338 wrap.go:47] PATCH /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/events/ppod-22.1578947ff1b06b71: (2.316169ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39088]
I0110 19:49:09.585045  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-22: (1.883334ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39074]
I0110 19:49:09.586057  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-23: (1.04005ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39076]
I0110 19:49:09.586117  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-22: (1.164111ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39084]
I0110 19:49:09.586507  121338 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0110 19:49:09.586676  121338 scheduling_queue.go:821] About to try and schedule pod preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-28
I0110 19:49:09.586696  121338 scheduler.go:454] Attempting to schedule pod: preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-28
I0110 19:49:09.586813  121338 factory.go:1070] Unable to schedule preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-28: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0110 19:49:09.586860  121338 factory.go:1175] Updating pod condition for preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-28 to (PodScheduled==False, Reason=Unschedulable)
I0110 19:49:09.588203  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-24: (1.652676ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39088]
I0110 19:49:09.588939  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-28: (1.023072ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39090]
I0110 19:49:09.589373  121338 wrap.go:47] PUT /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-28/status: (2.0751ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39074]
I0110 19:49:09.589582  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-25: (988.862µs) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39088]
I0110 19:49:09.590661  121338 wrap.go:47] PATCH /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/events/ppod-28.1578948005b73d39: (2.629655ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39092]
I0110 19:49:09.591155  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-28: (1.314832ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39074]
I0110 19:49:09.591196  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-26: (1.093703ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39090]
I0110 19:49:09.591437  121338 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0110 19:49:09.591604  121338 scheduling_queue.go:821] About to try and schedule pod preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-26
I0110 19:49:09.591624  121338 scheduler.go:454] Attempting to schedule pod: preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-26
I0110 19:49:09.591716  121338 factory.go:1070] Unable to schedule preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-26: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0110 19:49:09.591762  121338 factory.go:1175] Updating pod condition for preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-26 to (PodScheduled==False, Reason=Unschedulable)
I0110 19:49:09.592713  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-27: (1.091632ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39074]
I0110 19:49:09.593244  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-26: (955.746µs) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39094]
I0110 19:49:09.593709  121338 wrap.go:47] PUT /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-26/status: (1.732666ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39092]
I0110 19:49:09.594864  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-28: (1.114611ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39094]
I0110 19:49:09.595338  121338 wrap.go:47] POST /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/events: (1.678804ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39074]
I0110 19:49:09.595392  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-26: (1.350877ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39092]
I0110 19:49:09.595618  121338 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0110 19:49:09.595758  121338 scheduling_queue.go:821] About to try and schedule pod preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-24
I0110 19:49:09.595774  121338 scheduler.go:454] Attempting to schedule pod: preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-24
I0110 19:49:09.595866  121338 factory.go:1070] Unable to schedule preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-24: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0110 19:49:09.595915  121338 factory.go:1175] Updating pod condition for preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-24 to (PodScheduled==False, Reason=Unschedulable)
I0110 19:49:09.596773  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-29: (1.489914ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39094]
I0110 19:49:09.597248  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-24: (963.695µs) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39096]
I0110 19:49:09.598440  121338 wrap.go:47] PUT /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-24/status: (2.28849ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39092]
I0110 19:49:09.598865  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-30: (1.636821ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39094]
I0110 19:49:09.598912  121338 wrap.go:47] POST /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/events: (2.458912ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39098]
I0110 19:49:09.600027  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-24: (1.185533ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39092]
I0110 19:49:09.600306  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-31: (1.075242ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39094]
I0110 19:49:09.600350  121338 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0110 19:49:09.600514  121338 scheduling_queue.go:821] About to try and schedule pod preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-26
I0110 19:49:09.600532  121338 scheduler.go:454] Attempting to schedule pod: preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-26
I0110 19:49:09.600661  121338 factory.go:1070] Unable to schedule preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-26: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0110 19:49:09.600707  121338 factory.go:1175] Updating pod condition for preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-26 to (PodScheduled==False, Reason=Unschedulable)
I0110 19:49:09.601797  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-32: (1.126597ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39098]
I0110 19:49:09.602152  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-26: (948.494µs) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39100]
I0110 19:49:09.602395  121338 wrap.go:47] PUT /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-26/status: (1.475656ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39096]
I0110 19:49:09.603207  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-33: (1.012666ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39098]
I0110 19:49:09.604013  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-26: (1.06147ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39096]
I0110 19:49:09.604305  121338 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0110 19:49:09.604472  121338 scheduling_queue.go:821] About to try and schedule pod preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-24
I0110 19:49:09.604511  121338 scheduler.go:454] Attempting to schedule pod: preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-24
I0110 19:49:09.604628  121338 factory.go:1070] Unable to schedule preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-24: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0110 19:49:09.604674  121338 wrap.go:47] PATCH /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/events/ppod-26.1578948006c030a8: (2.817303ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39102]
I0110 19:49:09.604732  121338 factory.go:1175] Updating pod condition for preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-24 to (PodScheduled==False, Reason=Unschedulable)
I0110 19:49:09.604939  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-34: (1.065237ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39098]
I0110 19:49:09.606220  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-35: (946.638µs) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39098]
I0110 19:49:09.607048  121338 wrap.go:47] PUT /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-24/status: (2.072256ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39100]
I0110 19:49:09.607062  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-24: (2.121183ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39096]
I0110 19:49:09.607548  121338 wrap.go:47] PATCH /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/events/ppod-24.1578948006ff91e8: (2.165924ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39102]
I0110 19:49:09.608196  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-36: (1.583474ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39098]
I0110 19:49:09.608483  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-24: (963.621µs) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39100]
I0110 19:49:09.608770  121338 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0110 19:49:09.608921  121338 scheduling_queue.go:821] About to try and schedule pod preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-20
I0110 19:49:09.608935  121338 scheduler.go:454] Attempting to schedule pod: preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-20
I0110 19:49:09.609035  121338 factory.go:1070] Unable to schedule preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-20: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0110 19:49:09.609093  121338 factory.go:1175] Updating pod condition for preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-20 to (PodScheduled==False, Reason=Unschedulable)
I0110 19:49:09.609675  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-37: (1.032442ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39102]
I0110 19:49:09.610414  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-20: (1.073341ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39096]
I0110 19:49:09.611355  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-38: (1.348341ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39102]
I0110 19:49:09.611882  121338 wrap.go:47] PUT /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-20/status: (2.539313ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39100]
I0110 19:49:09.611952  121338 wrap.go:47] PATCH /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/events/ppod-20.1578947ff15dcf74: (2.119602ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39104]
I0110 19:49:09.612903  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-39: (1.134106ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39102]
I0110 19:49:09.613332  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-20: (1.035145ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39100]
I0110 19:49:09.613595  121338 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0110 19:49:09.613803  121338 scheduling_queue.go:821] About to try and schedule pod preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-23
I0110 19:49:09.613826  121338 scheduler.go:454] Attempting to schedule pod: preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-23
I0110 19:49:09.613940  121338 factory.go:1070] Unable to schedule preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-23: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0110 19:49:09.613989  121338 factory.go:1175] Updating pod condition for preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-23 to (PodScheduled==False, Reason=Unschedulable)
I0110 19:49:09.614394  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-40: (1.074959ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39102]
I0110 19:49:09.615796  121338 wrap.go:47] POST /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/events: (1.456227ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39096]
I0110 19:49:09.616381  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-41: (1.520736ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39102]
I0110 19:49:09.616472  121338 wrap.go:47] PUT /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-23/status: (2.26751ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39100]
I0110 19:49:09.616679  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-23: (1.352573ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39106]
I0110 19:49:09.617976  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-42: (1.077579ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39102]
I0110 19:49:09.618215  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-23: (1.209877ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39096]
I0110 19:49:09.618462  121338 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0110 19:49:09.618664  121338 scheduling_queue.go:821] About to try and schedule pod preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-18
I0110 19:49:09.618681  121338 scheduler.go:454] Attempting to schedule pod: preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-18
I0110 19:49:09.618798  121338 factory.go:1070] Unable to schedule preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-18: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0110 19:49:09.618852  121338 factory.go:1175] Updating pod condition for preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-18 to (PodScheduled==False, Reason=Unschedulable)
I0110 19:49:09.619416  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-43: (1.039522ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39102]
I0110 19:49:09.620546  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-18: (1.196284ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39108]
I0110 19:49:09.620934  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-44: (1.156425ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39110]
I0110 19:49:09.621225  121338 wrap.go:47] PUT /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-18/status: (1.815748ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39096]
I0110 19:49:09.622126  121338 wrap.go:47] PATCH /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/events/ppod-18.1578947ff104e3bd: (2.394864ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39102]
I0110 19:49:09.622298  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-45: (999.48µs) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39110]
I0110 19:49:09.622524  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-18: (891.091µs) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39096]
I0110 19:49:09.622799  121338 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0110 19:49:09.622958  121338 scheduling_queue.go:821] About to try and schedule pod preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-23
I0110 19:49:09.622973  121338 scheduler.go:454] Attempting to schedule pod: preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-23
I0110 19:49:09.623120  121338 factory.go:1070] Unable to schedule preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-23: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0110 19:49:09.623178  121338 factory.go:1175] Updating pod condition for preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-23 to (PodScheduled==False, Reason=Unschedulable)
I0110 19:49:09.623769  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-46: (1.077866ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39102]
I0110 19:49:09.625083  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-23: (1.146075ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39108]
I0110 19:49:09.625184  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-47: (996.205µs) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39112]
I0110 19:49:09.625200  121338 wrap.go:47] PUT /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-23/status: (1.788101ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39096]
I0110 19:49:09.626270  121338 wrap.go:47] PATCH /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/events/ppod-23.157894800813527e: (2.106537ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39102]
I0110 19:49:09.626566  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-23: (993.445µs) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39108]
I0110 19:49:09.626585  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-48: (999.839µs) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39114]
I0110 19:49:09.626928  121338 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0110 19:49:09.627069  121338 scheduling_queue.go:821] About to try and schedule pod preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-21
I0110 19:49:09.627085  121338 scheduler.go:454] Attempting to schedule pod: preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-21
I0110 19:49:09.627152  121338 factory.go:1070] Unable to schedule preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-21: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0110 19:49:09.627195  121338 factory.go:1175] Updating pod condition for preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-21 to (PodScheduled==False, Reason=Unschedulable)
I0110 19:49:09.628186  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-49: (1.251933ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39114]
I0110 19:49:09.628427  121338 preemption_test.go:598] Cleaning up all pods...
I0110 19:49:09.628683  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-21: (977.028µs) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39102]
I0110 19:49:09.629990  121338 wrap.go:47] PUT /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-21/status: (2.237342ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39116]
I0110 19:49:09.631366  121338 wrap.go:47] POST /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/events: (3.581635ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39118]
I0110 19:49:09.631810  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-21: (1.086782ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39116]
I0110 19:49:09.632155  121338 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0110 19:49:09.632389  121338 scheduling_queue.go:821] About to try and schedule pod preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-17
I0110 19:49:09.632426  121338 scheduler.go:454] Attempting to schedule pod: preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-17
I0110 19:49:09.632472  121338 wrap.go:47] DELETE /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-0: (3.809687ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39114]
I0110 19:49:09.632637  121338 factory.go:1070] Unable to schedule preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-17: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0110 19:49:09.632684  121338 factory.go:1175] Updating pod condition for preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-17 to (PodScheduled==False, Reason=Unschedulable)
I0110 19:49:09.634756  121338 wrap.go:47] PUT /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-17/status: (1.786224ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39102]
I0110 19:49:09.635192  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-17: (2.066002ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39120]
I0110 19:49:09.636728  121338 wrap.go:47] PATCH /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/events/ppod-17.1578947ff0a8878a: (2.425955ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39122]
I0110 19:49:09.637890  121338 wrap.go:47] DELETE /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-1: (5.111413ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39118]
I0110 19:49:09.638467  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-17: (1.215961ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39122]
I0110 19:49:09.638755  121338 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0110 19:49:09.638971  121338 scheduling_queue.go:821] About to try and schedule pod preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-21
I0110 19:49:09.638991  121338 scheduler.go:454] Attempting to schedule pod: preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-21
I0110 19:49:09.639072  121338 factory.go:1070] Unable to schedule preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-21: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0110 19:49:09.639120  121338 factory.go:1175] Updating pod condition for preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-21 to (PodScheduled==False, Reason=Unschedulable)
I0110 19:49:09.640546  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-21: (1.087147ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39102]
I0110 19:49:09.640888  121338 wrap.go:47] PUT /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-21/status: (1.430214ms) 409 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39122]
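
[annotation] The 409 on this status PUT is the optimistic-concurrency race made visible: the write carried a resourceVersion that went stale when the cleanup goroutine touched ppod-21 in between, so the apiserver rejects it and the caller must re-read and retry. client-go packages this pattern as retry.RetryOnConflict; a stripped-down, self-contained version of the same idea (sentinel error and names invented here):

package main

import (
	"errors"
	"fmt"
)

var errConflict = errors.New("409 Conflict: object was modified")

// retryOnConflict re-runs fn after a conflict: in the real pattern,
// fn re-reads the object to pick up the new resourceVersion and then
// reapplies its change before writing again.
func retryOnConflict(attempts int, fn func() error) error {
	var err error
	for i := 0; i < attempts; i++ {
		if err = fn(); !errors.Is(err, errConflict) {
			return err // success, or a non-conflict failure
		}
	}
	return err
}

func main() {
	failures := 2 // simulate two stale writes before one succeeds
	err := retryOnConflict(5, func() error {
		if failures > 0 {
			failures--
			return errConflict
		}
		return nil
	})
	fmt.Println(err) // <nil>
}
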
I0110 19:49:09.642277  121338 wrap.go:47] DELETE /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-2: (4.011712ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39120]
I0110 19:49:09.642674  121338 wrap.go:47] PATCH /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/events/ppod-21.1578948008dce064: (2.479231ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39124]
I0110 19:49:09.642892  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-21: (1.655486ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39122]
I0110 19:49:09.643152  121338 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0110 19:49:09.643345  121338 scheduling_queue.go:821] About to try and schedule pod preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-19
I0110 19:49:09.643362  121338 scheduler.go:454] Attempting to schedule pod: preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-19
I0110 19:49:09.643427  121338 factory.go:1070] Unable to schedule preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-19: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0110 19:49:09.643460  121338 factory.go:1175] Updating pod condition for preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-19 to (PodScheduled==False, Reason=Unschedulable)
I0110 19:49:09.644747  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-19: (960.407µs) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39102]
I0110 19:49:09.645606  121338 wrap.go:47] PUT /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-19/status: (1.893694ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39124]
I0110 19:49:09.645824  121338 wrap.go:47] POST /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/events: (1.73949ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39126]
I0110 19:49:09.646344  121338 wrap.go:47] DELETE /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-3: (3.775035ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39120]
I0110 19:49:09.647353  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-19: (1.049615ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39126]
I0110 19:49:09.647634  121338 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0110 19:49:09.647796  121338 scheduling_queue.go:821] About to try and schedule pod preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-16
I0110 19:49:09.647811  121338 scheduler.go:454] Attempting to schedule pod: preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-16
I0110 19:49:09.647880  121338 factory.go:1070] Unable to schedule preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-16: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0110 19:49:09.647921  121338 factory.go:1175] Updating pod condition for preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-16 to (PodScheduled==False, Reason=Unschedulable)
I0110 19:49:09.649481  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-16: (1.25933ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39102]
I0110 19:49:09.649750  121338 wrap.go:47] PUT /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-16/status: (1.466491ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39126]
I0110 19:49:09.650077  121338 wrap.go:47] POST /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/events: (1.618064ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39128]
I0110 19:49:09.650276  121338 wrap.go:47] DELETE /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-4: (3.590952ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39120]
I0110 19:49:09.651031  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-16: (912.024µs) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39126]
I0110 19:49:09.651325  121338 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0110 19:49:09.651456  121338 scheduling_queue.go:821] About to try and schedule pod preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-14
I0110 19:49:09.651472  121338 scheduler.go:454] Attempting to schedule pod: preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-14
I0110 19:49:09.651562  121338 factory.go:1070] Unable to schedule preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-14: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0110 19:49:09.651615  121338 factory.go:1175] Updating pod condition for preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-14 to (PodScheduled==False, Reason=Unschedulable)
I0110 19:49:09.653025  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-14: (1.186169ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39102]
I0110 19:49:09.653794  121338 wrap.go:47] PUT /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-14/status: (1.944645ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39126]
I0110 19:49:09.654343  121338 wrap.go:47] DELETE /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-5: (3.772259ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39128]
I0110 19:49:09.654811  121338 wrap.go:47] POST /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/events: (1.815734ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39130]
I0110 19:49:09.655377  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-14: (1.078442ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39126]
I0110 19:49:09.655680  121338 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0110 19:49:09.655872  121338 scheduling_queue.go:821] About to try and schedule pod preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-10
I0110 19:49:09.655889  121338 scheduler.go:454] Attempting to schedule pod: preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-10
I0110 19:49:09.655959  121338 factory.go:1070] Unable to schedule preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-10: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0110 19:49:09.656001  121338 factory.go:1175] Updating pod condition for preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-10 to (PodScheduled==False, Reason=Unschedulable)
I0110 19:49:09.657911  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-10: (916.975µs) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39102]
I0110 19:49:09.658703  121338 wrap.go:47] PUT /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-10/status: (2.230156ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39130]
I0110 19:49:09.658835  121338 wrap.go:47] DELETE /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-6: (3.801574ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39128]
I0110 19:49:09.659317  121338 wrap.go:47] PATCH /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/events/ppod-10.1578947fefa28ac9: (2.533403ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39132]
I0110 19:49:09.660256  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-10: (1.047032ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39130]
I0110 19:49:09.660523  121338 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0110 19:49:09.660691  121338 scheduling_queue.go:821] About to try and schedule pod preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-13
I0110 19:49:09.660705  121338 scheduler.go:454] Attempting to schedule pod: preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-13
I0110 19:49:09.660799  121338 factory.go:1070] Unable to schedule preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-13: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0110 19:49:09.660849  121338 factory.go:1175] Updating pod condition for preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-13 to (PodScheduled==False, Reason=Unschedulable)
I0110 19:49:09.662213  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-13: (1.092001ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39132]
I0110 19:49:09.663474  121338 wrap.go:47] DELETE /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-7: (4.299766ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39102]
I0110 19:49:09.663942  121338 wrap.go:47] PUT /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-13/status: (2.748537ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39130]
I0110 19:49:09.664035  121338 wrap.go:47] POST /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/events: (2.644056ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39134]
I0110 19:49:09.666335  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-13: (1.940245ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39132]
I0110 19:49:09.666758  121338 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0110 19:49:09.666974  121338 scheduling_queue.go:821] About to try and schedule pod preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-11
I0110 19:49:09.666986  121338 scheduler.go:454] Attempting to schedule pod: preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-11
I0110 19:49:09.667192  121338 factory.go:1070] Unable to schedule preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-11: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0110 19:49:09.667251  121338 factory.go:1175] Updating pod condition for preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-11 to (PodScheduled==False, Reason=Unschedulable)
I0110 19:49:09.668471  121338 wrap.go:47] DELETE /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-8: (4.175727ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39102]
I0110 19:49:09.668971  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-11: (1.122777ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39136]
I0110 19:49:09.670359  121338 wrap.go:47] PUT /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-11/status: (2.801147ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39132]
I0110 19:49:09.671002  121338 wrap.go:47] POST /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/events: (2.293041ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39138]
I0110 19:49:09.672913  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-11: (1.2321ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39132]
I0110 19:49:09.672957  121338 wrap.go:47] DELETE /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-9: (3.705704ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39102]
I0110 19:49:09.673217  121338 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0110 19:49:09.673372  121338 scheduling_queue.go:821] About to try and schedule pod preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-11
I0110 19:49:09.673388  121338 scheduler.go:454] Attempting to schedule pod: preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-11
I0110 19:49:09.673507  121338 factory.go:1070] Unable to schedule preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-11: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0110 19:49:09.673558  121338 factory.go:1175] Updating pod condition for preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-11 to (PodScheduled==False, Reason=Unschedulable)
I0110 19:49:09.675957  121338 wrap.go:47] PUT /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-11/status: (2.183977ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39136]
I0110 19:49:09.677408  121338 wrap.go:47] PATCH /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/events/ppod-11.157894800b3fd284: (3.216559ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39142]
I0110 19:49:09.677649  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-11: (2.020002ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39140]
I0110 19:49:09.678206  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-11: (1.807946ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39136]
I0110 19:49:09.678815  121338 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0110 19:49:09.678971  121338 scheduling_queue.go:821] About to try and schedule pod preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-19
I0110 19:49:09.678993  121338 scheduler.go:454] Attempting to schedule pod: preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-19
I0110 19:49:09.679364  121338 wrap.go:47] DELETE /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-10: (5.993188ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39138]
I0110 19:49:09.679653  121338 factory.go:1070] Unable to schedule preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-19: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0110 19:49:09.679818  121338 factory.go:1175] Updating pod condition for preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-19 to (PodScheduled==False, Reason=Unschedulable)
I0110 19:49:09.682317  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-19: (2.324111ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39142]
I0110 19:49:09.682523  121338 wrap.go:47] PUT /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-19/status: (1.817203ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39144]
I0110 19:49:09.684384  121338 wrap.go:47] PATCH /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/events/ppod-19.1578948009d51801: (2.954613ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39146]
I0110 19:49:09.684987  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-19: (2.082938ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39144]
I0110 19:49:09.685448  121338 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0110 19:49:09.685527  121338 wrap.go:47] DELETE /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-11: (5.43133ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39140]
I0110 19:49:09.685722  121338 scheduling_queue.go:821] About to try and schedule pod preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-13
I0110 19:49:09.685736  121338 scheduler.go:454] Attempting to schedule pod: preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-13
I0110 19:49:09.685830  121338 factory.go:1070] Unable to schedule preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-13: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0110 19:49:09.685870  121338 factory.go:1175] Updating pod condition for preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-13 to (PodScheduled==False, Reason=Unschedulable)
I0110 19:49:09.688331  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-13: (1.907995ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39148]
I0110 19:49:09.689763  121338 wrap.go:47] PATCH /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/events/ppod-13.157894800ade6163: (2.691519ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39150]
I0110 19:49:09.689901  121338 wrap.go:47] PUT /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-13/status: (3.795526ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39142]
I0110 19:49:09.690588  121338 wrap.go:47] DELETE /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-12: (4.761165ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39146]
I0110 19:49:09.691668  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-13: (1.392791ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39142]
I0110 19:49:09.691872  121338 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0110 19:49:09.692022  121338 scheduling_queue.go:821] About to try and schedule pod preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-14
I0110 19:49:09.692039  121338 scheduler.go:454] Attempting to schedule pod: preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-14
I0110 19:49:09.692133  121338 factory.go:1070] Unable to schedule preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-14: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0110 19:49:09.692184  121338 factory.go:1175] Updating pod condition for preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-14 to (PodScheduled==False, Reason=Unschedulable)
I0110 19:49:09.693630  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-14: (1.142741ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39148]
I0110 19:49:09.693977  121338 wrap.go:47] PUT /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-14/status: (1.587388ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39142]
I0110 19:49:09.695413  121338 wrap.go:47] PATCH /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/events/ppod-14.157894800a517a0a: (2.516781ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39152]
I0110 19:49:09.695570  121338 wrap.go:47] DELETE /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-13: (4.704899ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39146]
I0110 19:49:09.695572  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-14: (1.132988ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39142]
I0110 19:49:09.695846  121338 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0110 19:49:09.695990  121338 scheduling_queue.go:821] About to try and schedule pod preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-16
I0110 19:49:09.696010  121338 scheduler.go:454] Attempting to schedule pod: preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-16
I0110 19:49:09.696108  121338 factory.go:1070] Unable to schedule preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-16: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0110 19:49:09.696155  121338 factory.go:1175] Updating pod condition for preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-16 to (PodScheduled==False, Reason=Unschedulable)
I0110 19:49:09.698024  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-16: (1.158901ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39154]
I0110 19:49:09.698377  121338 wrap.go:47] PUT /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-16/status: (1.779713ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39148]
I0110 19:49:09.699729  121338 wrap.go:47] DELETE /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-14: (3.876848ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39152]
I0110 19:49:09.700122  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-16: (1.322273ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39148]
I0110 19:49:09.700199  121338 wrap.go:47] PATCH /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/events/ppod-16.157894800a19221b: (3.395575ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39156]
I0110 19:49:09.700362  121338 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0110 19:49:09.700574  121338 scheduling_queue.go:821] About to try and schedule pod preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-15
I0110 19:49:09.700593  121338 scheduler.go:454] Attempting to schedule pod: preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-15
I0110 19:49:09.700679  121338 factory.go:1070] Unable to schedule preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-15: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0110 19:49:09.700723  121338 factory.go:1175] Updating pod condition for preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-15 to (PodScheduled==False, Reason=Unschedulable)
I0110 19:49:09.702356  121338 wrap.go:47] PUT /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-15/status: (1.427451ms) 409 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39154]
I0110 19:49:09.703330  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-15: (2.411279ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39148]
I0110 19:49:09.703890  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-15: (890.652µs) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39154]
E0110 19:49:09.704125  121338 scheduler.go:292] Error getting the updated preemptor pod object: pods "ppod-15" not found
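
[annotation] This is the race the test is named for, caught cleanly: the status PUT for ppod-15 hit a 409 (19:49:09.702), the follow-up GET found the pod already deleted by cleanup (404 at 19:49:09.703), and the scheduler.go:292 path logs the error and abandons the pod instead of crashing. A hedged sketch of that tolerant shape, using invented types rather than the real scheduler's:

package main

import (
	"errors"
	"fmt"
)

var errNotFound = errors.New("not found")

type pod struct{ name string }

// getUpdatedPreemptor mimics the shape of the scheduler's
// post-preemption re-fetch: a preemptor deleted mid-flight is logged
// and dropped, not treated as fatal. Names are illustrative only.
func getUpdatedPreemptor(get func(string) (*pod, error), name string) *pod {
	p, err := get(name)
	if err != nil {
		// Matches the log line above: the pod vanished while
		// preemption was in flight, so nothing is left to schedule.
		fmt.Printf("Error getting the updated preemptor pod object: pods %q %v\n", name, err)
		return nil
	}
	return p
}

func main() {
	deleted := func(string) (*pod, error) { return nil, errNotFound }
	if getUpdatedPreemptor(deleted, "ppod-15") == nil {
		fmt.Println("preemptor gone; abandoning this scheduling cycle")
	}
}
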
I0110 19:49:09.704224  121338 scheduling_queue.go:821] About to try and schedule pod preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-21
I0110 19:49:09.704287  121338 scheduler.go:454] Attempting to schedule pod: preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-21
I0110 19:49:09.704392  121338 factory.go:1070] Unable to schedule preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-21: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0110 19:49:09.704435  121338 factory.go:1175] Updating pod condition for preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-21 to (PodScheduled==False, Reason=Unschedulable)
I0110 19:49:09.704466  121338 wrap.go:47] PATCH /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/events/ppod-15.1578947ff05ae100: (2.708102ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39158]
I0110 19:49:09.704538  121338 wrap.go:47] DELETE /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-15: (4.495284ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39152]
I0110 19:49:09.705741  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-21: (1.108961ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39154]
I0110 19:49:09.706269  121338 wrap.go:47] PUT /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-21/status: (1.603468ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39148]
I0110 19:49:09.707223  121338 wrap.go:47] PATCH /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/events/ppod-21.1578948008dce064: (2.114187ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39158]
I0110 19:49:09.708903  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-21: (1.698428ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39148]
I0110 19:49:09.709162  121338 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0110 19:49:09.709419  121338 scheduling_queue.go:821] About to try and schedule pod preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-16
I0110 19:49:09.709590  121338 scheduler.go:450] Skip schedule deleting pod: preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-16
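
[annotation] "Skip schedule deleting pod" is the scheduler short-circuiting: once cleanup has set a deletion timestamp on a pod, there is no point running predicates or binding it to a node, so the pod is dropped from the cycle up front. A tiny illustrative sketch of that check (types invented here, not the real API objects):

package main

import (
	"fmt"
	"time"
)

type pod struct {
	namespace, name   string
	deletionTimestamp *time.Time
}

// skipPodSchedule captures the idea behind the "Skip schedule
// deleting pod" log line: a non-nil deletion timestamp means the
// pod is on its way out and should not be scheduled.
func skipPodSchedule(p *pod) bool {
	if p.deletionTimestamp != nil {
		fmt.Printf("Skip schedule deleting pod: %s/%s\n", p.namespace, p.name)
		return true
	}
	return false
}

func main() {
	now := time.Now()
	doomed := &pod{namespace: "test-ns", name: "ppod-16", deletionTimestamp: &now}
	fmt.Println(skipPodSchedule(doomed)) // true
}
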
I0110 19:49:09.710650  121338 wrap.go:47] DELETE /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-16: (5.805311ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39152]
I0110 19:49:09.711311  121338 wrap.go:47] POST /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/events: (1.391221ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39158]
I0110 19:49:09.713607  121338 scheduling_queue.go:821] About to try and schedule pod preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-17
I0110 19:49:09.713642  121338 scheduler.go:450] Skip schedule deleting pod: preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-17
I0110 19:49:09.714815  121338 wrap.go:47] DELETE /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-17: (3.787155ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39152]
I0110 19:49:09.715539  121338 wrap.go:47] POST /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/events: (1.654098ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39158]
I0110 19:49:09.717889  121338 scheduling_queue.go:821] About to try and schedule pod preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-18
I0110 19:49:09.717924  121338 scheduler.go:450] Skip schedule deleting pod: preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-18
I0110 19:49:09.719083  121338 wrap.go:47] DELETE /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-18: (3.935603ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39152]
I0110 19:49:09.719861  121338 wrap.go:47] POST /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/events: (1.659152ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39158]
I0110 19:49:09.722090  121338 scheduling_queue.go:821] About to try and schedule pod preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-19
I0110 19:49:09.722136  121338 scheduler.go:450] Skip schedule deleting pod: preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-19
I0110 19:49:09.722165  121338 reflector.go:215] k8s.io/client-go/informers/factory.go:132: forcing resync
I0110 19:49:09.723631  121338 wrap.go:47] DELETE /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-19: (4.086347ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39152]
I0110 19:49:09.723832  121338 reflector.go:215] k8s.io/client-go/informers/factory.go:132: forcing resync
I0110 19:49:09.723868  121338 wrap.go:47] POST /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/events: (1.464638ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39158]
I0110 19:49:09.724255  121338 reflector.go:215] k8s.io/client-go/informers/factory.go:132: forcing resync
I0110 19:49:09.725073  121338 reflector.go:215] k8s.io/client-go/informers/factory.go:132: forcing resync
I0110 19:49:09.726222  121338 reflector.go:215] k8s.io/client-go/informers/factory.go:132: forcing resync
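
[annotation] The "forcing resync" lines are the shared informers' periodic resync firing mid-teardown: on a timer, each informer replays its cached objects through the registered handlers even when nothing changed, so any missed update self-heals. A toy, stdlib-only version of that loop (purely illustrative, not client-go's implementation):

package main

import (
	"fmt"
	"time"
)

// resyncLoop replays the cached objects to the handler every period,
// even if nothing changed, the way an informer's resync lets dropped
// or missed events self-correct.
func resyncLoop(period time.Duration, cache []string, handle func(string), stop <-chan struct{}) {
	t := time.NewTicker(period)
	defer t.Stop()
	for {
		select {
		case <-t.C:
			fmt.Println("forcing resync") // cf. reflector.go lines in the log
			for _, obj := range cache {
				handle(obj)
			}
		case <-stop:
			return
		}
	}
}

func main() {
	stop := make(chan struct{})
	go resyncLoop(10*time.Millisecond, []string{"ppod-0"}, func(o string) { fmt.Println("handle", o) }, stop)
	time.Sleep(35 * time.Millisecond)
	close(stop)
}
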
I0110 19:49:09.726832  121338 scheduling_queue.go:821] About to try and schedule pod preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-20
I0110 19:49:09.726875  121338 scheduler.go:450] Skip schedule deleting pod: preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-20
I0110 19:49:09.727889  121338 wrap.go:47] DELETE /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-20: (3.909623ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39152]
I0110 19:49:09.730687  121338 wrap.go:47] POST /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/events: (3.388525ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39158]
I0110 19:49:09.731185  121338 scheduling_queue.go:821] About to try and schedule pod preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-21
I0110 19:49:09.731216  121338 scheduler.go:450] Skip schedule deleting pod: preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-21
I0110 19:49:09.732773  121338 wrap.go:47] DELETE /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-21: (4.403222ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39152]
I0110 19:49:09.733156  121338 wrap.go:47] POST /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/events: (1.606024ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39158]
I0110 19:49:09.735702  121338 scheduling_queue.go:821] About to try and schedule pod preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-22
I0110 19:49:09.735793  121338 scheduler.go:450] Skip schedule deleting pod: preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-22
I0110 19:49:09.737011  121338 wrap.go:47] DELETE /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-22: (3.919277ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39152]
I0110 19:49:09.737596  121338 wrap.go:47] POST /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/events: (1.537716ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39158]
I0110 19:49:09.740158  121338 scheduling_queue.go:821] About to try and schedule pod preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-23
I0110 19:49:09.740199  121338 scheduler.go:450] Skip schedule deleting pod: preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-23
I0110 19:49:09.741762  121338 wrap.go:47] DELETE /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-23: (4.214992ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39152]
I0110 19:49:09.742477  121338 wrap.go:47] POST /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/events: (1.909645ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39158]
I0110 19:49:09.745114  121338 scheduling_queue.go:821] About to try and schedule pod preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-24
I0110 19:49:09.745157  121338 scheduler.go:450] Skip schedule deleting pod: preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-24
I0110 19:49:09.747041  121338 wrap.go:47] POST /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/events: (1.540769ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39158]
I0110 19:49:09.747061  121338 wrap.go:47] DELETE /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-24: (4.609905ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39152]
I0110 19:49:09.749951  121338 scheduling_queue.go:821] About to try and schedule pod preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-25
I0110 19:49:09.749996  121338 scheduler.go:450] Skip schedule deleting pod: preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-25
I0110 19:49:09.751337  121338 wrap.go:47] DELETE /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-25: (3.945191ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39152]
I0110 19:49:09.751798  121338 wrap.go:47] POST /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/events: (1.428976ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39158]
I0110 19:49:09.754063  121338 scheduling_queue.go:821] About to try and schedule pod preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-26
I0110 19:49:09.754118  121338 scheduler.go:450] Skip schedule deleting pod: preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-26
I0110 19:49:09.755378  121338 wrap.go:47] DELETE /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-26: (3.708994ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39152]
I0110 19:49:09.755855  121338 wrap.go:47] POST /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/events: (1.420464ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39158]
I0110 19:49:09.758153  121338 scheduling_queue.go:821] About to try and schedule pod preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-27
I0110 19:49:09.758193  121338 scheduler.go:450] Skip schedule deleting pod: preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-27
I0110 19:49:09.759872  121338 wrap.go:47] POST /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/events: (1.337581ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39158]
I0110 19:49:09.760458  121338 wrap.go:47] DELETE /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-27: (4.734059ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39152]
I0110 19:49:09.763664  121338 scheduling_queue.go:821] About to try and schedule pod preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-28
I0110 19:49:09.763723  121338 scheduler.go:450] Skip schedule deleting pod: preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-28
I0110 19:49:09.765090  121338 wrap.go:47] DELETE /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-28: (4.21001ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39158]
I0110 19:49:09.765594  121338 wrap.go:47] POST /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/events: (1.621378ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39154]
I0110 19:49:09.768107  121338 scheduling_queue.go:821] About to try and schedule pod preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-29
I0110 19:49:09.768218  121338 scheduler.go:450] Skip schedule deleting pod: preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-29
I0110 19:49:09.769526  121338 wrap.go:47] DELETE /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-29: (4.039613ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39158]
I0110 19:49:09.770199  121338 wrap.go:47] POST /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/events: (1.539281ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39154]
I0110 19:49:09.772603  121338 scheduling_queue.go:821] About to try and schedule pod preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-30
I0110 19:49:09.772678  121338 scheduler.go:450] Skip schedule deleting pod: preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-30
I0110 19:49:09.773833  121338 wrap.go:47] DELETE /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-30: (3.904337ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39158]
I0110 19:49:09.774435  121338 wrap.go:47] POST /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/events: (1.489344ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39154]
I0110 19:49:09.776899  121338 scheduling_queue.go:821] About to try and schedule pod preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-31
I0110 19:49:09.776940  121338 scheduler.go:450] Skip schedule deleting pod: preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-31
I0110 19:49:09.778198  121338 wrap.go:47] DELETE /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-31: (3.959731ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39158]
I0110 19:49:09.778540  121338 wrap.go:47] POST /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/events: (1.288095ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39154]
I0110 19:49:09.781418  121338 scheduling_queue.go:821] About to try and schedule pod preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-32
I0110 19:49:09.781455  121338 scheduler.go:450] Skip schedule deleting pod: preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-32
I0110 19:49:09.782994  121338 wrap.go:47] DELETE /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-32: (4.412143ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39158]
I0110 19:49:09.783196  121338 wrap.go:47] POST /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/events: (1.368998ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39154]
I0110 19:49:09.786017  121338 scheduling_queue.go:821] About to try and schedule pod preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-33
I0110 19:49:09.786179  121338 scheduler.go:450] Skip schedule deleting pod: preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-33
I0110 19:49:09.787246  121338 wrap.go:47] DELETE /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-33: (3.841105ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39158]
I0110 19:49:09.787929  121338 wrap.go:47] POST /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/events: (1.462297ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39154]
I0110 19:49:09.790879  121338 scheduling_queue.go:821] About to try and schedule pod preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-34
I0110 19:49:09.790929  121338 scheduler.go:450] Skip schedule deleting pod: preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-34
I0110 19:49:09.791949  121338 wrap.go:47] DELETE /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-34: (4.290445ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39158]
I0110 19:49:09.792676  121338 wrap.go:47] POST /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/events: (1.495015ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39154]
I0110 19:49:09.794865  121338 scheduling_queue.go:821] About to try and schedule pod preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-35
I0110 19:49:09.794924  121338 scheduler.go:450] Skip schedule deleting pod: preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-35
I0110 19:49:09.796556  121338 wrap.go:47] DELETE /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-35: (4.268784ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39158]
I0110 19:49:09.796637  121338 wrap.go:47] POST /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/events: (1.465872ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39154]
I0110 19:49:09.799439  121338 scheduling_queue.go:821] About to try and schedule pod preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-36
I0110 19:49:09.799478  121338 scheduler.go:450] Skip schedule deleting pod: preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-36
I0110 19:49:09.801081  121338 wrap.go:47] DELETE /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-36: (4.154873ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39154]
I0110 19:49:09.801329  121338 wrap.go:47] POST /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/events: (1.494818ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39158]
I0110 19:49:09.803925  121338 scheduling_queue.go:821] About to try and schedule pod preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-37
I0110 19:49:09.803963  121338 scheduler.go:450] Skip schedule deleting pod: preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-37
I0110 19:49:09.805214  121338 wrap.go:47] DELETE /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-37: (3.788181ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39154]
I0110 19:49:09.805766  121338 wrap.go:47] POST /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/events: (1.508918ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39158]
I0110 19:49:09.808088  121338 scheduling_queue.go:821] About to try and schedule pod preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-38
I0110 19:49:09.808132  121338 scheduler.go:450] Skip schedule deleting pod: preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-38
I0110 19:49:09.809216  121338 wrap.go:47] DELETE /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-38: (3.64405ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39154]
I0110 19:49:09.809834  121338 wrap.go:47] POST /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/events: (1.414768ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39158]
I0110 19:49:09.812075  121338 scheduling_queue.go:821] About to try and schedule pod preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-39
I0110 19:49:09.812112  121338 scheduler.go:450] Skip schedule deleting pod: preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-39
I0110 19:49:09.813296  121338 wrap.go:47] DELETE /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-39: (3.684015ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39154]
I0110 19:49:09.813817  121338 wrap.go:47] POST /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/events: (1.427126ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39158]
I0110 19:49:09.815816  121338 scheduling_queue.go:821] About to try and schedule pod preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-40
I0110 19:49:09.815858  121338 scheduler.go:450] Skip schedule deleting pod: preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-40
I0110 19:49:09.817033  121338 wrap.go:47] DELETE /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-40: (3.472532ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39154]
I0110 19:49:09.817589  121338 wrap.go:47] POST /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/events: (1.373947ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39158]
I0110 19:49:09.819907  121338 scheduling_queue.go:821] About to try and schedule pod preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-41
I0110 19:49:09.820011  121338 scheduler.go:450] Skip schedule deleting pod: preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-41
I0110 19:49:09.821045  121338 wrap.go:47] DELETE /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-41: (3.634285ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39154]
I0110 19:49:09.822061  121338 wrap.go:47] POST /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/events: (1.72408ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39158]
I0110 19:49:09.823875  121338 scheduling_queue.go:821] About to try and schedule pod preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-42
I0110 19:49:09.823914  121338 scheduler.go:450] Skip schedule deleting pod: preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-42
I0110 19:49:09.825387  121338 wrap.go:47] DELETE /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-42: (4.009537ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39154]
I0110 19:49:09.825858  121338 wrap.go:47] POST /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/events: (1.656706ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39158]
I0110 19:49:09.828417  121338 scheduling_queue.go:821] About to try and schedule pod preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-43
I0110 19:49:09.828534  121338 scheduler.go:450] Skip schedule deleting pod: preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-43
I0110 19:49:09.829617  121338 wrap.go:47] DELETE /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-43: (3.839198ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39154]
I0110 19:49:09.830665  121338 wrap.go:47] POST /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/events: (1.490485ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39158]
I0110 19:49:09.832481  121338 scheduling_queue.go:821] About to try and schedule pod preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-44
I0110 19:49:09.832561  121338 scheduler.go:450] Skip schedule deleting pod: preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-44
I0110 19:49:09.833905  121338 wrap.go:47] DELETE /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-44: (3.998168ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39154]
I0110 19:49:09.834324  121338 wrap.go:47] POST /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/events: (1.519526ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39158]
I0110 19:49:09.836873  121338 scheduling_queue.go:821] About to try and schedule pod preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-45
I0110 19:49:09.836911  121338 scheduler.go:450] Skip schedule deleting pod: preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-45
I0110 19:49:09.838157  121338 wrap.go:47] DELETE /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-45: (3.862578ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39154]
I0110 19:49:09.838567  121338 wrap.go:47] POST /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/events: (1.392673ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39158]
I0110 19:49:09.841042  121338 scheduling_queue.go:821] About to try and schedule pod preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-46
I0110 19:49:09.841075  121338 scheduler.go:450] Skip schedule deleting pod: preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-46
I0110 19:49:09.842384  121338 wrap.go:47] DELETE /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-46: (3.78842ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39154]
I0110 19:49:09.842920  121338 wrap.go:47] POST /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/events: (1.578068ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39158]
I0110 19:49:09.845421  121338 scheduling_queue.go:821] About to try and schedule pod preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-47
I0110 19:49:09.845464  121338 scheduler.go:450] Skip schedule deleting pod: preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-47
I0110 19:49:09.846636  121338 wrap.go:47] DELETE /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-47: (3.868916ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39154]
I0110 19:49:09.847130  121338 wrap.go:47] POST /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/events: (1.378023ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39158]
I0110 19:49:09.849556  121338 scheduling_queue.go:821] About to try and schedule pod preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-48
I0110 19:49:09.849622  121338 scheduler.go:450] Skip schedule deleting pod: preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-48
I0110 19:49:09.850788  121338 wrap.go:47] DELETE /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-48: (3.752209ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39154]
I0110 19:49:09.851746  121338 wrap.go:47] POST /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/events: (1.889285ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39158]
I0110 19:49:09.853916  121338 scheduling_queue.go:821] About to try and schedule pod preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-49
I0110 19:49:09.853955  121338 scheduler.go:450] Skip schedule deleting pod: preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-49
I0110 19:49:09.855222  121338 wrap.go:47] DELETE /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-49: (3.960157ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39154]
I0110 19:49:09.856322  121338 wrap.go:47] POST /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/events: (1.951777ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39158]
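
The run of scheduler.go:450 lines above shows the scheduler popping ppod-40 through ppod-49 from the queue and skipping each one because its deletion timestamp is already set: the test is tearing the namespace down, so there is no point binding a pod that is on its way out. A minimal sketch of that guard, assuming a klog-style logger (the helper name is illustrative, not the scheduler's actual function):

package main

import (
	v1 "k8s.io/api/core/v1"
	"k8s.io/klog"
)

// skipPodSchedule reports whether a popped pod should be skipped instead of
// scheduled. A pod with DeletionTimestamp set is already being removed, which
// is what produces the "Skip schedule deleting pod" lines above.
func skipPodSchedule(pod *v1.Pod) bool {
	if pod.DeletionTimestamp != nil {
		klog.V(3).Infof("Skip schedule deleting pod: %v/%v", pod.Namespace, pod.Name)
		return true
	}
	return false
}
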
I0110 19:49:09.859639  121338 wrap.go:47] DELETE /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/rpod-0: (3.719411ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39154]
I0110 19:49:09.861127  121338 wrap.go:47] DELETE /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/rpod-1: (1.084364ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39154]
I0110 19:49:09.865599  121338 wrap.go:47] DELETE /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/preemptor-pod: (4.056654ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39154]
I0110 19:49:09.868358  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-0: (1.098636ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39154]
I0110 19:49:09.871181  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-1: (1.153665ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39154]
I0110 19:49:09.873920  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-2: (1.108792ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39154]
I0110 19:49:09.876636  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-3: (1.097051ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39154]
I0110 19:49:09.879280  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-4: (1.003707ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39154]
I0110 19:49:09.881968  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-5: (1.086285ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39154]
I0110 19:49:09.884718  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-6: (1.142181ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39154]
I0110 19:49:09.887436  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-7: (1.153435ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39154]
I0110 19:49:09.890070  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-8: (1.067237ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39154]
I0110 19:49:09.892933  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-9: (1.193445ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39154]
I0110 19:49:09.895956  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-10: (1.263019ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39154]
I0110 19:49:09.900813  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-11: (3.075671ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39154]
I0110 19:49:09.903942  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-12: (1.383501ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39154]
I0110 19:49:09.906939  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-13: (1.34593ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39154]
I0110 19:49:09.909915  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-14: (1.295049ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39154]
I0110 19:49:09.912869  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-15: (1.315429ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39154]
I0110 19:49:09.915807  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-16: (1.254444ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39154]
I0110 19:49:09.918710  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-17: (1.27163ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39154]
I0110 19:49:09.921728  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-18: (1.337844ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39154]
I0110 19:49:09.924788  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-19: (1.139992ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39154]
I0110 19:49:09.927552  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-20: (1.123147ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39154]
I0110 19:49:09.930455  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-21: (1.309548ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39154]
I0110 19:49:09.933458  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-22: (1.25714ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39154]
I0110 19:49:09.936430  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-23: (1.250161ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39154]
I0110 19:49:09.939463  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-24: (1.43366ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39154]
I0110 19:49:09.942417  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-25: (1.287454ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39154]
I0110 19:49:09.945399  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-26: (1.238981ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39154]
I0110 19:49:09.948212  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-27: (1.187397ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39154]
I0110 19:49:09.951305  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-28: (1.338513ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39154]
I0110 19:49:09.954300  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-29: (1.32517ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39154]
I0110 19:49:09.957272  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-30: (1.271683ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39154]
I0110 19:49:09.960343  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-31: (1.254537ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39154]
I0110 19:49:09.963188  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-32: (1.238516ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39154]
I0110 19:49:09.965887  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-33: (1.071146ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39154]
I0110 19:49:09.968760  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-34: (1.186936ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39154]
I0110 19:49:09.971486  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-35: (1.159515ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39154]
I0110 19:49:09.974330  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-36: (1.194733ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39154]
I0110 19:49:09.977108  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-37: (1.183458ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39154]
I0110 19:49:09.979794  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-38: (1.030062ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39154]
I0110 19:49:09.982414  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-39: (1.040871ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39154]
I0110 19:49:09.985017  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-40: (1.008361ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39154]
I0110 19:49:09.987706  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-41: (1.055089ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39154]
I0110 19:49:09.990375  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-42: (1.134334ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39154]
I0110 19:49:09.992987  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-43: (1.00392ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39154]
I0110 19:49:09.995579  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-44: (972.974µs) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39154]
I0110 19:49:09.998119  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-45: (988.656µs) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39154]
I0110 19:49:10.000707  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-46: (1.021931ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39154]
I0110 19:49:10.003283  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-47: (1.024594ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39154]
I0110 19:49:10.006075  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-48: (1.055707ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39154]
I0110 19:49:10.008776  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-49: (1.065531ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39154]
I0110 19:49:10.011519  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/rpod-0: (1.153559ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39154]
I0110 19:49:10.014218  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/rpod-1: (1.118362ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39154]
I0110 19:49:10.017104  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/preemptor-pod: (1.19768ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39154]
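
After the deletes (note that rpod-1 already returns 404 at delete time: it was evicted earlier by the preemptor), the test verifies the cleanup by GETting every pod and expecting NotFound, which is the long run of 404 lines above. A hedged sketch of such a verification loop with client-go of this era (waitForPodsGone is an illustrative name, not the test's actual helper):

package main

import (
	"fmt"
	"time"

	"k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/wait"
	"k8s.io/client-go/kubernetes"
)

// waitForPodsGone polls until every named pod returns 404 (NotFound),
// matching the run of GET .../pods/ppod-N requests above.
func waitForPodsGone(cs kubernetes.Interface, ns string, names []string) error {
	for _, name := range names {
		err := wait.Poll(100*time.Millisecond, 30*time.Second, func() (bool, error) {
			_, err := cs.CoreV1().Pods(ns).Get(name, metav1.GetOptions{})
			if errors.IsNotFound(err) {
				return true, nil // pod is gone
			}
			return false, err // keep polling while the pod still exists
		})
		if err != nil {
			return fmt.Errorf("pod %s/%s was not deleted: %v", ns, name, err)
		}
	}
	return nil
}
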
I0110 19:49:10.019707  121338 wrap.go:47] POST /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods: (2.045271ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39154]
I0110 19:49:10.019919  121338 scheduling_queue.go:821] About to try and schedule pod preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/rpod-0
I0110 19:49:10.019939  121338 scheduler.go:454] Attempting to schedule pod: preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/rpod-0
I0110 19:49:10.020088  121338 scheduler_binder.go:211] AssumePodVolumes for pod "preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/rpod-0", node "node1"
I0110 19:49:10.020106  121338 scheduler_binder.go:221] AssumePodVolumes for pod "preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/rpod-0", node "node1": all PVCs bound and nothing to do
I0110 19:49:10.020152  121338 factory.go:1166] Attempting to bind rpod-0 to node1
I0110 19:49:10.022287  121338 wrap.go:47] POST /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/rpod-0/binding: (1.864863ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39158]
I0110 19:49:10.022290  121338 wrap.go:47] POST /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods: (2.100935ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39154]
I0110 19:49:10.022549  121338 scheduler.go:569] pod preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/rpod-0 is bound successfully on node node1, 1 nodes evaluated, 1 nodes were found feasible
I0110 19:49:10.022842  121338 scheduling_queue.go:821] About to try and schedule pod preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/rpod-1
I0110 19:49:10.022860  121338 scheduler.go:454] Attempting to schedule pod: preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/rpod-1
I0110 19:49:10.022969  121338 scheduler_binder.go:211] AssumePodVolumes for pod "preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/rpod-1", node "node1"
I0110 19:49:10.022987  121338 scheduler_binder.go:221] AssumePodVolumes for pod "preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/rpod-1", node "node1": all PVCs bound and nothing to do
I0110 19:49:10.023028  121338 factory.go:1166] Attempting to bind rpod-1 to node1
I0110 19:49:10.024561  121338 wrap.go:47] POST /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/events: (1.727505ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39158]
I0110 19:49:10.024926  121338 wrap.go:47] POST /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/rpod-1/binding: (1.694718ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39154]
I0110 19:49:10.025142  121338 scheduler.go:569] pod preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/rpod-1 is bound successfully on node node1, 1 nodes evaluated, 1 nodes were found feasible
I0110 19:49:10.045597  121338 wrap.go:47] POST /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/events: (20.207373ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39154]
I0110 19:49:10.124994  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/rpod-0: (1.862398ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39154]
I0110 19:49:10.227940  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/rpod-1: (2.029518ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39154]
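
rpod-0 and rpod-1 then go through the normal happy path: AssumePodVolumes finds all PVCs bound, and factory.go:1166 binds each pod, which on the wire is the POST to the pods/<name>/binding subresource logged above. Roughly, with client-go (the v1.Binding shape is the real API; the wrapper function is a sketch):

package main

import (
	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
)

// bindPodToNode issues POST /api/v1/namespaces/{ns}/pods/{pod}/binding,
// the request logged for rpod-0, rpod-1 and later preemptor-pod.
func bindPodToNode(cs kubernetes.Interface, ns, pod, node string) error {
	return cs.CoreV1().Pods(ns).Bind(&v1.Binding{
		ObjectMeta: metav1.ObjectMeta{Namespace: ns, Name: pod},
		Target:     v1.ObjectReference{Kind: "Node", Name: node},
	})
}
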
I0110 19:49:10.228266  121338 preemption_test.go:561] Creating the preemptor pod...
I0110 19:49:10.230880  121338 wrap.go:47] POST /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods: (2.330368ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39154]
I0110 19:49:10.231029  121338 scheduling_queue.go:821] About to try and schedule pod preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/preemptor-pod
I0110 19:49:10.231046  121338 scheduler.go:454] Attempting to schedule pod: preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/preemptor-pod
I0110 19:49:10.231162  121338 factory.go:1070] Unable to schedule preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/preemptor-pod: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0110 19:49:10.231209  121338 factory.go:1175] Updating pod condition for preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/preemptor-pod to (PodScheduled==False, Reason=Unschedulable)
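
The preemptor does not fit (node1's CPU and memory are consumed by rpod-0 and rpod-1), so factory.go:1175 records that on the pod via the status PUT seen shortly after. The condition it writes is, approximately (field names are the real v1.PodCondition API; the wrapper is illustrative):

package main

import (
	v1 "k8s.io/api/core/v1"
)

// unschedulableCondition is the PodScheduled=False condition that the status
// PUT above writes onto preemptor-pod (message copied from the log line).
func unschedulableCondition() v1.PodCondition {
	return v1.PodCondition{
		Type:    v1.PodScheduled,
		Status:  v1.ConditionFalse,
		Reason:  v1.PodReasonUnschedulable, // the literal string "Unschedulable"
		Message: "0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.",
	}
}
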
I0110 19:49:10.231248  121338 preemption_test.go:567] Creating additional pods...
I0110 19:49:10.233183  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/preemptor-pod: (1.433624ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39166]
I0110 19:49:10.233650  121338 wrap.go:47] POST /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/events: (1.882197ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39168]
I0110 19:49:10.233941  121338 wrap.go:47] PUT /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/preemptor-pod/status: (2.477364ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39154]
I0110 19:49:10.234282  121338 wrap.go:47] POST /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods: (2.812397ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39158]
I0110 19:49:10.235553  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/preemptor-pod: (1.162907ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39168]
I0110 19:49:10.235782  121338 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
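
generic_scheduler.go:1108 fires once scheduling fails: preemption walks the nodes and keeps those whose failure reasons could be cured by evicting lower-priority pods. Insufficient cpu/memory qualifies; a constraint like a node-selector mismatch would not. A hedged, simplified version of that screening (real preemption inspects typed predicate failures, not strings):

package main

// isPreemptionCandidate reports whether evicting pods from a node could make
// the preemptor fit there. Resource shortages can be cured by eviction, which
// is why node1 is logged as "a potential node for preemption"; failures that
// no eviction can fix (wrong zone, selector mismatch) disqualify the node.
func isPreemptionCandidate(failureReasons []string) bool {
	for _, r := range failureReasons {
		switch r {
		case "Insufficient cpu", "Insufficient memory":
			// curable: deleting lower-priority pods frees these resources
		default:
			return false
		}
	}
	return len(failureReasons) > 0
}
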
I0110 19:49:10.236762  121338 wrap.go:47] POST /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods: (2.044311ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39158]
I0110 19:49:10.237825  121338 wrap.go:47] PUT /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/preemptor-pod/status: (1.748548ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39168]
I0110 19:49:10.238821  121338 wrap.go:47] POST /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods: (1.616935ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39158]
I0110 19:49:10.240828  121338 wrap.go:47] POST /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods: (1.510535ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39158]
I0110 19:49:10.242066  121338 wrap.go:47] DELETE /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/rpod-1: (3.807135ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39168]
I0110 19:49:10.242349  121338 scheduling_queue.go:821] About to try and schedule pod preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/preemptor-pod
I0110 19:49:10.242369  121338 scheduler.go:454] Attempting to schedule pod: preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/preemptor-pod
I0110 19:49:10.242485  121338 factory.go:1070] Unable to schedule preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/preemptor-pod: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0110 19:49:10.242538  121338 factory.go:1175] Updating pod condition for preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/preemptor-pod to (PodScheduled==False, Reason=Unschedulable)
I0110 19:49:10.242748  121338 wrap.go:47] POST /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods: (1.544383ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39158]
I0110 19:49:10.244605  121338 wrap.go:47] POST /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods: (1.536874ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39170]
I0110 19:49:10.244802  121338 wrap.go:47] PUT /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/preemptor-pod/status: (2.050498ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39166]
I0110 19:49:10.245220  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/preemptor-pod: (2.105783ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39158]
I0110 19:49:10.245880  121338 wrap.go:47] POST /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods: (1.024082ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39170]
I0110 19:49:10.246429  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/preemptor-pod: (1.297993ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39166]
I0110 19:49:10.246463  121338 wrap.go:47] POST /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/events: (3.917765ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39168]
I0110 19:49:10.246732  121338 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0110 19:49:10.249341  121338 wrap.go:47] PUT /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/preemptor-pod/status: (2.205497ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39172]
I0110 19:49:10.249349  121338 wrap.go:47] POST /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods: (1.907906ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39170]
I0110 19:49:10.249622  121338 wrap.go:47] PATCH /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/events/preemptor-pod.157894802cdd6281: (2.589533ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39158]
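
Note the PATCH to events/preemptor-pod.157894802cdd6281 just above: the event recorder deduplicates repeated FailedScheduling events, so a recurrence becomes a count bump on the existing Event object instead of a fresh POST. That behavior comes from client-go's event correlator; a minimal recorder setup that exhibits it (namespace and client are assumed):

package main

import (
	v1 "k8s.io/api/core/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/kubernetes/scheme"
	typedcorev1 "k8s.io/client-go/kubernetes/typed/core/v1"
	"k8s.io/client-go/tools/record"
)

// newRecorder wires an event recorder to the API server. Recording the same
// (object, reason, message) twice yields one POST followed by a PATCH that
// increments .count, matching the wrap.go lines above.
func newRecorder(cs kubernetes.Interface, ns string) record.EventRecorder {
	b := record.NewBroadcaster()
	b.StartRecordingToSink(&typedcorev1.EventSinkImpl{Interface: cs.CoreV1().Events(ns)})
	return b.NewRecorder(scheme.Scheme, v1.EventSource{Component: "default-scheduler"})
}
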
I0110 19:49:10.249782  121338 scheduling_queue.go:821] About to try and schedule pod preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/preemptor-pod
I0110 19:49:10.249795  121338 scheduler.go:454] Attempting to schedule pod: preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/preemptor-pod
I0110 19:49:10.249904  121338 scheduler_binder.go:211] AssumePodVolumes for pod "preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/preemptor-pod", node "node1"
I0110 19:49:10.249920  121338 scheduler_binder.go:221] AssumePodVolumes for pod "preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/preemptor-pod", node "node1": all PVCs bound and nothing to do
I0110 19:49:10.249960  121338 factory.go:1166] Attempting to bind preemptor-pod to node1
I0110 19:49:10.250023  121338 scheduling_queue.go:821] About to try and schedule pod preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-7
I0110 19:49:10.250057  121338 scheduler.go:454] Attempting to schedule pod: preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-7
I0110 19:49:10.250321  121338 factory.go:1070] Unable to schedule preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-7: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0110 19:49:10.250393  121338 factory.go:1175] Updating pod condition for preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-7 to (PodScheduled==False, Reason=Unschedulable)
I0110 19:49:10.251758  121338 wrap.go:47] POST /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods: (1.881894ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39170]
I0110 19:49:10.251993  121338 wrap.go:47] POST /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/preemptor-pod/binding: (1.842019ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39158]
I0110 19:49:10.252373  121338 scheduler.go:569] pod preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/preemptor-pod is bound successfully on node node1, 1 nodes evaluated, 1 nodes were found feasible
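
With rpod-1 preempted (its DELETE was logged at 19:49:10.242), the retried preemptor now fits and binds here, even as ppod creations and their own failed scheduling attempts keep interleaving below: that concurrency is precisely the race TestPreemptionRaces exercises. A rough sketch of one iteration, matching the preemption_test.go:561/567 markers above (pod shapes, sizes, and priorities are illustrative, not the test's exact fixtures):

package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
)

// pod returns a minimal pod with the given priority and resource request, so
// the high-priority preemptor collides with the low-priority ppods on the
// single test node.
func pod(name string, priority int32, cpu, mem string) *v1.Pod {
	prio := priority
	return &v1.Pod{
		ObjectMeta: metav1.ObjectMeta{Name: name},
		Spec: v1.PodSpec{
			Priority: &prio,
			Containers: []v1.Container{{
				Name:  "pause",
				Image: "k8s.gcr.io/pause:3.1",
				Resources: v1.ResourceRequirements{
					Requests: v1.ResourceList{
						v1.ResourceCPU:    resource.MustParse(cpu),
						v1.ResourceMemory: resource.MustParse(mem),
					},
				},
			}},
		},
	}
}

// runIteration creates the preemptor first, then immediately creates the
// additional low-priority pods, so that pod creation races with the
// preemption already in flight.
func runIteration(cs kubernetes.Interface, ns string) error {
	if _, err := cs.CoreV1().Pods(ns).Create(pod("preemptor-pod", 100, "4", "4Gi")); err != nil {
		return err
	}
	for i := 0; i < 50; i++ {
		if _, err := cs.CoreV1().Pods(ns).Create(pod(fmt.Sprintf("ppod-%d", i), 0, "100m", "100Mi")); err != nil {
			return err
		}
	}
	// ...the test then waits for the preemptor to be scheduled, deletes
	// everything, and verifies each pod is gone (the 404 GETs above).
	return nil
}
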
I0110 19:49:10.252935  121338 wrap.go:47] PUT /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-7/status: (2.203441ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39172]
I0110 19:49:10.253345  121338 wrap.go:47] POST /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/events: (1.675729ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39176]
I0110 19:49:10.253820  121338 wrap.go:47] POST /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods: (1.391894ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39158]
I0110 19:49:10.253933  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-7: (1.786368ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39170]
I0110 19:49:10.254545  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-7: (1.071047ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39178]
I0110 19:49:10.254907  121338 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0110 19:49:10.255036  121338 scheduling_queue.go:821] About to try and schedule pod preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-6
I0110 19:49:10.255066  121338 scheduler.go:454] Attempting to schedule pod: preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-6
I0110 19:49:10.255116  121338 wrap.go:47] POST /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/events: (1.345624ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39176]
I0110 19:49:10.255169  121338 factory.go:1070] Unable to schedule preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-6: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0110 19:49:10.255214  121338 factory.go:1175] Updating pod condition for preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-6 to (PodScheduled==False, Reason=Unschedulable)
I0110 19:49:10.256623  121338 wrap.go:47] POST /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods: (2.396381ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39158]
I0110 19:49:10.258107  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-6: (1.692123ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39174]
I0110 19:49:10.258370  121338 wrap.go:47] POST /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/events: (2.134184ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39176]
I0110 19:49:10.258474  121338 wrap.go:47] POST /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods: (1.451675ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39158]
I0110 19:49:10.258848  121338 wrap.go:47] PUT /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-6/status: (3.311127ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39178]
I0110 19:49:10.260526  121338 wrap.go:47] POST /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods: (1.507175ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39176]
I0110 19:49:10.260703  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-6: (1.489811ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39178]
I0110 19:49:10.260995  121338 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0110 19:49:10.261148  121338 scheduling_queue.go:821] About to try and schedule pod preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-9
I0110 19:49:10.261164  121338 scheduler.go:454] Attempting to schedule pod: preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-9
I0110 19:49:10.261269  121338 factory.go:1070] Unable to schedule preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-9: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0110 19:49:10.261319  121338 factory.go:1175] Updating pod condition for preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-9 to (PodScheduled==False, Reason=Unschedulable)
I0110 19:49:10.263141  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-9: (1.581482ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39174]
I0110 19:49:10.263560  121338 wrap.go:47] POST /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/events: (1.514325ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39182]
I0110 19:49:10.263830  121338 wrap.go:47] PUT /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-9/status: (1.961554ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39180]
I0110 19:49:10.263987  121338 wrap.go:47] POST /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods: (2.923754ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39176]
I0110 19:49:10.265467  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-9: (1.138971ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39182]
I0110 19:49:10.265847  121338 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0110 19:49:10.266001  121338 scheduling_queue.go:821] About to try and schedule pod preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-12
I0110 19:49:10.266021  121338 scheduler.go:454] Attempting to schedule pod: preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-12
I0110 19:49:10.266120  121338 factory.go:1070] Unable to schedule preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-12: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0110 19:49:10.266165  121338 factory.go:1175] Updating pod condition for preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-12 to (PodScheduled==False, Reason=Unschedulable)
I0110 19:49:10.266561  121338 wrap.go:47] POST /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods: (2.083735ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39174]
I0110 19:49:10.267891  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-12: (1.031176ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39174]
I0110 19:49:10.268419  121338 wrap.go:47] POST /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/events: (1.48799ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39184]
I0110 19:49:10.268643  121338 wrap.go:47] POST /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods: (1.57389ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39186]
I0110 19:49:10.269076  121338 wrap.go:47] PUT /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-12/status: (2.504546ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39182]
I0110 19:49:10.270772  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-12: (1.311864ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39182]
I0110 19:49:10.271012  121338 wrap.go:47] POST /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods: (1.878724ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39184]
I0110 19:49:10.271029  121338 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0110 19:49:10.272408  121338 scheduling_queue.go:821] About to try and schedule pod preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-9
I0110 19:49:10.272427  121338 scheduler.go:454] Attempting to schedule pod: preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-9
I0110 19:49:10.272537  121338 factory.go:1070] Unable to schedule preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-9: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0110 19:49:10.272572  121338 factory.go:1175] Updating pod condition for preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-9 to (PodScheduled==False, Reason=Unschedulable)
I0110 19:49:10.272865  121338 wrap.go:47] POST /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods: (1.442694ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39182]
I0110 19:49:10.274739  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-9: (1.761634ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39188]
I0110 19:49:10.275586  121338 wrap.go:47] POST /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods: (1.934405ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39190]
I0110 19:49:10.275625  121338 wrap.go:47] PUT /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-9/status: (2.677862ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39174]
I0110 19:49:10.275729  121338 wrap.go:47] PATCH /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/events/ppod-9.157894802ea8d3e7: (2.649429ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39182]
I0110 19:49:10.277157  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-9: (1.068272ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39190]
I0110 19:49:10.277418  121338 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0110 19:49:10.277509  121338 wrap.go:47] POST /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods: (1.362466ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39188]
I0110 19:49:10.277606  121338 scheduling_queue.go:821] About to try and schedule pod preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-16
I0110 19:49:10.277620  121338 scheduler.go:454] Attempting to schedule pod: preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-16
I0110 19:49:10.277718  121338 factory.go:1070] Unable to schedule preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-16: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0110 19:49:10.277772  121338 factory.go:1175] Updating pod condition for preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-16 to (PodScheduled==False, Reason=Unschedulable)
I0110 19:49:10.280282  121338 wrap.go:47] PUT /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-16/status: (2.30084ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39190]
I0110 19:49:10.280301  121338 wrap.go:47] POST /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/events: (1.962666ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39194]
I0110 19:49:10.280658  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-16: (2.361459ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39192]
I0110 19:49:10.281044  121338 wrap.go:47] POST /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods: (3.132654ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39188]
I0110 19:49:10.281909  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-16: (1.079413ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39190]
I0110 19:49:10.282154  121338 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0110 19:49:10.282343  121338 scheduling_queue.go:821] About to try and schedule pod preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-18
I0110 19:49:10.282363  121338 scheduler.go:454] Attempting to schedule pod: preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-18
I0110 19:49:10.282510  121338 factory.go:1070] Unable to schedule preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-18: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0110 19:49:10.282558  121338 factory.go:1175] Updating pod condition for preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-18 to (PodScheduled==False, Reason=Unschedulable)
I0110 19:49:10.283510  121338 wrap.go:47] POST /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods: (1.466396ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39192]
I0110 19:49:10.284198  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-18: (1.341354ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39194]
I0110 19:49:10.285328  121338 wrap.go:47] POST /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/events: (1.391965ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39196]
I0110 19:49:10.285565  121338 wrap.go:47] PUT /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-18/status: (2.711266ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39190]
I0110 19:49:10.285995  121338 wrap.go:47] POST /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods: (1.718187ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39192]
I0110 19:49:10.287181  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-18: (1.191081ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39196]
I0110 19:49:10.287584  121338 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0110 19:49:10.287729  121338 scheduling_queue.go:821] About to try and schedule pod preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-20
I0110 19:49:10.287746  121338 scheduler.go:454] Attempting to schedule pod: preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-20
I0110 19:49:10.287821  121338 factory.go:1070] Unable to schedule preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-20: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0110 19:49:10.287909  121338 wrap.go:47] POST /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods: (1.48518ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39192]
I0110 19:49:10.287908  121338 factory.go:1175] Updating pod condition for preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-20 to (PodScheduled==False, Reason=Unschedulable)
I0110 19:49:10.289471  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-20: (1.354444ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39196]
I0110 19:49:10.290152  121338 wrap.go:47] PUT /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-20/status: (1.807176ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39192]
I0110 19:49:10.290525  121338 wrap.go:47] POST /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/events: (2.16063ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39194]
I0110 19:49:10.291064  121338 wrap.go:47] POST /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods: (2.317803ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39198]
I0110 19:49:10.291647  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-20: (1.181417ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39192]
I0110 19:49:10.291932  121338 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0110 19:49:10.292117  121338 scheduling_queue.go:821] About to try and schedule pod preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-22
I0110 19:49:10.292135  121338 scheduler.go:454] Attempting to schedule pod: preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-22
I0110 19:49:10.292239  121338 factory.go:1070] Unable to schedule preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-22: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0110 19:49:10.292289  121338 factory.go:1175] Updating pod condition for preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-22 to (PodScheduled==False, Reason=Unschedulable)
I0110 19:49:10.293429  121338 wrap.go:47] POST /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods: (1.762203ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39194]
I0110 19:49:10.293756  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-22: (1.157935ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39196]
I0110 19:49:10.294453  121338 wrap.go:47] PUT /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-22/status: (1.965759ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39192]
I0110 19:49:10.295685  121338 wrap.go:47] POST /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/events: (2.843635ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39200]
I0110 19:49:10.296090  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-22: (1.24255ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39192]
I0110 19:49:10.296129  121338 wrap.go:47] POST /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods: (1.805137ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39194]
I0110 19:49:10.296577  121338 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0110 19:49:10.296720  121338 scheduling_queue.go:821] About to try and schedule pod preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-24
I0110 19:49:10.296738  121338 scheduler.go:454] Attempting to schedule pod: preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-24
I0110 19:49:10.296811  121338 factory.go:1070] Unable to schedule preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-24: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0110 19:49:10.296858  121338 factory.go:1175] Updating pod condition for preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-24 to (PodScheduled==False, Reason=Unschedulable)
I0110 19:49:10.297940  121338 wrap.go:47] POST /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods: (1.477873ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39200]
I0110 19:49:10.298956  121338 wrap.go:47] POST /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/events: (1.318311ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39204]
I0110 19:49:10.298968  121338 wrap.go:47] PUT /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-24/status: (1.879278ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39196]
I0110 19:49:10.298968  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-24: (1.582338ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39202]
I0110 19:49:10.299875  121338 wrap.go:47] POST /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods: (1.286467ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39200]
I0110 19:49:10.300814  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-24: (1.162633ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39196]
I0110 19:49:10.301055  121338 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0110 19:49:10.301262  121338 scheduling_queue.go:821] About to try and schedule pod preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-26
I0110 19:49:10.301308  121338 scheduler.go:454] Attempting to schedule pod: preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-26
I0110 19:49:10.301394  121338 factory.go:1070] Unable to schedule preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-26: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0110 19:49:10.301436  121338 factory.go:1175] Updating pod condition for preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-26 to (PodScheduled==False, Reason=Unschedulable)
I0110 19:49:10.301918  121338 wrap.go:47] POST /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods: (1.539909ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39200]
I0110 19:49:10.303212  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-26: (1.174226ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39204]
I0110 19:49:10.303319  121338 wrap.go:47] POST /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/events: (1.209927ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39206]
I0110 19:49:10.303840  121338 wrap.go:47] PUT /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-26/status: (1.608824ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39196]
I0110 19:49:10.304198  121338 wrap.go:47] POST /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods: (1.486619ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39200]
I0110 19:49:10.305448  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-26: (1.245567ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39206]
I0110 19:49:10.305716  121338 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0110 19:49:10.305880  121338 scheduling_queue.go:821] About to try and schedule pod preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-28
I0110 19:49:10.305892  121338 scheduler.go:454] Attempting to schedule pod: preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-28
I0110 19:49:10.306045  121338 wrap.go:47] POST /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods: (1.492654ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39200]
I0110 19:49:10.306073  121338 factory.go:1070] Unable to schedule preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-28: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0110 19:49:10.306124  121338 factory.go:1175] Updating pod condition for preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-28 to (PodScheduled==False, Reason=Unschedulable)
I0110 19:49:10.308197  121338 wrap.go:47] PUT /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-28/status: (1.790455ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39206]
I0110 19:49:10.308640  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-28: (1.290991ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39204]
I0110 19:49:10.309154  121338 wrap.go:47] POST /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/events: (1.627217ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39208]
I0110 19:49:10.309171  121338 wrap.go:47] POST /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods: (1.70161ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39210]
I0110 19:49:10.310201  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-28: (1.173311ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39204]
I0110 19:49:10.310479  121338 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0110 19:49:10.310742  121338 scheduling_queue.go:821] About to try and schedule pod preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-30
I0110 19:49:10.310760  121338 scheduler.go:454] Attempting to schedule pod: preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-30
I0110 19:49:10.310851  121338 factory.go:1070] Unable to schedule preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-30: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0110 19:49:10.310941  121338 factory.go:1175] Updating pod condition for preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-30 to (PodScheduled==False, Reason=Unschedulable)
I0110 19:49:10.311661  121338 wrap.go:47] POST /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods: (1.853276ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39208]
I0110 19:49:10.312451  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-30: (990.121µs) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39206]
I0110 19:49:10.313049  121338 wrap.go:47] POST /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/events: (1.22964ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39214]
I0110 19:49:10.314134  121338 wrap.go:47] POST /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods: (1.988864ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39208]
I0110 19:49:10.314174  121338 wrap.go:47] PUT /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-30/status: (2.824282ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39204]
I0110 19:49:10.316018  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-30: (1.248701ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39206]
I0110 19:49:10.316275  121338 wrap.go:47] POST /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods: (1.646286ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39214]
I0110 19:49:10.316309  121338 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0110 19:49:10.316453  121338 scheduling_queue.go:821] About to try and schedule pod preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-32
I0110 19:49:10.316469  121338 scheduler.go:454] Attempting to schedule pod: preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-32
I0110 19:49:10.316627  121338 factory.go:1070] Unable to schedule preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-32: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0110 19:49:10.316678  121338 factory.go:1175] Updating pod condition for preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-32 to (PodScheduled==False, Reason=Unschedulable)
I0110 19:49:10.317935  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-32: (1.094121ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39206]
I0110 19:49:10.318951  121338 wrap.go:47] POST /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods: (2.180363ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39214]
I0110 19:49:10.319582  121338 wrap.go:47] PUT /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-32/status: (2.4107ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39216]
I0110 19:49:10.319846  121338 wrap.go:47] POST /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/events: (1.687647ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39218]
I0110 19:49:10.321112  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-32: (1.178956ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39216]
I0110 19:49:10.321444  121338 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0110 19:49:10.321636  121338 scheduling_queue.go:821] About to try and schedule pod preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-35
I0110 19:49:10.321650  121338 scheduler.go:454] Attempting to schedule pod: preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-35
I0110 19:49:10.321773  121338 factory.go:1070] Unable to schedule preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-35: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0110 19:49:10.321827  121338 factory.go:1175] Updating pod condition for preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-35 to (PodScheduled==False, Reason=Unschedulable)
I0110 19:49:10.322474  121338 wrap.go:47] POST /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods: (2.563667ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39214]
I0110 19:49:10.323734  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-35: (1.606104ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39218]
I0110 19:49:10.324399  121338 wrap.go:47] PUT /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-35/status: (1.584863ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39214]
I0110 19:49:10.324710  121338 wrap.go:47] POST /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/events: (2.405079ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39206]
I0110 19:49:10.325091  121338 wrap.go:47] POST /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods: (2.083383ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39222]
I0110 19:49:10.326137  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-35: (1.360769ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39218]
I0110 19:49:10.326452  121338 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0110 19:49:10.326625  121338 scheduling_queue.go:821] About to try and schedule pod preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-36
I0110 19:49:10.326641  121338 scheduler.go:454] Attempting to schedule pod: preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-36
I0110 19:49:10.327039  121338 wrap.go:47] POST /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods: (1.367227ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39222]
I0110 19:49:10.327591  121338 factory.go:1070] Unable to schedule preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-36: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0110 19:49:10.327648  121338 factory.go:1175] Updating pod condition for preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-36 to (PodScheduled==False, Reason=Unschedulable)
I0110 19:49:10.328870  121338 wrap.go:47] POST /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods: (1.272085ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39218]
I0110 19:49:10.330335  121338 wrap.go:47] PUT /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-36/status: (2.491692ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39220]
I0110 19:49:10.330797  121338 wrap.go:47] POST /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/events: (2.310234ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39224]
I0110 19:49:10.330924  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-36: (1.713884ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39218]
I0110 19:49:10.332067  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-36: (1.299967ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39220]
I0110 19:49:10.332369  121338 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0110 19:49:10.332562  121338 scheduling_queue.go:821] About to try and schedule pod preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-38
I0110 19:49:10.332582  121338 scheduler.go:454] Attempting to schedule pod: preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-38
I0110 19:49:10.332694  121338 factory.go:1070] Unable to schedule preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-38: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0110 19:49:10.332714  121338 wrap.go:47] POST /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods: (2.054205ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39226]
I0110 19:49:10.332737  121338 factory.go:1175] Updating pod condition for preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-38 to (PodScheduled==False, Reason=Unschedulable)
I0110 19:49:10.334877  121338 wrap.go:47] PUT /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-38/status: (1.924139ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39218]
I0110 19:49:10.334899  121338 wrap.go:47] POST /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/events: (1.832124ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39224]
I0110 19:49:10.335370  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-38: (1.710242ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39226]
I0110 19:49:10.336340  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-38: (1.021714ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39224]
I0110 19:49:10.336639  121338 wrap.go:47] POST /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods: (3.271653ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39228]
I0110 19:49:10.336956  121338 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0110 19:49:10.337115  121338 scheduling_queue.go:821] About to try and schedule pod preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-41
I0110 19:49:10.337139  121338 scheduler.go:454] Attempting to schedule pod: preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-41
I0110 19:49:10.337251  121338 factory.go:1070] Unable to schedule preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-41: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0110 19:49:10.337301  121338 factory.go:1175] Updating pod condition for preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-41 to (PodScheduled==False, Reason=Unschedulable)
I0110 19:49:10.339482  121338 wrap.go:47] POST /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/events: (1.566642ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39232]
I0110 19:49:10.339557  121338 wrap.go:47] POST /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods: (2.154276ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39226]
I0110 19:49:10.339658  121338 wrap.go:47] PUT /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-41/status: (2.099945ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39218]
I0110 19:49:10.341599  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-41: (1.597396ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39218]
I0110 19:49:10.342100  121338 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0110 19:49:10.342349  121338 scheduling_queue.go:821] About to try and schedule pod preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-42
I0110 19:49:10.342377  121338 scheduler.go:454] Attempting to schedule pod: preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-42
I0110 19:49:10.342518  121338 factory.go:1070] Unable to schedule preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-42: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0110 19:49:10.342613  121338 factory.go:1175] Updating pod condition for preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-42 to (PodScheduled==False, Reason=Unschedulable)
I0110 19:49:10.355815  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-42: (11.990575ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39234]
I0110 19:49:10.356761  121338 wrap.go:47] PUT /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-42/status: (12.580322ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39218]
I0110 19:49:10.357978  121338 wrap.go:47] POST /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods: (17.882824ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39232]
I0110 19:49:10.358064  121338 wrap.go:47] POST /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/events: (1.533995ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39234]
I0110 19:49:10.358553  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-42: (1.258943ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39218]
I0110 19:49:10.358786  121338 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0110 19:49:10.358920  121338 scheduling_queue.go:821] About to try and schedule pod preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-41
I0110 19:49:10.358936  121338 scheduler.go:454] Attempting to schedule pod: preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-41
I0110 19:49:10.359010  121338 factory.go:1070] Unable to schedule preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-41: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0110 19:49:10.359054  121338 factory.go:1175] Updating pod condition for preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-41 to (PodScheduled==False, Reason=Unschedulable)
I0110 19:49:10.360887  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-41: (1.484612ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39234]
I0110 19:49:10.361913  121338 wrap.go:47] POST /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods: (3.422729ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39232]
I0110 19:49:10.362468  121338 wrap.go:47] PATCH /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/events/ppod-41.1578948033303c27: (2.369495ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39238]
I0110 19:49:10.362679  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-41: (24.948547ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39230]
I0110 19:49:10.362973  121338 wrap.go:47] PUT /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-41/status: (3.630518ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39218]
I0110 19:49:10.364697  121338 wrap.go:47] POST /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods: (1.900184ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39236]
I0110 19:49:10.364803  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-41: (1.376317ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39238]
I0110 19:49:10.365073  121338 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0110 19:49:10.365319  121338 scheduling_queue.go:821] About to try and schedule pod preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-44
I0110 19:49:10.365337  121338 scheduler.go:454] Attempting to schedule pod: preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-44
I0110 19:49:10.365440  121338 factory.go:1070] Unable to schedule preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-44: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0110 19:49:10.365509  121338 factory.go:1175] Updating pod condition for preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-44 to (PodScheduled==False, Reason=Unschedulable)
I0110 19:49:10.367551  121338 wrap.go:47] POST /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods: (2.499996ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39236]
I0110 19:49:10.367648  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-44: (1.391041ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39240]
I0110 19:49:10.368177  121338 wrap.go:47] POST /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/events: (1.282638ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39242]
I0110 19:49:10.368205  121338 wrap.go:47] PUT /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-44/status: (1.976364ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39234]
I0110 19:49:10.369703  121338 wrap.go:47] POST /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods: (1.683099ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39240]
I0110 19:49:10.370017  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-44: (1.278783ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39234]
I0110 19:49:10.370321  121338 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0110 19:49:10.370479  121338 scheduling_queue.go:821] About to try and schedule pod preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-46
I0110 19:49:10.370518  121338 scheduler.go:454] Attempting to schedule pod: preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-46
I0110 19:49:10.370688  121338 factory.go:1070] Unable to schedule preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-46: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0110 19:49:10.370733  121338 factory.go:1175] Updating pod condition for preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-46 to (PodScheduled==False, Reason=Unschedulable)
I0110 19:49:10.372514  121338 wrap.go:47] POST /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods: (2.219016ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39240]
I0110 19:49:10.372965  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-46: (1.62977ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39236]
I0110 19:49:10.373309  121338 wrap.go:47] PUT /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-46/status: (1.950193ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39234]
I0110 19:49:10.374944  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-46: (1.226636ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39236]
I0110 19:49:10.375051  121338 wrap.go:47] POST /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/events: (1.319083ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39244]
I0110 19:49:10.375259  121338 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0110 19:49:10.375447  121338 scheduling_queue.go:821] About to try and schedule pod preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-48
I0110 19:49:10.375462  121338 scheduler.go:454] Attempting to schedule pod: preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-48
I0110 19:49:10.375561  121338 factory.go:1070] Unable to schedule preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-48: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0110 19:49:10.375622  121338 factory.go:1175] Updating pod condition for preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-48 to (PodScheduled==False, Reason=Unschedulable)
I0110 19:49:10.377679  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-48: (1.870153ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39236]
I0110 19:49:10.377766  121338 wrap.go:47] POST /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/events: (1.722219ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39246]
I0110 19:49:10.377922  121338 wrap.go:47] PUT /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-48/status: (2.070524ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39240]
I0110 19:49:10.379518  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-48: (1.159879ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39246]
I0110 19:49:10.379791  121338 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0110 19:49:10.379991  121338 scheduling_queue.go:821] About to try and schedule pod preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-46
I0110 19:49:10.380009  121338 scheduler.go:454] Attempting to schedule pod: preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-46
I0110 19:49:10.380126  121338 factory.go:1070] Unable to schedule preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-46: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0110 19:49:10.380174  121338 factory.go:1175] Updating pod condition for preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-46 to (PodScheduled==False, Reason=Unschedulable)
I0110 19:49:10.381742  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-46: (1.24669ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39236]
I0110 19:49:10.382161  121338 wrap.go:47] PUT /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-46/status: (1.73965ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39246]
I0110 19:49:10.383341  121338 wrap.go:47] PATCH /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/events/ppod-46.15789480352e5b75: (2.404953ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39248]
I0110 19:49:10.383532  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-46: (942.372µs) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39246]
I0110 19:49:10.383804  121338 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0110 19:49:10.383966  121338 scheduling_queue.go:821] About to try and schedule pod preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-48
I0110 19:49:10.383981  121338 scheduler.go:454] Attempting to schedule pod: preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-48
I0110 19:49:10.384055  121338 factory.go:1070] Unable to schedule preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-48: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0110 19:49:10.384090  121338 factory.go:1175] Updating pod condition for preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-48 to (PodScheduled==False, Reason=Unschedulable)
I0110 19:49:10.385911  121338 wrap.go:47] PUT /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-48/status: (1.622788ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39248]
I0110 19:49:10.386936  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-48: (1.247214ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39250]
I0110 19:49:10.387008  121338 wrap.go:47] PATCH /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/events/ppod-48.157894803578f4bb: (2.391851ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39236]
I0110 19:49:10.387727  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-48: (1.023299ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39248]
I0110 19:49:10.388000  121338 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0110 19:49:10.388162  121338 scheduling_queue.go:821] About to try and schedule pod preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-49
I0110 19:49:10.388177  121338 scheduler.go:454] Attempting to schedule pod: preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-49
I0110 19:49:10.388284  121338 factory.go:1070] Unable to schedule preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-49: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0110 19:49:10.388378  121338 factory.go:1175] Updating pod condition for preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-49 to (PodScheduled==False, Reason=Unschedulable)
I0110 19:49:10.389727  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-49: (1.078011ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39236]
I0110 19:49:10.390415  121338 wrap.go:47] POST /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/events: (1.475155ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39252]
I0110 19:49:10.390610  121338 wrap.go:47] PUT /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-49/status: (1.903093ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39250]
I0110 19:49:10.392304  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-49: (1.226848ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39252]
I0110 19:49:10.392598  121338 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0110 19:49:10.392790  121338 scheduling_queue.go:821] About to try and schedule pod preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-47
I0110 19:49:10.392807  121338 scheduler.go:454] Attempting to schedule pod: preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-47
I0110 19:49:10.392907  121338 factory.go:1070] Unable to schedule preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-47: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0110 19:49:10.392987  121338 factory.go:1175] Updating pod condition for preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-47 to (PodScheduled==False, Reason=Unschedulable)
I0110 19:49:10.394447  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-47: (1.192589ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39236]
I0110 19:49:10.394929  121338 wrap.go:47] PUT /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-47/status: (1.7157ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39252]
I0110 19:49:10.395507  121338 wrap.go:47] POST /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/events: (1.874061ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39254]
I0110 19:49:10.396660  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-47: (1.224467ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39252]
I0110 19:49:10.396936  121338 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0110 19:49:10.397119  121338 scheduling_queue.go:821] About to try and schedule pod preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-49
I0110 19:49:10.397164  121338 scheduler.go:454] Attempting to schedule pod: preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-49
I0110 19:49:10.397302  121338 factory.go:1070] Unable to schedule preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-49: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0110 19:49:10.397346  121338 factory.go:1175] Updating pod condition for preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-49 to (PodScheduled==False, Reason=Unschedulable)
I0110 19:49:10.399196  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-49: (1.181498ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39236]
I0110 19:49:10.400084  121338 wrap.go:47] PUT /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-49/status: (2.416733ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39254]
I0110 19:49:10.400296  121338 wrap.go:47] PATCH /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/events/ppod-49.15789480363b88da: (2.228033ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39256]
I0110 19:49:10.401662  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-49: (1.181948ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39254]
I0110 19:49:10.401989  121338 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0110 19:49:10.402190  121338 scheduling_queue.go:821] About to try and schedule pod preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-47
I0110 19:49:10.402207  121338 scheduler.go:454] Attempting to schedule pod: preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-47
I0110 19:49:10.402312  121338 factory.go:1070] Unable to schedule preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-47: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0110 19:49:10.402454  121338 factory.go:1175] Updating pod condition for preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-47 to (PodScheduled==False, Reason=Unschedulable)
I0110 19:49:10.403821  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-47: (1.116241ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39256]
I0110 19:49:10.404325  121338 wrap.go:47] PUT /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-47/status: (1.568994ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39236]
I0110 19:49:10.406082  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-47: (1.316951ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39236]
I0110 19:49:10.406481  121338 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0110 19:49:10.406713  121338 scheduling_queue.go:821] About to try and schedule pod preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-44
I0110 19:49:10.406733  121338 scheduler.go:454] Attempting to schedule pod: preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-44
I0110 19:49:10.406973  121338 factory.go:1070] Unable to schedule preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-44: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0110 19:49:10.407031  121338 factory.go:1175] Updating pod condition for preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-44 to (PodScheduled==False, Reason=Unschedulable)
I0110 19:49:10.407444  121338 wrap.go:47] PATCH /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/events/ppod-47.157894803681e921: (3.584936ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39258]
I0110 19:49:10.408484  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-44: (1.153997ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39256]
I0110 19:49:10.409153  121338 wrap.go:47] PUT /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-44/status: (1.900692ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39236]
I0110 19:49:10.410362  121338 wrap.go:47] PATCH /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/events/ppod-44.1578948034de55dd: (2.221433ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39258]
I0110 19:49:10.410557  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-44: (1.010213ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39236]
I0110 19:49:10.410822  121338 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0110 19:49:10.411002  121338 scheduling_queue.go:821] About to try and schedule pod preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-45
I0110 19:49:10.411022  121338 scheduler.go:454] Attempting to schedule pod: preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-45
I0110 19:49:10.411144  121338 factory.go:1070] Unable to schedule preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-45: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0110 19:49:10.411199  121338 factory.go:1175] Updating pod condition for preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-45 to (PodScheduled==False, Reason=Unschedulable)
I0110 19:49:10.412424  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-45: (1.020285ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39236]
I0110 19:49:10.412990  121338 wrap.go:47] PUT /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-45/status: (1.571761ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39256]
I0110 19:49:10.413350  121338 wrap.go:47] POST /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/events: (1.607593ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39260]
I0110 19:49:10.414557  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-45: (1.173122ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39256]
I0110 19:49:10.414854  121338 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0110 19:49:10.415042  121338 scheduling_queue.go:821] About to try and schedule pod preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-42
I0110 19:49:10.415055  121338 scheduler.go:454] Attempting to schedule pod: preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-42
I0110 19:49:10.415204  121338 factory.go:1070] Unable to schedule preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-42: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0110 19:49:10.415253  121338 factory.go:1175] Updating pod condition for preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-42 to (PodScheduled==False, Reason=Unschedulable)
I0110 19:49:10.416554  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-42: (1.05946ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39236]
I0110 19:49:10.417815  121338 wrap.go:47] PUT /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-42/status: (2.34417ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39260]
I0110 19:49:10.418794  121338 wrap.go:47] PATCH /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/events/ppod-42.157894803380bc1c: (2.492029ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39262]
I0110 19:49:10.419212  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-42: (994.341µs) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39260]
I0110 19:49:10.419512  121338 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0110 19:49:10.419760  121338 scheduling_queue.go:821] About to try and schedule pod preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-45
I0110 19:49:10.419777  121338 scheduler.go:454] Attempting to schedule pod: preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-45
I0110 19:49:10.419874  121338 factory.go:1070] Unable to schedule preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-45: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0110 19:49:10.419927  121338 factory.go:1175] Updating pod condition for preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-45 to (PodScheduled==False, Reason=Unschedulable)
I0110 19:49:10.421342  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-45: (1.135384ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39236]
I0110 19:49:10.421768  121338 wrap.go:47] PUT /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-45/status: (1.535063ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39262]
I0110 19:49:10.423458  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-45: (1.181124ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39262]
I0110 19:49:10.423698  121338 wrap.go:47] PATCH /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/events/ppod-45.157894803797c46c: (3.008583ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39264]
I0110 19:49:10.423975  121338 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0110 19:49:10.424156  121338 scheduling_queue.go:821] About to try and schedule pod preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-43
I0110 19:49:10.424171  121338 scheduler.go:454] Attempting to schedule pod: preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-43
I0110 19:49:10.424284  121338 factory.go:1070] Unable to schedule preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-43: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0110 19:49:10.424331  121338 factory.go:1175] Updating pod condition for preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-43 to (PodScheduled==False, Reason=Unschedulable)
I0110 19:49:10.426016  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-43: (1.33563ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39236]
I0110 19:49:10.426565  121338 wrap.go:47] PUT /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-43/status: (1.87482ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39262]
I0110 19:49:10.426957  121338 wrap.go:47] POST /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/events: (2.013076ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39266]
I0110 19:49:10.428697  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-43: (1.2846ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39262]
I0110 19:49:10.428978  121338 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0110 19:49:10.429215  121338 scheduling_queue.go:821] About to try and schedule pod preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-38
I0110 19:49:10.429246  121338 scheduler.go:454] Attempting to schedule pod: preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-38
I0110 19:49:10.429371  121338 factory.go:1070] Unable to schedule preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-38: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0110 19:49:10.429423  121338 factory.go:1175] Updating pod condition for preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-38 to (PodScheduled==False, Reason=Unschedulable)
I0110 19:49:10.430985  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-38: (1.249921ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39236]
I0110 19:49:10.432638  121338 wrap.go:47] PATCH /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/events/ppod-38.1578948032ea9a1d: (2.280936ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39268]
I0110 19:49:10.432927  121338 wrap.go:47] PUT /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-38/status: (2.956901ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39262]
I0110 19:49:10.434840  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-38: (1.438422ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39268]
I0110 19:49:10.435129  121338 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0110 19:49:10.435337  121338 scheduling_queue.go:821] About to try and schedule pod preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-43
I0110 19:49:10.435357  121338 scheduler.go:454] Attempting to schedule pod: preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-43
I0110 19:49:10.435481  121338 factory.go:1070] Unable to schedule preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-43: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0110 19:49:10.435560  121338 factory.go:1175] Updating pod condition for preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-43 to (PodScheduled==False, Reason=Unschedulable)
I0110 19:49:10.437108  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-43: (1.236554ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39236]
I0110 19:49:10.437630  121338 wrap.go:47] PUT /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-43/status: (1.815779ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39268]
I0110 19:49:10.438702  121338 wrap.go:47] PATCH /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/events/ppod-43.157894803860318c: (2.176909ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39270]
I0110 19:49:10.439891  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-43: (1.744155ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39268]
I0110 19:49:10.440197  121338 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0110 19:49:10.440390  121338 scheduling_queue.go:821] About to try and schedule pod preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-36
I0110 19:49:10.440413  121338 scheduler.go:454] Attempting to schedule pod: preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-36
I0110 19:49:10.440550  121338 factory.go:1070] Unable to schedule preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-36: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0110 19:49:10.440617  121338 factory.go:1175] Updating pod condition for preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-36 to (PodScheduled==False, Reason=Unschedulable)
I0110 19:49:10.442218  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-36: (1.330425ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39270]
I0110 19:49:10.442975  121338 wrap.go:47] PUT /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-36/status: (1.990598ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39236]
I0110 19:49:10.444156  121338 wrap.go:47] PATCH /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/events/ppod-36.15789480329ce832: (2.758595ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39272]
I0110 19:49:10.444950  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-36: (1.598565ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39236]
I0110 19:49:10.445313  121338 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0110 19:49:10.445481  121338 scheduling_queue.go:821] About to try and schedule pod preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-40
I0110 19:49:10.445516  121338 scheduler.go:454] Attempting to schedule pod: preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-40
I0110 19:49:10.445628  121338 factory.go:1070] Unable to schedule preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-40: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0110 19:49:10.445685  121338 factory.go:1175] Updating pod condition for preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-40 to (PodScheduled==False, Reason=Unschedulable)
I0110 19:49:10.447459  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-40: (1.471764ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39270]
I0110 19:49:10.447712  121338 wrap.go:47] POST /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/events: (1.497184ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39274]
I0110 19:49:10.447770  121338 wrap.go:47] PUT /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-40/status: (1.836907ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39272]
I0110 19:49:10.449372  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-40: (1.208497ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39274]
I0110 19:49:10.449652  121338 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0110 19:49:10.449866  121338 scheduling_queue.go:821] About to try and schedule pod preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-39
I0110 19:49:10.449885  121338 scheduler.go:454] Attempting to schedule pod: preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-39
I0110 19:49:10.449979  121338 factory.go:1070] Unable to schedule preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-39: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0110 19:49:10.450041  121338 factory.go:1175] Updating pod condition for preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-39 to (PodScheduled==False, Reason=Unschedulable)
I0110 19:49:10.452078  121338 wrap.go:47] POST /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/events: (1.422938ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39276]
I0110 19:49:10.452202  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-39: (1.912485ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39270]
I0110 19:49:10.452287  121338 wrap.go:47] PUT /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-39/status: (1.996461ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39274]
I0110 19:49:10.453941  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-39: (1.224867ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39270]
I0110 19:49:10.454326  121338 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0110 19:49:10.454581  121338 scheduling_queue.go:821] About to try and schedule pod preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-40
I0110 19:49:10.454601  121338 scheduler.go:454] Attempting to schedule pod: preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-40
I0110 19:49:10.454692  121338 factory.go:1070] Unable to schedule preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-40: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0110 19:49:10.454766  121338 factory.go:1175] Updating pod condition for preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-40 to (PodScheduled==False, Reason=Unschedulable)
I0110 19:49:10.456774  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-40: (1.745236ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39270]
I0110 19:49:10.456779  121338 wrap.go:47] PUT /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-40/status: (1.615104ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39276]
I0110 19:49:10.458023  121338 wrap.go:47] PATCH /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/events/ppod-40.1578948039a60378: (2.578878ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39278]
I0110 19:49:10.458684  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-40: (1.258401ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39276]
I0110 19:49:10.459119  121338 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0110 19:49:10.459289  121338 scheduling_queue.go:821] About to try and schedule pod preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-39
I0110 19:49:10.459306  121338 scheduler.go:454] Attempting to schedule pod: preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-39
I0110 19:49:10.459430  121338 factory.go:1070] Unable to schedule preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-39: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0110 19:49:10.459474  121338 factory.go:1175] Updating pod condition for preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-39 to (PodScheduled==False, Reason=Unschedulable)
I0110 19:49:10.460840  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-39: (1.126367ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39270]
I0110 19:49:10.461857  121338 wrap.go:47] PUT /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-39/status: (2.125944ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39278]
I0110 19:49:10.463306  121338 wrap.go:47] PATCH /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/events/ppod-39.1578948039e87387: (3.148555ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39280]
I0110 19:49:10.463458  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-39: (1.249711ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39278]
I0110 19:49:10.463722  121338 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0110 19:49:10.463905  121338 scheduling_queue.go:821] About to try and schedule pod preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-35
I0110 19:49:10.463928  121338 scheduler.go:454] Attempting to schedule pod: preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-35
I0110 19:49:10.464021  121338 factory.go:1070] Unable to schedule preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-35: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0110 19:49:10.464057  121338 factory.go:1175] Updating pod condition for preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-35 to (PodScheduled==False, Reason=Unschedulable)
I0110 19:49:10.466259  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-35: (1.417189ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39270]
I0110 19:49:10.466835  121338 wrap.go:47] PUT /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-35/status: (2.497413ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39280]
I0110 19:49:10.467470  121338 wrap.go:47] PATCH /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/events/ppod-35.1578948032441bd3: (2.577659ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39282]
I0110 19:49:10.468679  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-35: (1.211461ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39280]
I0110 19:49:10.469037  121338 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0110 19:49:10.469253  121338 scheduling_queue.go:821] About to try and schedule pod preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-37
I0110 19:49:10.469275  121338 scheduler.go:454] Attempting to schedule pod: preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-37
I0110 19:49:10.469401  121338 factory.go:1070] Unable to schedule preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-37: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0110 19:49:10.469454  121338 factory.go:1175] Updating pod condition for preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-37 to (PodScheduled==False, Reason=Unschedulable)
I0110 19:49:10.471794  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-37: (1.090902ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39270]
I0110 19:49:10.471891  121338 wrap.go:47] PUT /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-37/status: (2.1863ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39282]
I0110 19:49:10.472334  121338 wrap.go:47] POST /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/events: (2.292741ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39284]
I0110 19:49:10.473580  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-37: (1.244158ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39282]
I0110 19:49:10.473822  121338 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0110 19:49:10.473944  121338 scheduling_queue.go:821] About to try and schedule pod preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-32
I0110 19:49:10.473958  121338 scheduler.go:454] Attempting to schedule pod: preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-32
I0110 19:49:10.474033  121338 factory.go:1070] Unable to schedule preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-32: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0110 19:49:10.474074  121338 factory.go:1175] Updating pod condition for preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-32 to (PodScheduled==False, Reason=Unschedulable)
I0110 19:49:10.474975  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/preemptor-pod: (1.650812ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39284]
I0110 19:49:10.475658  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-32: (1.064037ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39270]
I0110 19:49:10.476557  121338 wrap.go:47] PUT /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-32/status: (1.977735ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39282]
I0110 19:49:10.477353  121338 wrap.go:47] PATCH /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/events/ppod-32.1578948031f58973: (2.146ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39284]
I0110 19:49:10.477997  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-32: (966.871µs) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39270]
I0110 19:49:10.478305  121338 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0110 19:49:10.478531  121338 scheduling_queue.go:821] About to try and schedule pod preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-37
I0110 19:49:10.478551  121338 scheduler.go:454] Attempting to schedule pod: preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-37
I0110 19:49:10.478671  121338 factory.go:1070] Unable to schedule preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-37: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0110 19:49:10.478711  121338 factory.go:1175] Updating pod condition for preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-37 to (PodScheduled==False, Reason=Unschedulable)
I0110 19:49:10.480135  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-37: (1.134927ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39286]
I0110 19:49:10.480835  121338 wrap.go:47] PUT /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-37/status: (1.863815ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39284]
I0110 19:49:10.481906  121338 wrap.go:47] PATCH /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/events/ppod-37.157894803b10b716: (2.184038ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39288]
I0110 19:49:10.482278  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-37: (1.075751ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39284]
I0110 19:49:10.482559  121338 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0110 19:49:10.482718  121338 scheduling_queue.go:821] About to try and schedule pod preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-34
I0110 19:49:10.482732  121338 scheduler.go:454] Attempting to schedule pod: preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-34
I0110 19:49:10.482826  121338 factory.go:1070] Unable to schedule preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-34: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0110 19:49:10.482876  121338 factory.go:1175] Updating pod condition for preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-34 to (PodScheduled==False, Reason=Unschedulable)
I0110 19:49:10.484663  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-34: (1.481052ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39286]
I0110 19:49:10.484857  121338 wrap.go:47] PUT /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-34/status: (1.773098ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39288]
I0110 19:49:10.485374  121338 wrap.go:47] POST /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/events: (1.888375ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39290]
I0110 19:49:10.486822  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-34: (1.083141ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39288]
I0110 19:49:10.487083  121338 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0110 19:49:10.487301  121338 scheduling_queue.go:821] About to try and schedule pod preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-30
I0110 19:49:10.487319  121338 scheduler.go:454] Attempting to schedule pod: preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-30
I0110 19:49:10.487415  121338 factory.go:1070] Unable to schedule preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-30: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0110 19:49:10.487466  121338 factory.go:1175] Updating pod condition for preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-30 to (PodScheduled==False, Reason=Unschedulable)
I0110 19:49:10.488969  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-30: (1.1629ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39286]
I0110 19:49:10.489803  121338 wrap.go:47] PUT /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-30/status: (2.001487ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39288]
I0110 19:49:10.490696  121338 wrap.go:47] PATCH /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/events/ppod-30.15789480319dff4c: (2.075035ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39292]
I0110 19:49:10.491311  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-30: (1.083865ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39288]
I0110 19:49:10.491631  121338 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0110 19:49:10.491846  121338 scheduling_queue.go:821] About to try and schedule pod preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-34
I0110 19:49:10.491867  121338 scheduler.go:454] Attempting to schedule pod: preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-34
I0110 19:49:10.492027  121338 factory.go:1070] Unable to schedule preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-34: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0110 19:49:10.492091  121338 factory.go:1175] Updating pod condition for preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-34 to (PodScheduled==False, Reason=Unschedulable)
I0110 19:49:10.493752  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-34: (1.396318ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39286]
I0110 19:49:10.493752  121338 wrap.go:47] PUT /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-34/status: (1.397799ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39292]
I0110 19:49:10.495586  121338 wrap.go:47] PATCH /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/events/ppod-34.157894803bdd79f1: (2.638431ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39294]
I0110 19:49:10.495619  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-34: (1.33512ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39286]
I0110 19:49:10.495912  121338 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0110 19:49:10.496107  121338 scheduling_queue.go:821] About to try and schedule pod preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-33
I0110 19:49:10.496123  121338 scheduler.go:454] Attempting to schedule pod: preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-33
I0110 19:49:10.496211  121338 factory.go:1070] Unable to schedule preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-33: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0110 19:49:10.496272  121338 factory.go:1175] Updating pod condition for preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-33 to (PodScheduled==False, Reason=Unschedulable)
I0110 19:49:10.497856  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-33: (1.285011ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39292]
I0110 19:49:10.498216  121338 wrap.go:47] POST /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/events: (1.311528ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39296]
I0110 19:49:10.498662  121338 wrap.go:47] PUT /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-33/status: (2.139342ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39294]
I0110 19:49:10.500273  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-33: (1.048541ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39296]
I0110 19:49:10.500584  121338 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
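Note the two shapes of event traffic in these cycles: a POST .../events returning 201 the first time a given pod fails to schedule, and a PATCH .../events/ppod-N.<id> returning 200 on every repeat. That is client-go's event recorder aggregating duplicates: identical events are folded into one Event object whose count is bumped by PATCH rather than creating a new object each time. A hedged sketch of how such events are emitted (reportFailedScheduling is an illustrative wrapper, not the scheduler's code):

import (
	v1 "k8s.io/api/core/v1"
	"k8s.io/client-go/tools/record"
)

// reportFailedScheduling emits the Warning/FailedScheduling events seen in
// this log. The record package deduplicates: the first call becomes a POST
// to /events, later identical calls become PATCHes that increment the count.
func reportFailedScheduling(recorder record.EventRecorder, pod *v1.Pod, message string) {
	recorder.Eventf(pod, v1.EventTypeWarning, "FailedScheduling", "%s", message)
}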
I0110 19:49:10.500778  121338 scheduling_queue.go:821] About to try and schedule pod preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-28
I0110 19:49:10.500796  121338 scheduler.go:454] Attempting to schedule pod: preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-28
I0110 19:49:10.501041  121338 factory.go:1070] Unable to schedule preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-28: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0110 19:49:10.501124  121338 factory.go:1175] Updating pod condition for preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-28 to (PodScheduled==False, Reason=Unschedulable)
I0110 19:49:10.502593  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-28: (1.21711ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39292]
I0110 19:49:10.503202  121338 wrap.go:47] PUT /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-28/status: (1.833481ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39296]
I0110 19:49:10.504164  121338 wrap.go:47] PATCH /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/events/ppod-28.1578948031547f90: (2.28358ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39298]
I0110 19:49:10.504866  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-28: (1.168426ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39296]
I0110 19:49:10.505152  121338 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0110 19:49:10.505328  121338 scheduling_queue.go:821] About to try and schedule pod preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-33
I0110 19:49:10.505348  121338 scheduler.go:454] Attempting to schedule pod: preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-33
I0110 19:49:10.505512  121338 factory.go:1070] Unable to schedule preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-33: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0110 19:49:10.505580  121338 factory.go:1175] Updating pod condition for preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-33 to (PodScheduled==False, Reason=Unschedulable)
I0110 19:49:10.507785  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-33: (1.285919ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39300]
I0110 19:49:10.507785  121338 wrap.go:47] PUT /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-33/status: (1.812729ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39298]
I0110 19:49:10.508831  121338 wrap.go:47] PATCH /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/events/ppod-33.157894803ca9e4a8: (2.632476ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39292]
I0110 19:49:10.509450  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-33: (1.181958ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39298]
I0110 19:49:10.509757  121338 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0110 19:49:10.509951  121338 scheduling_queue.go:821] About to try and schedule pod preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-31
I0110 19:49:10.510006  121338 scheduler.go:454] Attempting to schedule pod: preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-31
I0110 19:49:10.510168  121338 factory.go:1070] Unable to schedule preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-31: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0110 19:49:10.510263  121338 factory.go:1175] Updating pod condition for preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-31 to (PodScheduled==False, Reason=Unschedulable)
I0110 19:49:10.511826  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-31: (1.249095ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39292]
I0110 19:49:10.512307  121338 wrap.go:47] POST /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/events: (1.419044ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39302]
I0110 19:49:10.512966  121338 wrap.go:47] PUT /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-31/status: (2.373315ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39300]
I0110 19:49:10.514753  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-31: (1.288394ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39302]
I0110 19:49:10.515006  121338 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0110 19:49:10.515202  121338 scheduling_queue.go:821] About to try and schedule pod preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-26
I0110 19:49:10.515272  121338 scheduler.go:454] Attempting to schedule pod: preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-26
I0110 19:49:10.515428  121338 factory.go:1070] Unable to schedule preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-26: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0110 19:49:10.515508  121338 factory.go:1175] Updating pod condition for preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-26 to (PodScheduled==False, Reason=Unschedulable)
I0110 19:49:10.517075  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-26: (1.296979ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39292]
I0110 19:49:10.517409  121338 wrap.go:47] PUT /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-26/status: (1.638688ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39302]
I0110 19:49:10.518947  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-26: (1.107011ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39302]
I0110 19:49:10.519211  121338 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0110 19:49:10.519270  121338 wrap.go:47] PATCH /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/events/ppod-26.15789480310cfb61: (2.96084ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39304]
I0110 19:49:10.519397  121338 scheduling_queue.go:821] About to try and schedule pod preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-31
I0110 19:49:10.519413  121338 scheduler.go:454] Attempting to schedule pod: preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-31
I0110 19:49:10.519528  121338 factory.go:1070] Unable to schedule preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-31: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0110 19:49:10.519580  121338 factory.go:1175] Updating pod condition for preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-31 to (PodScheduled==False, Reason=Unschedulable)
I0110 19:49:10.520750  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-31: (974.063µs) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39302]
I0110 19:49:10.521325  121338 wrap.go:47] PUT /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-31/status: (1.492258ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39292]
I0110 19:49:10.522807  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-31: (1.080622ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39292]
I0110 19:49:10.523121  121338 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0110 19:49:10.523238  121338 wrap.go:47] PATCH /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/events/ppod-31.157894803d7ed5c3: (2.837863ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39306]
I0110 19:49:10.523324  121338 scheduling_queue.go:821] About to try and schedule pod preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-29
I0110 19:49:10.523345  121338 scheduler.go:454] Attempting to schedule pod: preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-29
I0110 19:49:10.523459  121338 factory.go:1070] Unable to schedule preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-29: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0110 19:49:10.523529  121338 factory.go:1175] Updating pod condition for preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-29 to (PodScheduled==False, Reason=Unschedulable)
I0110 19:49:10.525260  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-29: (1.494056ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39302]
I0110 19:49:10.525760  121338 wrap.go:47] PUT /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-29/status: (2.011531ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39292]
I0110 19:49:10.525910  121338 wrap.go:47] POST /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/events: (1.8271ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39308]
I0110 19:49:10.527387  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-29: (1.123705ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39292]
I0110 19:49:10.527654  121338 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0110 19:49:10.527814  121338 scheduling_queue.go:821] About to try and schedule pod preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-24
I0110 19:49:10.527830  121338 scheduler.go:454] Attempting to schedule pod: preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-24
I0110 19:49:10.527899  121338 factory.go:1070] Unable to schedule preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-24: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0110 19:49:10.527941  121338 factory.go:1175] Updating pod condition for preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-24 to (PodScheduled==False, Reason=Unschedulable)
I0110 19:49:10.530826  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-24: (2.548224ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39302]
I0110 19:49:10.530871  121338 wrap.go:47] PUT /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-24/status: (2.682581ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39292]
I0110 19:49:10.531845  121338 wrap.go:47] PATCH /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/events/ppod-24.1578948030c71cb2: (2.551121ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39310]
I0110 19:49:10.532997  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-24: (1.179634ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39292]
I0110 19:49:10.533326  121338 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0110 19:49:10.533532  121338 scheduling_queue.go:821] About to try and schedule pod preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-29
I0110 19:49:10.533547  121338 scheduler.go:454] Attempting to schedule pod: preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-29
I0110 19:49:10.533649  121338 factory.go:1070] Unable to schedule preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-29: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0110 19:49:10.533704  121338 factory.go:1175] Updating pod condition for preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-29 to (PodScheduled==False, Reason=Unschedulable)
I0110 19:49:10.535344  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-29: (1.35054ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39302]
I0110 19:49:10.535896  121338 wrap.go:47] PUT /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-29/status: (1.968572ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39310]
I0110 19:49:10.537165  121338 wrap.go:47] PATCH /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/events/ppod-29.157894803e49cac9: (2.770667ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39312]
I0110 19:49:10.537607  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-29: (1.234982ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39310]
I0110 19:49:10.537891  121338 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0110 19:49:10.538099  121338 scheduling_queue.go:821] About to try and schedule pod preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-27
I0110 19:49:10.538118  121338 scheduler.go:454] Attempting to schedule pod: preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-27
I0110 19:49:10.538214  121338 factory.go:1070] Unable to schedule preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-27: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0110 19:49:10.538278  121338 factory.go:1175] Updating pod condition for preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-27 to (PodScheduled==False, Reason=Unschedulable)
I0110 19:49:10.540445  121338 wrap.go:47] POST /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/events: (1.514241ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39314]
I0110 19:49:10.540526  121338 wrap.go:47] PUT /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-27/status: (1.984394ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39312]
I0110 19:49:10.540594  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-27: (2.053221ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39302]
I0110 19:49:10.542071  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-27: (1.148213ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39312]
I0110 19:49:10.542374  121338 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0110 19:49:10.542561  121338 scheduling_queue.go:821] About to try and schedule pod preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-22
I0110 19:49:10.542581  121338 scheduler.go:454] Attempting to schedule pod: preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-22
I0110 19:49:10.542695  121338 factory.go:1070] Unable to schedule preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-22: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0110 19:49:10.542749  121338 factory.go:1175] Updating pod condition for preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-22 to (PodScheduled==False, Reason=Unschedulable)
I0110 19:49:10.544102  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-22: (1.07898ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39314]
I0110 19:49:10.544846  121338 wrap.go:47] PUT /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-22/status: (1.864173ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39312]
I0110 19:49:10.545825  121338 wrap.go:47] PATCH /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/events/ppod-22.157894803081666a: (2.134906ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39316]
I0110 19:49:10.546297  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-22: (1.077036ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39312]
I0110 19:49:10.546632  121338 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0110 19:49:10.546784  121338 scheduling_queue.go:821] About to try and schedule pod preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-27
I0110 19:49:10.546797  121338 scheduler.go:454] Attempting to schedule pod: preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-27
I0110 19:49:10.546890  121338 factory.go:1070] Unable to schedule preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-27: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0110 19:49:10.546941  121338 factory.go:1175] Updating pod condition for preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-27 to (PodScheduled==False, Reason=Unschedulable)
I0110 19:49:10.548263  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-27: (1.041342ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39314]
I0110 19:49:10.548722  121338 wrap.go:47] PUT /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-27/status: (1.556193ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39316]
I0110 19:49:10.550014  121338 wrap.go:47] PATCH /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/events/ppod-27.157894803f2ad9ed: (2.019349ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39318]
I0110 19:49:10.550337  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-27: (1.254281ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39316]
I0110 19:49:10.550626  121338 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0110 19:49:10.550812  121338 scheduling_queue.go:821] About to try and schedule pod preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-25
I0110 19:49:10.550828  121338 scheduler.go:454] Attempting to schedule pod: preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-25
I0110 19:49:10.550931  121338 factory.go:1070] Unable to schedule preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-25: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0110 19:49:10.550983  121338 factory.go:1175] Updating pod condition for preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-25 to (PodScheduled==False, Reason=Unschedulable)
I0110 19:49:10.552271  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-25: (1.061252ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39314]
I0110 19:49:10.552983  121338 wrap.go:47] POST /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/events: (1.381908ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39320]
I0110 19:49:10.553302  121338 wrap.go:47] PUT /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-25/status: (2.067818ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39318]
I0110 19:49:10.554991  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-25: (1.202267ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39320]
I0110 19:49:10.555358  121338 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0110 19:49:10.555577  121338 scheduling_queue.go:821] About to try and schedule pod preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-20
I0110 19:49:10.555613  121338 scheduler.go:454] Attempting to schedule pod: preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-20
I0110 19:49:10.555714  121338 factory.go:1070] Unable to schedule preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-20: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0110 19:49:10.555761  121338 factory.go:1175] Updating pod condition for preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-20 to (PodScheduled==False, Reason=Unschedulable)
I0110 19:49:10.557324  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-20: (1.317506ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39314]
I0110 19:49:10.557828  121338 wrap.go:47] PUT /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-20/status: (1.855038ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39320]
I0110 19:49:10.559186  121338 wrap.go:47] PATCH /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/events/ppod-20.15789480303e7887: (2.418108ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39322]
I0110 19:49:10.559209  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-20: (1.047182ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39320]
I0110 19:49:10.559627  121338 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0110 19:49:10.559798  121338 scheduling_queue.go:821] About to try and schedule pod preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-25
I0110 19:49:10.559815  121338 scheduler.go:454] Attempting to schedule pod: preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-25
I0110 19:49:10.559914  121338 factory.go:1070] Unable to schedule preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-25: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0110 19:49:10.559967  121338 factory.go:1175] Updating pod condition for preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-25 to (PodScheduled==False, Reason=Unschedulable)
I0110 19:49:10.561446  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-25: (1.248147ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39322]
I0110 19:49:10.562091  121338 wrap.go:47] PUT /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-25/status: (1.896148ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39314]
I0110 19:49:10.563291  121338 wrap.go:47] PATCH /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/events/ppod-25.157894803fecc1b9: (2.592658ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39324]
I0110 19:49:10.564421  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-25: (1.261135ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39314]
I0110 19:49:10.564769  121338 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0110 19:49:10.564941  121338 scheduling_queue.go:821] About to try and schedule pod preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-23
I0110 19:49:10.564982  121338 scheduler.go:454] Attempting to schedule pod: preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-23
I0110 19:49:10.565080  121338 factory.go:1070] Unable to schedule preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-23: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0110 19:49:10.565140  121338 factory.go:1175] Updating pod condition for preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-23 to (PodScheduled==False, Reason=Unschedulable)
I0110 19:49:10.567256  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-23: (1.765796ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39324]
I0110 19:49:10.567337  121338 wrap.go:47] POST /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/events: (1.510481ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39326]
I0110 19:49:10.567399  121338 wrap.go:47] PUT /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-23/status: (1.91547ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39322]
I0110 19:49:10.568874  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-23: (1.086847ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39322]
I0110 19:49:10.569090  121338 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0110 19:49:10.569206  121338 scheduling_queue.go:821] About to try and schedule pod preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-18
I0110 19:49:10.569221  121338 scheduler.go:454] Attempting to schedule pod: preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-18
I0110 19:49:10.569308  121338 factory.go:1070] Unable to schedule preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-18: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0110 19:49:10.569349  121338 factory.go:1175] Updating pod condition for preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-18 to (PodScheduled==False, Reason=Unschedulable)
I0110 19:49:10.570835  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-18: (1.109358ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39324]
I0110 19:49:10.571630  121338 wrap.go:47] PUT /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-18/status: (2.044952ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39322]
I0110 19:49:10.572270  121338 wrap.go:47] PATCH /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/events/ppod-18.157894802fecddf2: (2.220746ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39328]
I0110 19:49:10.573368  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-18: (1.211633ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39322]
I0110 19:49:10.573737  121338 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0110 19:49:10.573965  121338 scheduling_queue.go:821] About to try and schedule pod preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-23
I0110 19:49:10.573985  121338 scheduler.go:454] Attempting to schedule pod: preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-23
I0110 19:49:10.574113  121338 factory.go:1070] Unable to schedule preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-23: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0110 19:49:10.574166  121338 factory.go:1175] Updating pod condition for preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-23 to (PodScheduled==False, Reason=Unschedulable)
I0110 19:49:10.575546  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-23: (1.112355ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39324]
I0110 19:49:10.576473  121338 wrap.go:47] PUT /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-23/status: (2.009527ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39328]
I0110 19:49:10.577165  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/preemptor-pod: (1.191396ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39324]
I0110 19:49:10.577543  121338 preemption_test.go:583] Check unschedulable pods still exist and were never scheduled...
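preemption_test.go:583 marks the switch from load generation to verification: from here on, the GET .../pods/ppod-0, ppod-1, ... calls interleaved with the remaining scheduling attempts are the test reading each low-priority pod back to assert it still exists and was never bound. A minimal sketch of that check, assuming a recent client-go (checkNeverScheduled is an illustrative name, not the test's helper):

import (
	"context"
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
)

// checkNeverScheduled verifies that pods ppod-0 .. ppod-(n-1) still exist
// and have no node assigned, matching the GET calls below.
func checkNeverScheduled(ctx context.Context, cs kubernetes.Interface, ns string, n int) error {
	for i := 0; i < n; i++ {
		name := fmt.Sprintf("ppod-%d", i)
		pod, err := cs.CoreV1().Pods(ns).Get(ctx, name, metav1.GetOptions{})
		if err != nil {
			return fmt.Errorf("pod %s should still exist: %v", name, err)
		}
		if pod.Spec.NodeName != "" {
			return fmt.Errorf("pod %s was unexpectedly scheduled to %s", name, pod.Spec.NodeName)
		}
	}
	return nil
}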
I0110 19:49:10.577566  121338 wrap.go:47] PATCH /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/events/ppod-23.1578948040c4bfc1: (2.575702ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39330]
I0110 19:49:10.577974  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-23: (1.07167ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39328]
I0110 19:49:10.578594  121338 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0110 19:49:10.578780  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-0: (1.04758ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39324]
I0110 19:49:10.578790  121338 scheduling_queue.go:821] About to try and schedule pod preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-21
I0110 19:49:10.578804  121338 scheduler.go:454] Attempting to schedule pod: preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-21
I0110 19:49:10.578888  121338 factory.go:1070] Unable to schedule preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-21: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0110 19:49:10.578930  121338 factory.go:1175] Updating pod condition for preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-21 to (PodScheduled==False, Reason=Unschedulable)
I0110 19:49:10.580309  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-21: (1.143727ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39330]
I0110 19:49:10.580343  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-1: (1.195846ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39328]
I0110 19:49:10.580795  121338 wrap.go:47] POST /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/events: (1.339182ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39334]
I0110 19:49:10.581551  121338 wrap.go:47] PUT /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-21/status: (2.071578ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39332]
I0110 19:49:10.581799  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-2: (1.029115ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39328]
I0110 19:49:10.582895  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-21: (932.276µs) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39334]
I0110 19:49:10.583122  121338 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0110 19:49:10.583166  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-3: (1.003202ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39330]
I0110 19:49:10.583276  121338 scheduling_queue.go:821] About to try and schedule pod preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-16
I0110 19:49:10.583292  121338 scheduler.go:454] Attempting to schedule pod: preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-16
I0110 19:49:10.583430  121338 factory.go:1070] Unable to schedule preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-16: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0110 19:49:10.583483  121338 factory.go:1175] Updating pod condition for preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-16 to (PodScheduled==False, Reason=Unschedulable)
I0110 19:49:10.584673  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-4: (935.885µs) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39330]
I0110 19:49:10.585459  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-16: (1.770613ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39334]
I0110 19:49:10.585923  121338 wrap.go:47] PATCH /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/events/ppod-16.157894802fa3e1bc: (1.784611ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39338]
I0110 19:49:10.585958  121338 wrap.go:47] PUT /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-16/status: (1.815344ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39336]
I0110 19:49:10.586320  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-5: (1.339031ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39330]
I0110 19:49:10.587728  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-6: (1.034452ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39338]
I0110 19:49:10.587728  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-16: (1.087228ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39336]
I0110 19:49:10.588011  121338 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0110 19:49:10.588262  121338 scheduling_queue.go:821] About to try and schedule pod preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-21
I0110 19:49:10.588282  121338 scheduler.go:454] Attempting to schedule pod: preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-21
I0110 19:49:10.588399  121338 factory.go:1070] Unable to schedule preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-21: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0110 19:49:10.588447  121338 factory.go:1175] Updating pod condition for preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-21 to (PodScheduled==False, Reason=Unschedulable)
I0110 19:49:10.589200  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-7: (1.0618ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39336]
I0110 19:49:10.589886  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-21: (1.029057ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39340]
I0110 19:49:10.590328  121338 wrap.go:47] PUT /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-21/status: (1.659521ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39334]
I0110 19:49:10.590528  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-8: (942.456µs) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39336]
I0110 19:49:10.591792  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-21: (997.379µs) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39334]
I0110 19:49:10.591804  121338 wrap.go:47] PATCH /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/events/ppod-21.15789480419734c8: (2.485181ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39342]
I0110 19:49:10.592034  121338 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0110 19:49:10.592198  121338 scheduling_queue.go:821] About to try and schedule pod preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-19
I0110 19:49:10.592214  121338 scheduler.go:454] Attempting to schedule pod: preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-19
I0110 19:49:10.592322  121338 factory.go:1070] Unable to schedule preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-19: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0110 19:49:10.592368  121338 factory.go:1175] Updating pod condition for preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-19 to (PodScheduled==False, Reason=Unschedulable)
I0110 19:49:10.592525  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-9: (1.128553ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39340]
I0110 19:49:10.594174  121338 wrap.go:47] POST /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/events: (1.405456ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39334]
I0110 19:49:10.594557  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-10: (1.521385ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39344]
I0110 19:49:10.594738  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-19: (2.044378ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39340]
I0110 19:49:10.595051  121338 wrap.go:47] PUT /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-19/status: (2.425801ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39342]
I0110 19:49:10.595921  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-11: (1.002352ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39344]
I0110 19:49:10.596342  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-19: (880.709µs) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39340]
I0110 19:49:10.596596  121338 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0110 19:49:10.596737  121338 scheduling_queue.go:821] About to try and schedule pod preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-17
I0110 19:49:10.596752  121338 scheduler.go:454] Attempting to schedule pod: preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-17
I0110 19:49:10.596816  121338 factory.go:1070] Unable to schedule preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-17: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0110 19:49:10.596863  121338 factory.go:1175] Updating pod condition for preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-17 to (PodScheduled==False, Reason=Unschedulable)
I0110 19:49:10.597584  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-12: (1.197247ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39344]
I0110 19:49:10.598590  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-17: (1.462549ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39334]
I0110 19:49:10.599039  121338 wrap.go:47] PUT /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-17/status: (1.915663ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39340]
I0110 19:49:10.599105  121338 wrap.go:47] POST /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/events: (1.518619ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39346]
I0110 19:49:10.599469  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-13: (1.44436ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39344]
I0110 19:49:10.600467  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-17: (1.020546ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39340]
I0110 19:49:10.600757  121338 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0110 19:49:10.600849  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-14: (961.607µs) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39344]
I0110 19:49:10.600996  121338 scheduling_queue.go:821] About to try and schedule pod preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-19
I0110 19:49:10.601038  121338 scheduler.go:454] Attempting to schedule pod: preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-19
I0110 19:49:10.601152  121338 factory.go:1070] Unable to schedule preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-19: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0110 19:49:10.601254  121338 factory.go:1175] Updating pod condition for preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-19 to (PodScheduled==False, Reason=Unschedulable)
I0110 19:49:10.602062  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-15: (906.267µs) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39340]
I0110 19:49:10.602634  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-19: (1.207051ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39334]
I0110 19:49:10.604136  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-16: (1.695107ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39340]
I0110 19:49:10.604169  121338 wrap.go:47] PATCH /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/events/ppod-19.1578948042643c58: (2.372666ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39350]
I0110 19:49:10.604277  121338 wrap.go:47] PUT /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-19/status: (2.550071ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39348]
I0110 19:49:10.605899  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-17: (1.19323ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39340]
I0110 19:49:10.606155  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-19: (1.410839ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39334]
I0110 19:49:10.606385  121338 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0110 19:49:10.606537  121338 scheduling_queue.go:821] About to try and schedule pod preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-17
I0110 19:49:10.606552  121338 scheduler.go:454] Attempting to schedule pod: preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-17
I0110 19:49:10.606664  121338 factory.go:1070] Unable to schedule preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-17: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0110 19:49:10.606700  121338 factory.go:1175] Updating pod condition for preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-17 to (PodScheduled==False, Reason=Unschedulable)
I0110 19:49:10.607639  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-18: (1.337122ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39340]
I0110 19:49:10.608200  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-17: (1.022964ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39352]
I0110 19:49:10.608698  121338 wrap.go:47] PUT /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-17/status: (1.800167ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39334]
I0110 19:49:10.609168  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-19: (1.101546ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39340]
I0110 19:49:10.610469  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-17: (1.375714ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39334]
I0110 19:49:10.610555  121338 wrap.go:47] PATCH /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/events/ppod-17.1578948042a8d5bb: (3.107371ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39354]
I0110 19:49:10.610996  121338 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0110 19:49:10.611241  121338 scheduling_queue.go:821] About to try and schedule pod preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-15
I0110 19:49:10.611260  121338 scheduler.go:454] Attempting to schedule pod: preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-15
I0110 19:49:10.611286  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-20: (1.116244ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39340]
I0110 19:49:10.611340  121338 factory.go:1070] Unable to schedule preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-15: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0110 19:49:10.611382  121338 factory.go:1175] Updating pod condition for preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-15 to (PodScheduled==False, Reason=Unschedulable)
I0110 19:49:10.612735  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-15: (879.086µs) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39352]
I0110 19:49:10.613404  121338 wrap.go:47] PUT /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-15/status: (1.544685ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39334]
I0110 19:49:10.613898  121338 wrap.go:47] POST /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/events: (1.791596ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39358]
I0110 19:49:10.614265  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-21: (2.087145ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39356]
I0110 19:49:10.615042  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-15: (1.170405ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39334]
I0110 19:49:10.615321  121338 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0110 19:49:10.615545  121338 scheduling_queue.go:821] About to try and schedule pod preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-12
I0110 19:49:10.615563  121338 scheduler.go:454] Attempting to schedule pod: preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-12
I0110 19:49:10.615699  121338 factory.go:1070] Unable to schedule preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-12: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0110 19:49:10.615744  121338 factory.go:1175] Updating pod condition for preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-12 to (PodScheduled==False, Reason=Unschedulable)
I0110 19:49:10.616056  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-22: (1.296562ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39358]
I0110 19:49:10.617135  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-12: (1.151517ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39352]
I0110 19:49:10.617607  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-23: (973.005µs) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39360]
I0110 19:49:10.618045  121338 wrap.go:47] PUT /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-12/status: (2.075553ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39334]
I0110 19:49:10.619026  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-24: (1.025269ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39360]
I0110 19:49:10.619316  121338 wrap.go:47] PATCH /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/events/ppod-12.157894802ef2c4a8: (2.985781ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39358]
I0110 19:49:10.619586  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-12: (1.050563ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39334]
I0110 19:49:10.619798  121338 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0110 19:49:10.619945  121338 scheduling_queue.go:821] About to try and schedule pod preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-15
I0110 19:49:10.619959  121338 scheduler.go:454] Attempting to schedule pod: preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-15
I0110 19:49:10.620035  121338 factory.go:1070] Unable to schedule preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-15: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0110 19:49:10.620078  121338 factory.go:1175] Updating pod condition for preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-15 to (PodScheduled==False, Reason=Unschedulable)
I0110 19:49:10.620684  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-25: (1.063427ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39360]
I0110 19:49:10.621899  121338 wrap.go:47] PUT /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-15/status: (1.581229ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39334]
I0110 19:49:10.622410  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-26: (1.23198ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39360]
I0110 19:49:10.622546  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-15: (1.697273ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39352]
I0110 19:49:10.623334  121338 wrap.go:47] PATCH /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/events/ppod-15.1578948043865e39: (2.443193ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39362]
I0110 19:49:10.623351  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-15: (1.038364ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39334]
I0110 19:49:10.623703  121338 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0110 19:49:10.623847  121338 scheduling_queue.go:821] About to try and schedule pod preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-14
I0110 19:49:10.623864  121338 scheduler.go:454] Attempting to schedule pod: preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-14
I0110 19:49:10.623923  121338 factory.go:1070] Unable to schedule preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-14: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0110 19:49:10.623963  121338 factory.go:1175] Updating pod condition for preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-14 to (PodScheduled==False, Reason=Unschedulable)
I0110 19:49:10.624060  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-27: (1.070416ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39360]
I0110 19:49:10.625139  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-14: (861.272µs) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39352]
I0110 19:49:10.626016  121338 wrap.go:47] POST /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/events: (1.69437ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39360]
I0110 19:49:10.626373  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-28: (1.658069ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39364]
I0110 19:49:10.626458  121338 wrap.go:47] PUT /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-14/status: (2.26423ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39362]
I0110 19:49:10.627829  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-14: (970.152µs) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39352]
I0110 19:49:10.627889  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-29: (1.13801ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39360]
I0110 19:49:10.628024  121338 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0110 19:49:10.628263  121338 scheduling_queue.go:821] About to try and schedule pod preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-13
I0110 19:49:10.628282  121338 scheduler.go:454] Attempting to schedule pod: preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-13
I0110 19:49:10.628366  121338 factory.go:1070] Unable to schedule preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-13: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0110 19:49:10.628408  121338 factory.go:1175] Updating pod condition for preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-13 to (PodScheduled==False, Reason=Unschedulable)
I0110 19:49:10.629258  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-30: (1.049871ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39360]
I0110 19:49:10.630215  121338 wrap.go:47] PUT /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-13/status: (1.584037ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39352]
I0110 19:49:10.630884  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-31: (1.183533ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39360]
I0110 19:49:10.630956  121338 wrap.go:47] POST /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/events: (1.979081ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39366]
I0110 19:49:10.630957  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-13: (1.078796ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39368]
I0110 19:49:10.631952  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-13: (963.72µs) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39352]
I0110 19:49:10.632410  121338 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0110 19:49:10.632603  121338 scheduling_queue.go:821] About to try and schedule pod preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-14
I0110 19:49:10.632619  121338 scheduler.go:454] Attempting to schedule pod: preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-14
I0110 19:49:10.632679  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-32: (1.040965ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39360]
I0110 19:49:10.632702  121338 factory.go:1070] Unable to schedule preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-14: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0110 19:49:10.632746  121338 factory.go:1175] Updating pod condition for preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-14 to (PodScheduled==False, Reason=Unschedulable)
I0110 19:49:10.634336  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-33: (962.735µs) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39370]
I0110 19:49:10.634478  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-14: (1.427683ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39368]
I0110 19:49:10.634562  121338 wrap.go:47] PUT /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-14/status: (1.555513ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39366]
I0110 19:49:10.635754  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-34: (1.061931ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39370]
I0110 19:49:10.635872  121338 wrap.go:47] PATCH /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/events/ppod-14.1578948044465a88: (2.514042ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39372]
I0110 19:49:10.636336  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-14: (1.477158ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39368]
I0110 19:49:10.636643  121338 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0110 19:49:10.636820  121338 scheduling_queue.go:821] About to try and schedule pod preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-13
I0110 19:49:10.636837  121338 scheduler.go:454] Attempting to schedule pod: preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-13
I0110 19:49:10.636908  121338 factory.go:1070] Unable to schedule preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-13: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0110 19:49:10.636980  121338 factory.go:1175] Updating pod condition for preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-13 to (PodScheduled==False, Reason=Unschedulable)
I0110 19:49:10.637095  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-35: (1.010167ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39370]
I0110 19:49:10.638715  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-13: (1.42783ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39370]
I0110 19:49:10.638715  121338 wrap.go:47] PUT /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-13/status: (1.515027ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39372]
I0110 19:49:10.638766  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-36: (1.293363ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39366]
I0110 19:49:10.640305  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-37: (1.044732ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39366]
I0110 19:49:10.640803  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-13: (1.061349ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39372]
I0110 19:49:10.641002  121338 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0110 19:49:10.641167  121338 scheduling_queue.go:821] About to try and schedule pod preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-11
I0110 19:49:10.641184  121338 scheduler.go:454] Attempting to schedule pod: preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-11
I0110 19:49:10.641289  121338 factory.go:1070] Unable to schedule preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-11: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0110 19:49:10.641329  121338 factory.go:1175] Updating pod condition for preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-11 to (PodScheduled==False, Reason=Unschedulable)
I0110 19:49:10.641550  121338 wrap.go:47] PATCH /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/events/ppod-13.15789480448a3237: (3.131432ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39374]
I0110 19:49:10.643222  121338 wrap.go:47] POST /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/events: (1.308595ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39374]
I0110 19:49:10.643784  121338 wrap.go:47] PUT /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-11/status: (2.059034ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39372]
I0110 19:49:10.644066  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-38: (3.330935ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39366]
I0110 19:49:10.644523  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-11: (2.206831ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39370]
I0110 19:49:10.645535  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-11: (1.39648ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39372]
I0110 19:49:10.645619  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-39: (1.235648ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39366]
I0110 19:49:10.645890  121338 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0110 19:49:10.646022  121338 scheduling_queue.go:821] About to try and schedule pod preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-6
I0110 19:49:10.646041  121338 scheduler.go:454] Attempting to schedule pod: preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-6
I0110 19:49:10.646135  121338 factory.go:1070] Unable to schedule preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-6: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0110 19:49:10.646188  121338 factory.go:1175] Updating pod condition for preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-6 to (PodScheduled==False, Reason=Unschedulable)
I0110 19:49:10.647158  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-40: (1.14706ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39370]
I0110 19:49:10.647933  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-6: (1.139223ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39376]
I0110 19:49:10.648750  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-41: (1.210045ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39370]
I0110 19:49:10.648977  121338 wrap.go:47] PUT /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-6/status: (2.565427ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39374]
I0110 19:49:10.649807  121338 wrap.go:47] PATCH /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/events/ppod-6.157894802e4bab93: (2.688561ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39378]
I0110 19:49:10.650179  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-42: (1.018484ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39370]
I0110 19:49:10.650839  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-6: (1.003137ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39374]
I0110 19:49:10.651110  121338 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0110 19:49:10.651352  121338 scheduling_queue.go:821] About to try and schedule pod preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-10
I0110 19:49:10.651389  121338 scheduler.go:454] Attempting to schedule pod: preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-10
I0110 19:49:10.651619  121338 factory.go:1070] Unable to schedule preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-10: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0110 19:49:10.651679  121338 factory.go:1175] Updating pod condition for preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-10 to (PodScheduled==False, Reason=Unschedulable)
I0110 19:49:10.651720  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-43: (1.020701ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39378]
I0110 19:49:10.653597  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-44: (1.564894ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39376]
I0110 19:49:10.653859  121338 wrap.go:47] POST /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/events: (1.623309ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39380]
I0110 19:49:10.653939  121338 wrap.go:47] PUT /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-10/status: (2.02183ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39374]
I0110 19:49:10.654820  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-10: (2.776125ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39378]
I0110 19:49:10.655356  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-10: (988.72µs) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39374]
I0110 19:49:10.655450  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-45: (1.509177ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39376]
I0110 19:49:10.655722  121338 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0110 19:49:10.655915  121338 scheduling_queue.go:821] About to try and schedule pod preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-7
I0110 19:49:10.655925  121338 scheduler.go:454] Attempting to schedule pod: preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-7
I0110 19:49:10.656069  121338 factory.go:1070] Unable to schedule preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-7: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0110 19:49:10.656106  121338 factory.go:1175] Updating pod condition for preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-7 to (PodScheduled==False, Reason=Unschedulable)
I0110 19:49:10.657396  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-46: (1.301924ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39378]
I0110 19:49:10.660310  121338 wrap.go:47] PATCH /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/events/ppod-7.157894802e01d001: (3.204642ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39384]
I0110 19:49:10.660888  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-47: (2.20224ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39382]
I0110 19:49:10.661635  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-7: (2.644851ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39378]
I0110 19:49:10.661926  121338 wrap.go:47] PUT /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-7/status: (4.516302ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39380]
I0110 19:49:10.665134  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-48: (3.807925ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39384]
I0110 19:49:10.665140  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-7: (2.820383ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39378]
I0110 19:49:10.665567  121338 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0110 19:49:10.665770  121338 scheduling_queue.go:821] About to try and schedule pod preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-8
I0110 19:49:10.665785  121338 scheduler.go:454] Attempting to schedule pod: preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-8
I0110 19:49:10.665856  121338 factory.go:1070] Unable to schedule preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-8: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0110 19:49:10.665897  121338 factory.go:1175] Updating pod condition for preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-8 to (PodScheduled==False, Reason=Unschedulable)
I0110 19:49:10.667558  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-49: (1.963964ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39384]
I0110 19:49:10.667655  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-8: (1.429072ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39386]
I0110 19:49:10.668146  121338 preemption_test.go:598] Cleaning up all pods...
I0110 19:49:10.669403  121338 wrap.go:47] POST /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/events: (2.840167ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39388]
I0110 19:49:10.670054  121338 wrap.go:47] PUT /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-8/status: (2.276097ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39390]
I0110 19:49:10.682425  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-8: (11.782262ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39388]
I0110 19:49:10.682425  121338 wrap.go:47] DELETE /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-0: (12.764455ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39384]
I0110 19:49:10.682833  121338 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0110 19:49:10.682987  121338 scheduling_queue.go:821] About to try and schedule pod preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-1
I0110 19:49:10.683012  121338 scheduler.go:454] Attempting to schedule pod: preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-1
I0110 19:49:10.683132  121338 factory.go:1070] Unable to schedule preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-1: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0110 19:49:10.683188  121338 factory.go:1175] Updating pod condition for preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-1 to (PodScheduled==False, Reason=Unschedulable)
I0110 19:49:10.686003  121338 wrap.go:47] PUT /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-1/status: (2.560083ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39386]
I0110 19:49:10.686734  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-1: (2.711139ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39396]
I0110 19:49:10.687007  121338 store.go:355] GuaranteedUpdate of /650bb268-db60-4e7c-a018-1efbf068072c/pods/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-1 failed because of a conflict, going to retry
I0110 19:49:10.688110  121338 wrap.go:47] POST /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/events: (3.480938ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39398]
I0110 19:49:10.688889  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-1: (2.258835ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39386]
I0110 19:49:10.689300  121338 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0110 19:49:10.689480  121338 scheduling_queue.go:821] About to try and schedule pod preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-5
I0110 19:49:10.689512  121338 scheduler.go:454] Attempting to schedule pod: preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-5
I0110 19:49:10.689589  121338 factory.go:1070] Unable to schedule preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-5: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0110 19:49:10.689662  121338 factory.go:1175] Updating pod condition for preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-5 to (PodScheduled==False, Reason=Unschedulable)
I0110 19:49:10.690536  121338 wrap.go:47] DELETE /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-1: (7.176685ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39388]
I0110 19:49:10.691254  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-5: (1.225484ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39396]
I0110 19:49:10.691617  121338 wrap.go:47] POST /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/events: (1.266952ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39400]
I0110 19:49:10.693632  121338 wrap.go:47] PUT /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-5/status: (3.692011ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39398]
I0110 19:49:10.695275  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-5: (1.233542ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39400]
I0110 19:49:10.695537  121338 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0110 19:49:10.695679  121338 scheduling_queue.go:821] About to try and schedule pod preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-4
I0110 19:49:10.695696  121338 scheduler.go:454] Attempting to schedule pod: preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-4
I0110 19:49:10.695680  121338 wrap.go:47] DELETE /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-2: (4.211041ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39396]
I0110 19:49:10.695765  121338 factory.go:1070] Unable to schedule preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-4: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0110 19:49:10.695809  121338 factory.go:1175] Updating pod condition for preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-4 to (PodScheduled==False, Reason=Unschedulable)
I0110 19:49:10.697565  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-4: (1.584096ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39400]
I0110 19:49:10.697828  121338 wrap.go:47] POST /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/events: (1.603144ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39402]
I0110 19:49:10.698917  121338 wrap.go:47] PUT /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-4/status: (2.864715ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39388]
I0110 19:49:10.699678  121338 wrap.go:47] DELETE /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-3: (3.45075ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39404]
I0110 19:49:10.700916  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-4: (1.073331ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39402]
I0110 19:49:10.701624  121338 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0110 19:49:10.701857  121338 scheduling_queue.go:821] About to try and schedule pod preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-5
I0110 19:49:10.701899  121338 scheduler.go:454] Attempting to schedule pod: preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-5
I0110 19:49:10.702105  121338 factory.go:1070] Unable to schedule preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-5: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0110 19:49:10.702179  121338 factory.go:1175] Updating pod condition for preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-5 to (PodScheduled==False, Reason=Unschedulable)
I0110 19:49:10.703864  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-5: (1.411195ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39402]
I0110 19:49:10.704932  121338 wrap.go:47] PUT /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-5/status: (2.286447ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39400]
I0110 19:49:10.705198  121338 wrap.go:47] DELETE /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-4: (5.204391ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39404]
I0110 19:49:10.706556  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-5: (1.228032ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39400]
I0110 19:49:10.706793  121338 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0110 19:49:10.706954  121338 scheduling_queue.go:821] About to try and schedule pod preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-8
I0110 19:49:10.706986  121338 scheduler.go:454] Attempting to schedule pod: preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-8
I0110 19:49:10.707165  121338 factory.go:1070] Unable to schedule preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-8: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0110 19:49:10.707203  121338 wrap.go:47] PATCH /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/events/ppod-5.157894804830cfb8: (3.817774ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39408]
I0110 19:49:10.707225  121338 factory.go:1175] Updating pod condition for preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-8 to (PodScheduled==False, Reason=Unschedulable)
I0110 19:49:10.708861  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-8: (1.411201ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39400]
I0110 19:49:10.709249  121338 wrap.go:47] PUT /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-8/status: (1.766946ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39402]
I0110 19:49:10.709794  121338 wrap.go:47] DELETE /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-5: (4.318162ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39404]
I0110 19:49:10.710761  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-8: (1.102103ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39402]
I0110 19:49:10.710870  121338 wrap.go:47] PATCH /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/events/ppod-8.1578948046c639f3: (2.951063ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39408]
I0110 19:49:10.710989  121338 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0110 19:49:10.713480  121338 scheduling_queue.go:821] About to try and schedule pod preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-6
I0110 19:49:10.713532  121338 scheduler.go:450] Skip schedule deleting pod: preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-6
I0110 19:49:10.714784  121338 wrap.go:47] DELETE /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-6: (4.038204ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39404]
I0110 19:49:10.715178  121338 wrap.go:47] POST /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/events: (1.342755ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39402]
I0110 19:49:10.717895  121338 scheduling_queue.go:821] About to try and schedule pod preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-7
I0110 19:49:10.717944  121338 scheduler.go:450] Skip schedule deleting pod: preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-7
I0110 19:49:10.718949  121338 wrap.go:47] DELETE /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-7: (3.825791ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39404]
I0110 19:49:10.719725  121338 wrap.go:47] POST /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/events: (1.518183ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39402]
I0110 19:49:10.721728  121338 scheduling_queue.go:821] About to try and schedule pod preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-8
I0110 19:49:10.721765  121338 scheduler.go:450] Skip schedule deleting pod: preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-8
I0110 19:49:10.722308  121338 reflector.go:215] k8s.io/client-go/informers/factory.go:132: forcing resync
I0110 19:49:10.722956  121338 wrap.go:47] DELETE /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-8: (3.667469ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39404]
I0110 19:49:10.723443  121338 wrap.go:47] POST /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/events: (1.3539ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39402]
I0110 19:49:10.723986  121338 reflector.go:215] k8s.io/client-go/informers/factory.go:132: forcing resync
I0110 19:49:10.724374  121338 reflector.go:215] k8s.io/client-go/informers/factory.go:132: forcing resync
I0110 19:49:10.725220  121338 reflector.go:215] k8s.io/client-go/informers/factory.go:132: forcing resync
I0110 19:49:10.725788  121338 scheduling_queue.go:821] About to try and schedule pod preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-9
I0110 19:49:10.725831  121338 scheduler.go:450] Skip schedule deleting pod: preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-9
I0110 19:49:10.726323  121338 reflector.go:215] k8s.io/client-go/informers/factory.go:132: forcing resync
I0110 19:49:10.727627  121338 wrap.go:47] DELETE /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-9: (4.298434ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39404]
I0110 19:49:10.727639  121338 wrap.go:47] POST /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/events: (1.568318ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39402]
I0110 19:49:10.732056  121338 scheduling_queue.go:821] About to try and schedule pod preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-10
I0110 19:49:10.732097  121338 scheduler.go:450] Skip schedule deleting pod: preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-10
I0110 19:49:10.732917  121338 wrap.go:47] DELETE /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-10: (4.612608ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39404]
I0110 19:49:10.734187  121338 wrap.go:47] POST /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/events: (1.740575ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39402]
I0110 19:49:10.736355  121338 scheduling_queue.go:821] About to try and schedule pod preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-11
I0110 19:49:10.736393  121338 scheduler.go:450] Skip schedule deleting pod: preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-11
I0110 19:49:10.738113  121338 wrap.go:47] DELETE /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-11: (4.771889ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39404]
I0110 19:49:10.738289  121338 wrap.go:47] POST /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/events: (1.490038ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39402]
I0110 19:49:10.740859  121338 scheduling_queue.go:821] About to try and schedule pod preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-12
I0110 19:49:10.740904  121338 scheduler.go:450] Skip schedule deleting pod: preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-12
I0110 19:49:10.742391  121338 wrap.go:47] DELETE /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-12: (3.905783ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39404]
I0110 19:49:10.742897  121338 wrap.go:47] POST /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/events: (1.755836ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39402]
I0110 19:49:10.745288  121338 scheduling_queue.go:821] About to try and schedule pod preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-13
I0110 19:49:10.745325  121338 scheduler.go:450] Skip schedule deleting pod: preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-13
I0110 19:49:10.746616  121338 wrap.go:47] DELETE /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-13: (3.852433ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39404]
I0110 19:49:10.747154  121338 wrap.go:47] POST /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/events: (1.525136ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39402]
I0110 19:49:10.749475  121338 scheduling_queue.go:821] About to try and schedule pod preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-14
I0110 19:49:10.749529  121338 scheduler.go:450] Skip schedule deleting pod: preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-14
I0110 19:49:10.750931  121338 wrap.go:47] DELETE /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-14: (4.034797ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39404]
I0110 19:49:10.751153  121338 wrap.go:47] POST /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/events: (1.336977ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39402]
I0110 19:49:10.753744  121338 scheduling_queue.go:821] About to try and schedule pod preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-15
I0110 19:49:10.753788  121338 scheduler.go:450] Skip schedule deleting pod: preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-15
I0110 19:49:10.755372  121338 wrap.go:47] DELETE /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-15: (3.982701ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39404]
I0110 19:49:10.755437  121338 wrap.go:47] POST /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/events: (1.404924ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39400]
I0110 19:49:10.758301  121338 scheduling_queue.go:821] About to try and schedule pod preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-16
I0110 19:49:10.758333  121338 scheduler.go:450] Skip schedule deleting pod: preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-16
I0110 19:49:10.760173  121338 wrap.go:47] POST /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/events: (1.581356ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39404]
I0110 19:49:10.760192  121338 wrap.go:47] DELETE /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-16: (4.379485ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39400]
I0110 19:49:10.762946  121338 scheduling_queue.go:821] About to try and schedule pod preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-17
I0110 19:49:10.762981  121338 scheduler.go:450] Skip schedule deleting pod: preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-17
I0110 19:49:10.764274  121338 wrap.go:47] DELETE /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-17: (3.720996ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39400]
I0110 19:49:10.764577  121338 wrap.go:47] POST /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/events: (1.365835ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39404]
I0110 19:49:10.767107  121338 scheduling_queue.go:821] About to try and schedule pod preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-18
I0110 19:49:10.767144  121338 scheduler.go:450] Skip schedule deleting pod: preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-18
I0110 19:49:10.768669  121338 wrap.go:47] DELETE /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-18: (3.980496ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39400]
I0110 19:49:10.768760  121338 wrap.go:47] POST /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/events: (1.315548ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39404]
I0110 19:49:10.771470  121338 scheduling_queue.go:821] About to try and schedule pod preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-19
I0110 19:49:10.771535  121338 scheduler.go:450] Skip schedule deleting pod: preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-19
I0110 19:49:10.772670  121338 wrap.go:47] DELETE /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-19: (3.661504ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39404]
I0110 19:49:10.773725  121338 wrap.go:47] POST /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/events: (1.902707ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39400]
I0110 19:49:10.775481  121338 scheduling_queue.go:821] About to try and schedule pod preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-20
I0110 19:49:10.775545  121338 scheduler.go:450] Skip schedule deleting pod: preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-20
I0110 19:49:10.776884  121338 wrap.go:47] DELETE /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-20: (3.830064ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39404]
I0110 19:49:10.777333  121338 wrap.go:47] POST /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/events: (1.498447ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39400]
I0110 19:49:10.780125  121338 scheduling_queue.go:821] About to try and schedule pod preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-21
I0110 19:49:10.780162  121338 scheduler.go:450] Skip schedule deleting pod: preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-21
I0110 19:49:10.781318  121338 wrap.go:47] DELETE /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-21: (4.092701ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39404]
I0110 19:49:10.782008  121338 wrap.go:47] POST /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/events: (1.469004ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39400]
I0110 19:49:10.784173  121338 scheduling_queue.go:821] About to try and schedule pod preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-22
I0110 19:49:10.784209  121338 scheduler.go:450] Skip schedule deleting pod: preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-22
I0110 19:49:10.785598  121338 wrap.go:47] DELETE /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-22: (3.763213ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39404]
I0110 19:49:10.786250  121338 wrap.go:47] POST /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/events: (1.701003ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39400]
I0110 19:49:10.788389  121338 scheduling_queue.go:821] About to try and schedule pod preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-23
I0110 19:49:10.788428  121338 scheduler.go:450] Skip schedule deleting pod: preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-23
I0110 19:49:10.789701  121338 wrap.go:47] DELETE /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-23: (3.788636ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39404]
I0110 19:49:10.790004  121338 wrap.go:47] POST /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/events: (1.314532ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39400]
I0110 19:49:10.792613  121338 scheduling_queue.go:821] About to try and schedule pod preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-24
I0110 19:49:10.792647  121338 scheduler.go:450] Skip schedule deleting pod: preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-24
I0110 19:49:10.794152  121338 wrap.go:47] DELETE /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-24: (4.166317ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39404]
I0110 19:49:10.794590  121338 wrap.go:47] POST /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/events: (1.649974ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39400]
I0110 19:49:10.797177  121338 scheduling_queue.go:821] About to try and schedule pod preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-25
I0110 19:49:10.797211  121338 scheduler.go:450] Skip schedule deleting pod: preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-25
I0110 19:49:10.799011  121338 wrap.go:47] DELETE /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-25: (4.5339ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39404]
I0110 19:49:10.799065  121338 wrap.go:47] POST /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/events: (1.585705ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39400]
I0110 19:49:10.801908  121338 scheduling_queue.go:821] About to try and schedule pod preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-26
I0110 19:49:10.802001  121338 scheduler.go:450] Skip schedule deleting pod: preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-26
I0110 19:49:10.803400  121338 wrap.go:47] DELETE /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-26: (4.02242ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39400]
I0110 19:49:10.803929  121338 wrap.go:47] POST /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/events: (1.515545ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39404]
I0110 19:49:10.806079  121338 scheduling_queue.go:821] About to try and schedule pod preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-27
I0110 19:49:10.806124  121338 scheduler.go:450] Skip schedule deleting pod: preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-27
I0110 19:49:10.807783  121338 wrap.go:47] POST /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/events: (1.370819ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39404]
I0110 19:49:10.807964  121338 wrap.go:47] DELETE /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-27: (4.193674ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39400]
I0110 19:49:10.810935  121338 scheduling_queue.go:821] About to try and schedule pod preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-28
I0110 19:49:10.810975  121338 scheduler.go:450] Skip schedule deleting pod: preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-28
I0110 19:49:10.812190  121338 wrap.go:47] DELETE /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-28: (3.84636ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39400]
I0110 19:49:10.812957  121338 wrap.go:47] POST /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/events: (1.669351ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39404]
I0110 19:49:10.815349  121338 scheduling_queue.go:821] About to try and schedule pod preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-29
I0110 19:49:10.815385  121338 scheduler.go:450] Skip schedule deleting pod: preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-29
I0110 19:49:10.816554  121338 wrap.go:47] DELETE /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-29: (3.948989ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39400]
I0110 19:49:10.817299  121338 wrap.go:47] POST /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/events: (1.592781ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39404]
I0110 19:49:10.819979  121338 scheduling_queue.go:821] About to try and schedule pod preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-30
I0110 19:49:10.820035  121338 scheduler.go:450] Skip schedule deleting pod: preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-30
I0110 19:49:10.821416  121338 wrap.go:47] DELETE /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-30: (4.227746ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39400]
I0110 19:49:10.821776  121338 wrap.go:47] POST /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/events: (1.458689ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39404]
I0110 19:49:10.824624  121338 scheduling_queue.go:821] About to try and schedule pod preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-31
I0110 19:49:10.824695  121338 scheduler.go:450] Skip schedule deleting pod: preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-31
I0110 19:49:10.826785  121338 wrap.go:47] DELETE /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-31: (5.023988ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39400]
I0110 19:49:10.827160  121338 wrap.go:47] POST /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/events: (1.432023ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39404]
I0110 19:49:10.829958  121338 scheduling_queue.go:821] About to try and schedule pod preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-32
I0110 19:49:10.830046  121338 scheduler.go:450] Skip schedule deleting pod: preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-32
I0110 19:49:10.831434  121338 wrap.go:47] DELETE /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-32: (4.215988ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39400]
I0110 19:49:10.831873  121338 wrap.go:47] POST /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/events: (1.476386ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39404]
I0110 19:49:10.834391  121338 scheduling_queue.go:821] About to try and schedule pod preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-33
I0110 19:49:10.834482  121338 scheduler.go:450] Skip schedule deleting pod: preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-33
I0110 19:49:10.835731  121338 wrap.go:47] DELETE /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-33: (3.887258ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39400]
I0110 19:49:10.836106  121338 wrap.go:47] POST /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/events: (1.31915ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39404]
I0110 19:49:10.838588  121338 scheduling_queue.go:821] About to try and schedule pod preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-34
I0110 19:49:10.838629  121338 scheduler.go:450] Skip schedule deleting pod: preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-34
I0110 19:49:10.840031  121338 wrap.go:47] DELETE /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-34: (3.939552ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39400]
I0110 19:49:10.840453  121338 wrap.go:47] POST /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/events: (1.56757ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39404]
I0110 19:49:10.842716  121338 scheduling_queue.go:821] About to try and schedule pod preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-35
I0110 19:49:10.842818  121338 scheduler.go:450] Skip schedule deleting pod: preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-35
I0110 19:49:10.843942  121338 wrap.go:47] DELETE /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-35: (3.597254ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39400]
I0110 19:49:10.845793  121338 wrap.go:47] POST /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/events: (2.727734ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39404]
I0110 19:49:10.847127  121338 scheduling_queue.go:821] About to try and schedule pod preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-36
I0110 19:49:10.847193  121338 scheduler.go:450] Skip schedule deleting pod: preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-36
I0110 19:49:10.848444  121338 wrap.go:47] DELETE /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-36: (4.097869ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39400]
I0110 19:49:10.849100  121338 wrap.go:47] POST /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/events: (1.620471ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39404]
I0110 19:49:10.851296  121338 scheduling_queue.go:821] About to try and schedule pod preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-37
I0110 19:49:10.851382  121338 scheduler.go:450] Skip schedule deleting pod: preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-37
I0110 19:49:10.853394  121338 wrap.go:47] POST /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/events: (1.639583ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39404]
I0110 19:49:10.853868  121338 wrap.go:47] DELETE /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-37: (5.051673ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39400]
I0110 19:49:10.858994  121338 scheduling_queue.go:821] About to try and schedule pod preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-38
I0110 19:49:10.859036  121338 scheduler.go:450] Skip schedule deleting pod: preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-38
I0110 19:49:10.860945  121338 wrap.go:47] DELETE /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-38: (6.729632ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39400]
I0110 19:49:10.863039  121338 wrap.go:47] POST /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/events: (3.297273ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39404]
I0110 19:49:10.866198  121338 scheduling_queue.go:821] About to try and schedule pod preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-39
I0110 19:49:10.866314  121338 scheduler.go:450] Skip schedule deleting pod: preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-39
I0110 19:49:10.866605  121338 wrap.go:47] DELETE /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-39: (4.498657ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39400]
I0110 19:49:10.868831  121338 wrap.go:47] POST /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/events: (1.601427ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39404]
I0110 19:49:10.869436  121338 scheduling_queue.go:821] About to try and schedule pod preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-40
I0110 19:49:10.869460  121338 scheduler.go:450] Skip schedule deleting pod: preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-40
I0110 19:49:10.870985  121338 wrap.go:47] POST /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/events: (1.286196ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39404]
I0110 19:49:10.872055  121338 wrap.go:47] DELETE /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-40: (5.139982ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39400]
I0110 19:49:10.876131  121338 scheduling_queue.go:821] About to try and schedule pod preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-41
I0110 19:49:10.876174  121338 scheduler.go:450] Skip schedule deleting pod: preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-41
I0110 19:49:10.877931  121338 wrap.go:47] DELETE /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-41: (5.482623ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39400]
I0110 19:49:10.880103  121338 wrap.go:47] POST /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/events: (3.224542ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39404]
I0110 19:49:10.884139  121338 scheduling_queue.go:821] About to try and schedule pod preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-42
I0110 19:49:10.884185  121338 scheduler.go:450] Skip schedule deleting pod: preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-42
I0110 19:49:10.888856  121338 wrap.go:47] POST /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/events: (4.320291ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39404]
I0110 19:49:10.890895  121338 wrap.go:47] DELETE /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-42: (12.593906ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39400]
I0110 19:49:10.895158  121338 scheduling_queue.go:821] About to try and schedule pod preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-43
I0110 19:49:10.895197  121338 scheduler.go:450] Skip schedule deleting pod: preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-43
I0110 19:49:10.897036  121338 wrap.go:47] POST /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/events: (1.554529ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39404]
I0110 19:49:10.897381  121338 wrap.go:47] DELETE /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-43: (4.949967ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39400]
I0110 19:49:10.900473  121338 scheduling_queue.go:821] About to try and schedule pod preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-44
I0110 19:49:10.900532  121338 scheduler.go:450] Skip schedule deleting pod: preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-44
I0110 19:49:10.901686  121338 wrap.go:47] DELETE /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-44: (3.890733ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39400]
I0110 19:49:10.902539  121338 wrap.go:47] POST /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/events: (1.71685ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39404]
I0110 19:49:10.904590  121338 scheduling_queue.go:821] About to try and schedule pod preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-45
I0110 19:49:10.904625  121338 scheduler.go:450] Skip schedule deleting pod: preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-45
I0110 19:49:10.906100  121338 wrap.go:47] DELETE /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-45: (4.075153ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39400]
I0110 19:49:10.906379  121338 wrap.go:47] POST /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/events: (1.396746ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39404]
I0110 19:49:10.909097  121338 scheduling_queue.go:821] About to try and schedule pod preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-46
I0110 19:49:10.909139  121338 scheduler.go:450] Skip schedule deleting pod: preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-46
I0110 19:49:10.910551  121338 wrap.go:47] DELETE /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-46: (3.922461ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39400]
I0110 19:49:10.911068  121338 wrap.go:47] POST /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/events: (1.675951ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39404]
I0110 19:49:10.913860  121338 scheduling_queue.go:821] About to try and schedule pod preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-47
I0110 19:49:10.913900  121338 scheduler.go:450] Skip schedule deleting pod: preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-47
I0110 19:49:10.915399  121338 wrap.go:47] DELETE /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-47: (4.415345ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39400]
I0110 19:49:10.915608  121338 wrap.go:47] POST /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/events: (1.417323ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39404]
I0110 19:49:10.918728  121338 scheduling_queue.go:821] About to try and schedule pod preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-48
I0110 19:49:10.918771  121338 scheduler.go:450] Skip schedule deleting pod: preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-48
I0110 19:49:10.920470  121338 wrap.go:47] DELETE /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-48: (4.655391ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39400]
I0110 19:49:10.920722  121338 wrap.go:47] POST /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/events: (1.700589ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39404]
I0110 19:49:10.923334  121338 scheduling_queue.go:821] About to try and schedule pod preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-49
I0110 19:49:10.923427  121338 scheduler.go:450] Skip schedule deleting pod: preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-49
I0110 19:49:10.924588  121338 wrap.go:47] DELETE /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-49: (3.768757ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39400]
I0110 19:49:10.925602  121338 wrap.go:47] POST /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/events: (1.502278ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39404]
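
The run of "Skip schedule deleting pod" lines above is the scheduler declining to place pods whose deletion is already in flight: once a pod carries a deletion timestamp, scheduling it would only race with the DELETE requests interleaved here. A minimal sketch of that guard, assuming only the client-go types; the package and helper names below are hypothetical, not the ones in scheduler.go:

package schedulerutil

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// skipPodSchedule reconstructs the guard behind the "Skip schedule
// deleting pod" lines: a pod with a set DeletionTimestamp is already
// being torn down, so it is dropped instead of scheduled.
func skipPodSchedule(pod *v1.Pod) bool {
	return pod.DeletionTimestamp != nil
}

func Example() {
	now := metav1.Now()
	pod := &v1.Pod{ObjectMeta: metav1.ObjectMeta{
		Name:              "ppod-38",
		DeletionTimestamp: &now,
	}}
	if skipPodSchedule(pod) {
		fmt.Println("Skip schedule deleting pod:", pod.Name)
	}
}
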
I0110 19:49:10.928782  121338 wrap.go:47] DELETE /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/rpod-0: (3.823458ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39400]
I0110 19:49:10.930309  121338 wrap.go:47] DELETE /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/rpod-1: (1.183785ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39400]
I0110 19:49:10.934783  121338 wrap.go:47] DELETE /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/preemptor-pod: (4.105408ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39400]
I0110 19:49:10.937518  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-0: (1.123589ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39400]
I0110 19:49:10.940050  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-1: (920.138µs) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39400]
I0110 19:49:10.942756  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-2: (1.141871ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39400]
I0110 19:49:10.945400  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-3: (1.117022ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39400]
I0110 19:49:10.947896  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-4: (986.825µs) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39400]
I0110 19:49:10.950376  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-5: (993.329µs) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39400]
I0110 19:49:10.952995  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-6: (1.037631ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39400]
I0110 19:49:10.955588  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-7: (954.711µs) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39400]
I0110 19:49:10.958167  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-8: (1.008093ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39400]
I0110 19:49:10.960889  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-9: (1.161364ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39400]
I0110 19:49:10.963593  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-10: (1.13912ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39400]
I0110 19:49:10.966352  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-11: (1.190309ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39400]
I0110 19:49:10.969199  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-12: (1.201494ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39400]
I0110 19:49:10.971786  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-13: (1.00616ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39400]
I0110 19:49:10.974667  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-14: (1.2133ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39400]
I0110 19:49:10.978564  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-15: (2.167377ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39400]
I0110 19:49:10.982178  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-16: (1.621935ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39400]
I0110 19:49:10.985956  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-17: (2.005654ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39400]
I0110 19:49:10.989294  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-18: (1.588472ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39400]
I0110 19:49:10.992632  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-19: (1.69213ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39400]
I0110 19:49:10.995969  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-20: (1.684307ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39400]
I0110 19:49:10.999322  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-21: (1.182972ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39400]
I0110 19:49:11.002208  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-22: (1.228503ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39400]
I0110 19:49:11.005820  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-23: (1.874107ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39400]
I0110 19:49:11.009447  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-24: (1.6098ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39400]
I0110 19:49:11.012656  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-25: (1.060825ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39400]
I0110 19:49:11.015358  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-26: (1.092762ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39400]
I0110 19:49:11.018394  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-27: (1.204483ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39400]
I0110 19:49:11.021559  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-28: (1.479397ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39400]
I0110 19:49:11.024509  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-29: (1.201293ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39400]
I0110 19:49:11.027282  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-30: (1.162993ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39400]
I0110 19:49:11.029823  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-31: (932.092µs) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39400]
I0110 19:49:11.032201  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-32: (857.853µs) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39400]
I0110 19:49:11.035081  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-33: (1.029524ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39400]
I0110 19:49:11.037676  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-34: (1.032732ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39400]
I0110 19:49:11.040320  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-35: (1.067322ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39400]
I0110 19:49:11.043029  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-36: (1.065788ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39400]
I0110 19:49:11.045696  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-37: (1.064885ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39400]
I0110 19:49:11.048308  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-38: (1.024561ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39400]
I0110 19:49:11.051247  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-39: (1.093578ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39400]
I0110 19:49:11.053944  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-40: (1.02212ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39400]
I0110 19:49:11.056719  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-41: (1.006614ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39400]
I0110 19:49:11.059518  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-42: (1.095894ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39400]
I0110 19:49:11.062554  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-43: (1.394981ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39400]
I0110 19:49:11.065247  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-44: (1.110867ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39400]
I0110 19:49:11.067894  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-45: (1.051993ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39400]
I0110 19:49:11.070658  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-46: (1.168963ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39400]
I0110 19:49:11.073339  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-47: (1.123265ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39400]
I0110 19:49:11.076115  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-48: (1.073372ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39400]
I0110 19:49:11.078926  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-49: (1.152153ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39400]
I0110 19:49:11.081744  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/rpod-0: (1.218907ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39400]
I0110 19:49:11.084698  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/rpod-1: (1.315136ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39400]
I0110 19:49:11.087420  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/preemptor-pod: (1.14129ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39400]
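
After the last DELETE, the harness issues one GET per pod and expects a 404 for each — the block of "GET .../pods/ppod-N: ... 404" lines above — confirming the namespace is clean before the next round of pods is created. A sketch of that wait loop, assuming the pre-context client-go signatures this build uses (Get takes no context argument); waitForPodGone is a hypothetical name:

package schedulerutil

import (
	"time"

	"k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/wait"
	"k8s.io/client-go/kubernetes"
)

// waitForPodGone polls until a GET for the pod returns NotFound,
// i.e. until the apiserver answers with the 404s seen above.
func waitForPodGone(cs kubernetes.Interface, ns, name string) error {
	return wait.Poll(100*time.Millisecond, 30*time.Second, func() (bool, error) {
		_, err := cs.CoreV1().Pods(ns).Get(name, metav1.GetOptions{})
		switch {
		case errors.IsNotFound(err):
			return true, nil // pod fully removed
		case err != nil:
			return false, err // unexpected error: abort the wait
		default:
			return false, nil // pod still exists: keep polling
		}
	})
}
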
I0110 19:49:11.090023  121338 wrap.go:47] POST /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods: (2.100796ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39400]
I0110 19:49:11.090202  121338 scheduling_queue.go:821] About to try and schedule pod preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/rpod-0
I0110 19:49:11.091606  121338 scheduler.go:454] Attempting to schedule pod: preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/rpod-0
I0110 19:49:11.091774  121338 scheduler_binder.go:211] AssumePodVolumes for pod "preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/rpod-0", node "node1"
I0110 19:49:11.091786  121338 scheduler_binder.go:221] AssumePodVolumes for pod "preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/rpod-0", node "node1": all PVCs bound and nothing to do
I0110 19:49:11.091833  121338 factory.go:1166] Attempting to bind rpod-0 to node1
I0110 19:49:11.094277  121338 wrap.go:47] POST /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/rpod-0/binding: (2.201205ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39404]
I0110 19:49:11.094595  121338 wrap.go:47] POST /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods: (2.546645ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39400]
I0110 19:49:11.094989  121338 scheduler.go:569] pod preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/rpod-0 is bound successfully on node node1, 1 nodes evaluated, 1 nodes were found feasible
I0110 19:49:11.095363  121338 scheduling_queue.go:821] About to try and schedule pod preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/rpod-1
I0110 19:49:11.095375  121338 scheduler.go:454] Attempting to schedule pod: preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/rpod-1
I0110 19:49:11.095529  121338 scheduler_binder.go:211] AssumePodVolumes for pod "preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/rpod-1", node "node1"
I0110 19:49:11.095541  121338 scheduler_binder.go:221] AssumePodVolumes for pod "preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/rpod-1", node "node1": all PVCs bound and nothing to do
I0110 19:49:11.095590  121338 factory.go:1166] Attempting to bind rpod-1 to node1
I0110 19:49:11.097150  121338 wrap.go:47] POST /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/events: (1.879145ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39400]
I0110 19:49:11.097323  121338 wrap.go:47] POST /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/rpod-1/binding: (1.52905ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39404]
I0110 19:49:11.097479  121338 scheduler.go:569] pod preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/rpod-1 is bound successfully on node node1, 1 nodes evaluated, 1 nodes were found feasible
I0110 19:49:11.099257  121338 wrap.go:47] POST /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/events: (1.5264ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39404]
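
The two bind sequences above follow the same shape: AssumePodVolumes finds all PVCs bound ("nothing to do"), the pod is assumed onto node1, and the scheduler POSTs to the pod's binding subresource, logged as "pod ... is bound successfully on node node1, 1 nodes evaluated, 1 nodes were found feasible". A sketch of that binding call with era-appropriate client-go (Bind takes no context argument); bindPod is a hypothetical wrapper:

package schedulerutil

import (
	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
)

// bindPod issues the same request as the
// "POST .../pods/rpod-0/binding" lines above: a Binding object whose
// target tells the apiserver which node to record as the pod's NodeName.
func bindPod(cs kubernetes.Interface, ns, pod, node string) error {
	return cs.CoreV1().Pods(ns).Bind(&v1.Binding{
		ObjectMeta: metav1.ObjectMeta{Namespace: ns, Name: pod},
		Target:     v1.ObjectReference{Kind: "Node", Name: node},
	})
}
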
I0110 19:49:11.197321  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/rpod-0: (1.775294ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39404]
I0110 19:49:11.300197  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/rpod-1: (1.973097ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39404]
I0110 19:49:11.300577  121338 preemption_test.go:561] Creating the preemptor pod...
I0110 19:49:11.303014  121338 wrap.go:47] POST /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods: (2.170158ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39404]
I0110 19:49:11.303242  121338 preemption_test.go:567] Creating additional pods...
I0110 19:49:11.303292  121338 scheduling_queue.go:821] About to try and schedule pod preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/preemptor-pod
I0110 19:49:11.303313  121338 scheduler.go:454] Attempting to schedule pod: preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/preemptor-pod
I0110 19:49:11.303439  121338 factory.go:1070] Unable to schedule preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/preemptor-pod: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0110 19:49:11.303506  121338 factory.go:1175] Updating pod condition for preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/preemptor-pod to (PodScheduled==False, Reason=Unschedulable)
I0110 19:49:11.305860  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/preemptor-pod: (1.517162ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39572]
I0110 19:49:11.306599  121338 wrap.go:47] POST /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods: (3.162938ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39404]
I0110 19:49:11.306609  121338 wrap.go:47] PUT /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/preemptor-pod/status: (2.281282ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39400]
I0110 19:49:11.307209  121338 wrap.go:47] POST /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/events: (2.855394ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39574]
I0110 19:49:11.308808  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/preemptor-pod: (1.677872ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39400]
I0110 19:49:11.309091  121338 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
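
From here the log settles into the cycle the test is named for: each ppod fails ordinary scheduling with "no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.", its PodScheduled condition is set to Unschedulable, and generic_scheduler then reports "Node node1 is a potential node for preemption" while the preemptor pod races through its own delete/recreate. A simplified sketch of the resource check behind the "no fit" message, using a stand-in resources type (millicores and bytes) rather than the scheduler's real structs:

package schedulerutil

import "fmt"

// resources is a simplified stand-in for the scheduler's resource
// accounting: CPU in millicores, memory in bytes.
type resources struct {
	MilliCPU int64
	Memory   int64
}

// insufficient reports the same conditions the "no fit" lines above
// name: the pod's requests plus what is already on the node exceed
// the node's allocatable.
func insufficient(requested, used, allocatable resources) []string {
	var reasons []string
	if used.MilliCPU+requested.MilliCPU > allocatable.MilliCPU {
		reasons = append(reasons, "Insufficient cpu")
	}
	if used.Memory+requested.Memory > allocatable.Memory {
		reasons = append(reasons, "Insufficient memory")
	}
	return reasons
}

func ExampleInsufficient() {
	// One node, already full: both reasons fire, matching
	// "0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory."
	fmt.Println(insufficient(
		resources{MilliCPU: 100, Memory: 64 << 20},
		resources{MilliCPU: 950, Memory: 450 << 20},
		resources{MilliCPU: 1000, Memory: 512 << 20},
	))
}

With only node1 in the cluster, each failed check surfaces as a count of 1 in the aggregated message, which is why every reason in the log reads "1 Insufficient ...".
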
I0110 19:49:11.309854  121338 wrap.go:47] POST /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods: (2.180908ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39572]
I0110 19:49:11.311729  121338 wrap.go:47] PUT /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/preemptor-pod/status: (2.197231ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39400]
I0110 19:49:11.311993  121338 wrap.go:47] POST /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods: (1.640016ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39572]
I0110 19:49:11.314462  121338 wrap.go:47] POST /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods: (1.854058ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39574]
I0110 19:49:11.316886  121338 wrap.go:47] POST /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods: (1.876476ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39574]
I0110 19:49:11.318443  121338 wrap.go:47] DELETE /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/rpod-1: (6.167964ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39400]
I0110 19:49:11.318735  121338 scheduling_queue.go:821] About to try and schedule pod preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/preemptor-pod
I0110 19:49:11.318757  121338 scheduler.go:454] Attempting to schedule pod: preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/preemptor-pod
I0110 19:49:11.318897  121338 scheduler_binder.go:211] AssumePodVolumes for pod "preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/preemptor-pod", node "node1"
I0110 19:49:11.318914  121338 scheduler_binder.go:221] AssumePodVolumes for pod "preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/preemptor-pod", node "node1": all PVCs bound and nothing to do
I0110 19:49:11.318957  121338 factory.go:1166] Attempting to bind preemptor-pod to node1
I0110 19:49:11.319005  121338 scheduling_queue.go:821] About to try and schedule pod preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-4
I0110 19:49:11.319017  121338 scheduler.go:454] Attempting to schedule pod: preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-4
I0110 19:49:11.319112  121338 factory.go:1070] Unable to schedule preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-4: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0110 19:49:11.319147  121338 factory.go:1175] Updating pod condition for preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-4 to (PodScheduled==False, Reason=Unschedulable)
I0110 19:49:11.319348  121338 wrap.go:47] POST /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods: (1.960025ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39574]
I0110 19:49:11.321087  121338 wrap.go:47] POST /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/events: (1.994904ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39400]
I0110 19:49:11.322298  121338 wrap.go:47] PUT /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-4/status: (2.647825ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39578]
I0110 19:49:11.322417  121338 wrap.go:47] POST /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods: (2.359465ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39582]
I0110 19:49:11.322467  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-4: (1.697879ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39576]
I0110 19:49:11.323350  121338 wrap.go:47] POST /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/events: (1.377899ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39400]
I0110 19:49:11.324426  121338 wrap.go:47] POST /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/preemptor-pod/binding: (4.319032ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39574]
I0110 19:49:11.324604  121338 scheduler.go:569] pod preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/preemptor-pod is bound successfully on node node1, 1 nodes evaluated, 1 nodes were found feasible
I0110 19:49:11.324669  121338 wrap.go:47] POST /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods: (1.711922ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39578]
I0110 19:49:11.325109  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-4: (1.946265ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39580]
I0110 19:49:11.325562  121338 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0110 19:49:11.325723  121338 scheduling_queue.go:821] About to try and schedule pod preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-3
I0110 19:49:11.325744  121338 scheduler.go:454] Attempting to schedule pod: preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-3
I0110 19:49:11.325841  121338 factory.go:1070] Unable to schedule preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-3: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0110 19:49:11.325885  121338 factory.go:1175] Updating pod condition for preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-3 to (PodScheduled==False, Reason=Unschedulable)
I0110 19:49:11.327312  121338 wrap.go:47] POST /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/events: (2.468729ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39574]
I0110 19:49:11.327396  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-3: (1.186183ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39400]
I0110 19:49:11.327844  121338 wrap.go:47] POST /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods: (2.699041ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39578]
I0110 19:49:11.328949  121338 wrap.go:47] PUT /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-3/status: (2.740879ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39580]
I0110 19:49:11.329607  121338 wrap.go:47] POST /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/events: (1.906906ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39574]
I0110 19:49:11.330344  121338 wrap.go:47] POST /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods: (1.703799ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39578]
I0110 19:49:11.332162  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-3: (1.219728ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39400]
I0110 19:49:11.332315  121338 wrap.go:47] POST /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods: (1.506738ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39574]
I0110 19:49:11.332524  121338 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0110 19:49:11.332680  121338 scheduling_queue.go:821] About to try and schedule pod preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-6
I0110 19:49:11.332695  121338 scheduler.go:454] Attempting to schedule pod: preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-6
I0110 19:49:11.332769  121338 factory.go:1070] Unable to schedule preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-6: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0110 19:49:11.332818  121338 factory.go:1175] Updating pod condition for preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-6 to (PodScheduled==False, Reason=Unschedulable)
I0110 19:49:11.334252  121338 wrap.go:47] POST /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods: (1.520883ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39574]
I0110 19:49:11.334931  121338 wrap.go:47] PUT /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-6/status: (1.898717ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39400]
I0110 19:49:11.335185  121338 wrap.go:47] POST /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/events: (1.596979ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39586]
I0110 19:49:11.335968  121338 wrap.go:47] POST /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods: (1.30183ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39574]
I0110 19:49:11.336992  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-6: (3.755521ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39584]
I0110 19:49:11.338397  121338 wrap.go:47] POST /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods: (1.829408ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39574]
I0110 19:49:11.338646  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-6: (1.951812ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39400]
I0110 19:49:11.339074  121338 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0110 19:49:11.339318  121338 scheduling_queue.go:821] About to try and schedule pod preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-10
I0110 19:49:11.339333  121338 scheduler.go:454] Attempting to schedule pod: preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-10
I0110 19:49:11.339414  121338 factory.go:1070] Unable to schedule preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-10: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0110 19:49:11.339458  121338 factory.go:1175] Updating pod condition for preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-10 to (PodScheduled==False, Reason=Unschedulable)
I0110 19:49:11.341222  121338 wrap.go:47] POST /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods: (2.133565ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39584]
I0110 19:49:11.341427  121338 wrap.go:47] POST /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/events: (1.274627ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39588]
I0110 19:49:11.342058  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-10: (2.237981ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39586]
I0110 19:49:11.342741  121338 wrap.go:47] PUT /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-10/status: (2.97337ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39400]
I0110 19:49:11.343628  121338 wrap.go:47] POST /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods: (1.835554ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39588]
I0110 19:49:11.344955  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-10: (1.2081ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39586]
I0110 19:49:11.345312  121338 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0110 19:49:11.345624  121338 scheduling_queue.go:821] About to try and schedule pod preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-13
I0110 19:49:11.346145  121338 scheduler.go:454] Attempting to schedule pod: preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-13
I0110 19:49:11.346117  121338 wrap.go:47] POST /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods: (1.59083ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39588]
I0110 19:49:11.346352  121338 factory.go:1070] Unable to schedule preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-13: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0110 19:49:11.347158  121338 factory.go:1175] Updating pod condition for preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-13 to (PodScheduled==False, Reason=Unschedulable)
I0110 19:49:11.348457  121338 wrap.go:47] POST /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/events: (1.498833ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39590]
I0110 19:49:11.348803  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-13: (2.04959ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39586]
I0110 19:49:11.348816  121338 wrap.go:47] POST /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods: (2.029126ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39584]
I0110 19:49:11.350469  121338 wrap.go:47] PUT /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-13/status: (1.723732ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39590]
I0110 19:49:11.351641  121338 wrap.go:47] POST /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods: (2.377286ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39584]
I0110 19:49:11.352296  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-13: (1.294386ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39592]
I0110 19:49:11.352547  121338 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0110 19:49:11.352719  121338 scheduling_queue.go:821] About to try and schedule pod preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-15
I0110 19:49:11.352733  121338 scheduler.go:454] Attempting to schedule pod: preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-15
I0110 19:49:11.352796  121338 factory.go:1070] Unable to schedule preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-15: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0110 19:49:11.352836  121338 factory.go:1175] Updating pod condition for preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-15 to (PodScheduled==False, Reason=Unschedulable)
I0110 19:49:11.354291  121338 wrap.go:47] POST /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods: (1.83362ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39584]
I0110 19:49:11.354770  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-15: (1.70847ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39586]
I0110 19:49:11.355070  121338 wrap.go:47] POST /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/events: (1.691822ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39594]
I0110 19:49:11.355728  121338 wrap.go:47] PUT /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-15/status: (2.677809ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39592]
I0110 19:49:11.357947  121338 wrap.go:47] POST /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods: (2.814047ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39584]
I0110 19:49:11.357958  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-15: (1.826619ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39594]
I0110 19:49:11.358629  121338 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0110 19:49:11.358913  121338 scheduling_queue.go:821] About to try and schedule pod preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-18
I0110 19:49:11.358931  121338 scheduler.go:454] Attempting to schedule pod: preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-18
I0110 19:49:11.359037  121338 factory.go:1070] Unable to schedule preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-18: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0110 19:49:11.359082  121338 factory.go:1175] Updating pod condition for preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-18 to (PodScheduled==False, Reason=Unschedulable)
I0110 19:49:11.360643  121338 wrap.go:47] POST /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods: (2.043863ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39584]
I0110 19:49:11.361135  121338 wrap.go:47] POST /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/events: (1.441667ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39598]
I0110 19:49:11.362158  121338 wrap.go:47] PUT /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-18/status: (2.527052ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39596]
I0110 19:49:11.362812  121338 wrap.go:47] POST /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods: (1.576761ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39584]
I0110 19:49:11.363522  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-18: (4.156782ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39586]
I0110 19:49:11.363761  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-18: (1.022222ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39596]
I0110 19:49:11.364003  121338 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0110 19:49:11.364176  121338 scheduling_queue.go:821] About to try and schedule pod preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-20
I0110 19:49:11.364195  121338 scheduler.go:454] Attempting to schedule pod: preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-20
I0110 19:49:11.364314  121338 factory.go:1070] Unable to schedule preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-20: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0110 19:49:11.364368  121338 factory.go:1175] Updating pod condition for preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-20 to (PodScheduled==False, Reason=Unschedulable)
I0110 19:49:11.365208  121338 wrap.go:47] POST /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods: (1.822141ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39584]
I0110 19:49:11.367150  121338 wrap.go:47] POST /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods: (1.549918ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39584]
I0110 19:49:11.367274  121338 wrap.go:47] POST /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/events: (2.193274ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39600]
I0110 19:49:11.367577  121338 wrap.go:47] PUT /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-20/status: (2.627113ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39586]
I0110 19:49:11.367963  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-20: (2.152568ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39598]
I0110 19:49:11.369141  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-20: (1.138137ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39586]
I0110 19:49:11.369166  121338 wrap.go:47] POST /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods: (1.421782ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39584]
I0110 19:49:11.369564  121338 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0110 19:49:11.369737  121338 scheduling_queue.go:821] About to try and schedule pod preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-22
I0110 19:49:11.369759  121338 scheduler.go:454] Attempting to schedule pod: preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-22
I0110 19:49:11.369876  121338 factory.go:1070] Unable to schedule preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-22: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0110 19:49:11.369924  121338 factory.go:1175] Updating pod condition for preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-22 to (PodScheduled==False, Reason=Unschedulable)
I0110 19:49:11.372294  121338 wrap.go:47] POST /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/events: (1.7587ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39602]
I0110 19:49:11.372402  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-22: (1.379425ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39604]
I0110 19:49:11.372944  121338 wrap.go:47] PUT /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-22/status: (2.803469ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39600]
I0110 19:49:11.373859  121338 wrap.go:47] POST /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods: (4.074862ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39598]
I0110 19:49:11.374806  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-22: (1.359992ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39604]
I0110 19:49:11.375101  121338 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0110 19:49:11.375268  121338 scheduling_queue.go:821] About to try and schedule pod preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-25
I0110 19:49:11.375282  121338 scheduler.go:454] Attempting to schedule pod: preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-25
I0110 19:49:11.375354  121338 factory.go:1070] Unable to schedule preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-25: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0110 19:49:11.375407  121338 factory.go:1175] Updating pod condition for preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-25 to (PodScheduled==False, Reason=Unschedulable)
I0110 19:49:11.376826  121338 wrap.go:47] POST /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods: (2.49986ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39598]
I0110 19:49:11.377993  121338 wrap.go:47] PUT /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-25/status: (2.343983ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39604]
I0110 19:49:11.378188  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-25: (2.242159ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39602]
I0110 19:49:11.379713  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-25: (1.344955ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39604]
I0110 19:49:11.379932  121338 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0110 19:49:11.379956  121338 wrap.go:47] POST /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods: (2.138953ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39598]
I0110 19:49:11.380057  121338 scheduling_queue.go:821] About to try and schedule pod preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-26
I0110 19:49:11.380073  121338 scheduler.go:454] Attempting to schedule pod: preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-26
I0110 19:49:11.380149  121338 factory.go:1070] Unable to schedule preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-26: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0110 19:49:11.380199  121338 factory.go:1175] Updating pod condition for preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-26 to (PodScheduled==False, Reason=Unschedulable)
I0110 19:49:11.380481  121338 wrap.go:47] POST /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/events: (4.382289ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39606]
I0110 19:49:11.382365  121338 wrap.go:47] PUT /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-26/status: (1.868892ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39602]
I0110 19:49:11.382393  121338 wrap.go:47] POST /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods: (1.933457ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39604]
I0110 19:49:11.382543  121338 wrap.go:47] POST /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/events: (1.565037ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39608]
I0110 19:49:11.382747  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-26: (2.013286ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39606]
I0110 19:49:11.384070  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-26: (1.12403ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39604]
I0110 19:49:11.384462  121338 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0110 19:49:11.384482  121338 wrap.go:47] POST /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods: (1.546633ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39602]
I0110 19:49:11.384652  121338 scheduling_queue.go:821] About to try and schedule pod preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-28
I0110 19:49:11.384721  121338 scheduler.go:454] Attempting to schedule pod: preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-28
I0110 19:49:11.384839  121338 factory.go:1070] Unable to schedule preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-28: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0110 19:49:11.384914  121338 factory.go:1175] Updating pod condition for preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-28 to (PodScheduled==False, Reason=Unschedulable)
I0110 19:49:11.386343  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-28: (1.192782ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39606]
I0110 19:49:11.386995  121338 wrap.go:47] POST /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods: (1.957425ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39604]
I0110 19:49:11.387427  121338 wrap.go:47] POST /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/events: (1.443911ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39612]
I0110 19:49:11.387995  121338 wrap.go:47] PUT /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-28/status: (2.079844ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39610]
I0110 19:49:11.389095  121338 wrap.go:47] POST /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods: (1.655023ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39604]
I0110 19:49:11.389835  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-28: (1.358884ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39612]
I0110 19:49:11.390107  121338 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0110 19:49:11.390300  121338 scheduling_queue.go:821] About to try and schedule pod preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-26
I0110 19:49:11.390320  121338 scheduler.go:454] Attempting to schedule pod: preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-26
I0110 19:49:11.390421  121338 factory.go:1070] Unable to schedule preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-26: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0110 19:49:11.390477  121338 factory.go:1175] Updating pod condition for preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-26 to (PodScheduled==False, Reason=Unschedulable)
I0110 19:49:11.391631  121338 wrap.go:47] POST /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods: (2.083609ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39604]
I0110 19:49:11.392383  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-26: (1.571198ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39606]
I0110 19:49:11.392952  121338 wrap.go:47] PUT /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-26/status: (2.00749ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39612]
I0110 19:49:11.394048  121338 wrap.go:47] POST /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods: (1.879561ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39604]
I0110 19:49:11.395209  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-26: (1.62121ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39612]
I0110 19:49:11.396534  121338 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0110 19:49:11.397022  121338 wrap.go:47] PATCH /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/events/ppod-26.157894807159823a: (5.619134ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39614]
I0110 19:49:11.398547  121338 wrap.go:47] POST /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods: (2.837925ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39604]
I0110 19:49:11.399145  121338 scheduling_queue.go:821] About to try and schedule pod preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-32
I0110 19:49:11.399165  121338 scheduler.go:454] Attempting to schedule pod: preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-32
I0110 19:49:11.399290  121338 factory.go:1070] Unable to schedule preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-32: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0110 19:49:11.399383  121338 factory.go:1175] Updating pod condition for preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-32 to (PodScheduled==False, Reason=Unschedulable)
I0110 19:49:11.403884  121338 wrap.go:47] PUT /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-32/status: (3.164905ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39606]
I0110 19:49:11.404467  121338 wrap.go:47] POST /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods: (5.169111ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39612]
I0110 19:49:11.404995  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-32: (4.820783ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39616]
I0110 19:49:11.405071  121338 wrap.go:47] POST /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/events: (3.835236ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39618]
I0110 19:49:11.407253  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-32: (1.916822ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39612]
I0110 19:49:11.407614  121338 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0110 19:49:11.407636  121338 wrap.go:47] POST /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods: (2.104025ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39618]
I0110 19:49:11.407865  121338 scheduling_queue.go:821] About to try and schedule pod preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-35
I0110 19:49:11.407941  121338 scheduler.go:454] Attempting to schedule pod: preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-35
I0110 19:49:11.408201  121338 factory.go:1070] Unable to schedule preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-35: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0110 19:49:11.408313  121338 factory.go:1175] Updating pod condition for preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-35 to (PodScheduled==False, Reason=Unschedulable)
I0110 19:49:11.410167  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-35: (1.187912ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39620]
I0110 19:49:11.411738  121338 wrap.go:47] POST /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods: (3.274526ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39612]
I0110 19:49:11.412610  121338 wrap.go:47] PUT /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-35/status: (3.827844ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39606]
I0110 19:49:11.413051  121338 wrap.go:47] POST /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/events: (2.461555ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39620]
I0110 19:49:11.415993  121338 wrap.go:47] POST /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods: (2.298813ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39606]
I0110 19:49:11.417004  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-35: (3.250427ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39620]
I0110 19:49:11.417516  121338 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0110 19:49:11.417726  121338 scheduling_queue.go:821] About to try and schedule pod preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-37
I0110 19:49:11.417747  121338 scheduler.go:454] Attempting to schedule pod: preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-37
I0110 19:49:11.417839  121338 factory.go:1070] Unable to schedule preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-37: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0110 19:49:11.417897  121338 factory.go:1175] Updating pod condition for preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-37 to (PodScheduled==False, Reason=Unschedulable)
I0110 19:49:11.419975  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-37: (1.767213ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39622]
I0110 19:49:11.420648  121338 wrap.go:47] POST /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/events: (1.889696ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39624]
I0110 19:49:11.421950  121338 wrap.go:47] PUT /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-37/status: (3.782674ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39620]
I0110 19:49:11.421952  121338 wrap.go:47] POST /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods: (4.627266ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39606]
I0110 19:49:11.426351  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-37: (3.793849ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39622]
I0110 19:49:11.426841  121338 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0110 19:49:11.426351  121338 wrap.go:47] POST /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods: (3.79618ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39624]
I0110 19:49:11.427015  121338 scheduling_queue.go:821] About to try and schedule pod preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-39
I0110 19:49:11.427037  121338 scheduler.go:454] Attempting to schedule pod: preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-39
I0110 19:49:11.427159  121338 factory.go:1070] Unable to schedule preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-39: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0110 19:49:11.427208  121338 factory.go:1175] Updating pod condition for preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-39 to (PodScheduled==False, Reason=Unschedulable)
I0110 19:49:11.430738  121338 wrap.go:47] POST /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods: (2.753627ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39624]
I0110 19:49:11.431678  121338 wrap.go:47] PUT /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-39/status: (2.779061ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39622]
I0110 19:49:11.433787  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-39: (1.751943ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39622]
I0110 19:49:11.434076  121338 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
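
"Node node1 is a potential node for preemption" (generic_scheduler.go:1108) means the preemption pass found that evicting some lower-priority pods on node1 could make the pending pod feasible. A deliberately simplified sketch of that candidate filtering follows; it omits the predicate re-simulation and PodDisruptionBudget handling the real code performs:

    package main

    import v1 "k8s.io/api/core/v1"

    // podPriority returns the pod's priority, treating an unset field as 0.
    func podPriority(p *v1.Pod) int32 {
        if p.Spec.Priority != nil {
            return *p.Spec.Priority
        }
        return 0
    }

    // lowerPriorityVictims collects pods on a node that a higher-priority
    // preemptor could, in principle, displace. Illustrative only.
    func lowerPriorityVictims(preemptor *v1.Pod, nodePods []*v1.Pod) []*v1.Pod {
        var victims []*v1.Pod
        for _, p := range nodePods {
            if podPriority(p) < podPriority(preemptor) {
                victims = append(victims, p)
            }
        }
        return victims
    }
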
I0110 19:49:11.434519  121338 scheduling_queue.go:821] About to try and schedule pod preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-41
I0110 19:49:11.434567  121338 scheduler.go:454] Attempting to schedule pod: preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-41
I0110 19:49:11.434601  121338 wrap.go:47] POST /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods: (3.233804ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39624]
I0110 19:49:11.434753  121338 factory.go:1070] Unable to schedule preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-41: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0110 19:49:11.434798  121338 factory.go:1175] Updating pod condition for preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-41 to (PodScheduled==False, Reason=Unschedulable)
I0110 19:49:11.437313  121338 wrap.go:47] POST /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods: (2.31051ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39622]
I0110 19:49:11.441526  121338 wrap.go:47] POST /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods: (3.616916ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39622]
I0110 19:49:11.446410  121338 wrap.go:47] PUT /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-41/status: (6.121947ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39624]
I0110 19:49:11.446545  121338 wrap.go:47] POST /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods: (4.388766ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39622]
I0110 19:49:11.449975  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-41: (3.079824ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39624]
I0110 19:49:11.450653  121338 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0110 19:49:11.450856  121338 scheduling_queue.go:821] About to try and schedule pod preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-43
I0110 19:49:11.450943  121338 scheduler.go:454] Attempting to schedule pod: preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-43
I0110 19:49:11.451122  121338 factory.go:1070] Unable to schedule preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-43: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0110 19:49:11.451193  121338 factory.go:1175] Updating pod condition for preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-43 to (PodScheduled==False, Reason=Unschedulable)
I0110 19:49:11.450723  121338 wrap.go:47] POST /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods: (3.071029ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39622]
I0110 19:49:11.455679  121338 wrap.go:47] PUT /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-43/status: (4.154885ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39624]
I0110 19:49:11.457270  121338 wrap.go:47] POST /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods: (3.304927ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39622]
I0110 19:49:11.459800  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-43: (1.44184ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39622]
I0110 19:49:11.460066  121338 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0110 19:49:11.460245  121338 scheduling_queue.go:821] About to try and schedule pod preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-47
I0110 19:49:11.460261  121338 scheduler.go:454] Attempting to schedule pod: preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-47
I0110 19:49:11.460469  121338 factory.go:1070] Unable to schedule preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-47: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0110 19:49:11.460826  121338 factory.go:1175] Updating pod condition for preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-47 to (PodScheduled==False, Reason=Unschedulable)
I0110 19:49:11.463794  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-43: (5.954555ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39624]
I0110 19:49:11.466880  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-39: (4.277347ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39626]
I0110 19:49:11.467154  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-47: (4.967718ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39636]
I0110 19:49:11.467277  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-41: (3.368702ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39630]
I0110 19:49:11.467689  121338 wrap.go:47] POST /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods: (3.495597ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39634]
I0110 19:49:11.468091  121338 wrap.go:47] PUT /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-47/status: (3.429453ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39622]
I0110 19:49:11.469328  121338 wrap.go:47] POST /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/events: (5.497701ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39628]
I0110 19:49:11.488089  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-47: (19.618986ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39624]
I0110 19:49:11.488593  121338 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0110 19:49:11.488803  121338 scheduling_queue.go:821] About to try and schedule pod preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-48
I0110 19:49:11.488817  121338 scheduler.go:454] Attempting to schedule pod: preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-48
I0110 19:49:11.488915  121338 factory.go:1070] Unable to schedule preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-48: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0110 19:49:11.488962  121338 factory.go:1175] Updating pod condition for preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-48 to (PodScheduled==False, Reason=Unschedulable)
I0110 19:49:11.491158  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-48: (1.606219ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39632]
I0110 19:49:11.494567  121338 wrap.go:47] PUT /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-48/status: (4.065073ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39624]
I0110 19:49:11.496966  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-48: (1.658639ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39624]
I0110 19:49:11.497387  121338 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0110 19:49:11.497696  121338 scheduling_queue.go:821] About to try and schedule pod preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-49
I0110 19:49:11.497753  121338 scheduler.go:454] Attempting to schedule pod: preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-49
I0110 19:49:11.497898  121338 factory.go:1070] Unable to schedule preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-49: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0110 19:49:11.497965  121338 factory.go:1175] Updating pod condition for preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-49 to (PodScheduled==False, Reason=Unschedulable)
I0110 19:49:11.500749  121338 wrap.go:47] POST /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/events: (6.444038ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39628]
I0110 19:49:11.502252  121338 wrap.go:47] PUT /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-49/status: (3.616423ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39624]
I0110 19:49:11.504473  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-49: (1.771985ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39624]
I0110 19:49:11.504875  121338 wrap.go:47] POST /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/events: (2.530147ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39628]
I0110 19:49:11.505532  121338 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0110 19:49:11.505882  121338 scheduling_queue.go:821] About to try and schedule pod preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-48
I0110 19:49:11.505895  121338 scheduler.go:454] Attempting to schedule pod: preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-48
I0110 19:49:11.506018  121338 factory.go:1070] Unable to schedule preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-48: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0110 19:49:11.506060  121338 factory.go:1175] Updating pod condition for preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-48 to (PodScheduled==False, Reason=Unschedulable)
I0110 19:49:11.510339  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-48: (3.628166ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39638]
I0110 19:49:11.511427  121338 wrap.go:47] POST /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/events: (3.459153ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39628]
I0110 19:49:11.511733  121338 wrap.go:47] PUT /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-48/status: (4.593031ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39624]
I0110 19:49:11.514528  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-49: (1.921832ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39632]
I0110 19:49:11.514899  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-48: (2.759848ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39624]
I0110 19:49:11.515548  121338 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0110 19:49:11.515729  121338 scheduling_queue.go:821] About to try and schedule pod preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-49
I0110 19:49:11.515740  121338 scheduler.go:454] Attempting to schedule pod: preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-49
I0110 19:49:11.515860  121338 factory.go:1070] Unable to schedule preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-49: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0110 19:49:11.515900  121338 factory.go:1175] Updating pod condition for preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-49 to (PodScheduled==False, Reason=Unschedulable)
I0110 19:49:11.516752  121338 wrap.go:47] POST /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/events: (4.823452ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39628]
I0110 19:49:11.520804  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-49: (3.468192ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39638]
I0110 19:49:11.521383  121338 wrap.go:47] PUT /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-49/status: (3.632251ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39632]
I0110 19:49:11.524185  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-49: (1.696087ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39632]
I0110 19:49:11.524518  121338 wrap.go:47] POST /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/events: (5.950411ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39628]
I0110 19:49:11.524783  121338 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0110 19:49:11.525130  121338 scheduling_queue.go:821] About to try and schedule pod preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-47
I0110 19:49:11.525171  121338 scheduler.go:454] Attempting to schedule pod: preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-47
I0110 19:49:11.525300  121338 factory.go:1070] Unable to schedule preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-47: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0110 19:49:11.525367  121338 factory.go:1175] Updating pod condition for preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-47 to (PodScheduled==False, Reason=Unschedulable)
I0110 19:49:11.527305  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-47: (1.62333ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39638]
I0110 19:49:11.529606  121338 wrap.go:47] PUT /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-47/status: (3.493307ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39640]
I0110 19:49:11.529922  121338 wrap.go:47] PATCH /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/events/ppod-48.1578948077d52b6f: (4.271552ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39632]
I0110 19:49:11.532065  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-47: (1.944802ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39640]
I0110 19:49:11.532421  121338 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0110 19:49:11.532599  121338 scheduling_queue.go:821] About to try and schedule pod preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-43
I0110 19:49:11.532612  121338 scheduler.go:454] Attempting to schedule pod: preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-43
I0110 19:49:11.532682  121338 factory.go:1070] Unable to schedule preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-43: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0110 19:49:11.532730  121338 factory.go:1175] Updating pod condition for preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-43 to (PodScheduled==False, Reason=Unschedulable)
I0110 19:49:11.534417  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-43: (1.218696ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39638]
I0110 19:49:11.536665  121338 wrap.go:47] PUT /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-43/status: (2.281648ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39640]
I0110 19:49:11.536986  121338 wrap.go:47] PATCH /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/events/ppod-49.15789480785e8373: (5.336674ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39632]
I0110 19:49:11.539306  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-43: (2.256039ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39640]
I0110 19:49:11.539670  121338 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0110 19:49:11.540124  121338 scheduling_queue.go:821] About to try and schedule pod preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-41
I0110 19:49:11.540135  121338 scheduler.go:454] Attempting to schedule pod: preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-41
I0110 19:49:11.540214  121338 factory.go:1070] Unable to schedule preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-41: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0110 19:49:11.540263  121338 factory.go:1175] Updating pod condition for preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-41 to (PodScheduled==False, Reason=Unschedulable)
I0110 19:49:11.541984  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-41: (1.360245ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39638]
I0110 19:49:11.543955  121338 wrap.go:47] PUT /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-41/status: (2.973051ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39640]
I0110 19:49:11.545869  121338 wrap.go:47] PATCH /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/events/ppod-47.1578948076236b7c: (8.267848ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39632]
I0110 19:49:11.550030  121338 wrap.go:47] PATCH /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/events/ppod-43.157894807594da28: (3.265507ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39632]
I0110 19:49:11.554858  121338 wrap.go:47] PATCH /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/events/ppod-41.15789480749ab295: (3.869488ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39632]
I0110 19:49:11.557221  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-41: (12.616542ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39640]
I0110 19:49:11.557820  121338 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0110 19:49:11.558019  121338 scheduling_queue.go:821] About to try and schedule pod preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-46
I0110 19:49:11.558044  121338 scheduler.go:454] Attempting to schedule pod: preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-46
I0110 19:49:11.558173  121338 factory.go:1070] Unable to schedule preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-46: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0110 19:49:11.558244  121338 factory.go:1175] Updating pod condition for preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-46 to (PodScheduled==False, Reason=Unschedulable)
I0110 19:49:11.566979  121338 wrap.go:47] PUT /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-46/status: (7.9441ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39638]
I0110 19:49:11.567545  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-46: (8.996242ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39632]
I0110 19:49:11.566984  121338 wrap.go:47] POST /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/events: (2.784039ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39642]
I0110 19:49:11.569912  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-46: (1.53981ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39632]
I0110 19:49:11.570337  121338 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0110 19:49:11.570581  121338 scheduling_queue.go:821] About to try and schedule pod preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-45
I0110 19:49:11.570630  121338 scheduler.go:454] Attempting to schedule pod: preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-45
I0110 19:49:11.571838  121338 factory.go:1070] Unable to schedule preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-45: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0110 19:49:11.571933  121338 factory.go:1175] Updating pod condition for preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-45 to (PodScheduled==False, Reason=Unschedulable)
I0110 19:49:11.573851  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/preemptor-pod: (3.067918ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39632]
I0110 19:49:11.577377  121338 wrap.go:47] POST /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/events: (2.975018ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39632]
I0110 19:49:11.578464  121338 wrap.go:47] PUT /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-45/status: (5.535864ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39642]
I0110 19:49:11.579875  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-45: (6.944743ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39644]
I0110 19:49:11.581778  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-45: (2.878125ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39632]
I0110 19:49:11.582141  121338 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0110 19:49:11.582351  121338 scheduling_queue.go:821] About to try and schedule pod preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-44
I0110 19:49:11.582380  121338 scheduler.go:454] Attempting to schedule pod: preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-44
I0110 19:49:11.582573  121338 factory.go:1070] Unable to schedule preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-44: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0110 19:49:11.582654  121338 factory.go:1175] Updating pod condition for preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-44 to (PodScheduled==False, Reason=Unschedulable)
I0110 19:49:11.585982  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-44: (2.877384ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39644]
I0110 19:49:11.586555  121338 wrap.go:47] POST /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/events: (2.456036ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39646]
I0110 19:49:11.589708  121338 wrap.go:47] PUT /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-44/status: (3.011575ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39644]
I0110 19:49:11.592362  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-44: (1.92035ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39646]
I0110 19:49:11.592780  121338 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0110 19:49:11.593016  121338 scheduling_queue.go:821] About to try and schedule pod preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-39
I0110 19:49:11.593050  121338 scheduler.go:454] Attempting to schedule pod: preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-39
I0110 19:49:11.593317  121338 factory.go:1070] Unable to schedule preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-39: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0110 19:49:11.593409  121338 factory.go:1175] Updating pod condition for preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-39 to (PodScheduled==False, Reason=Unschedulable)
I0110 19:49:11.596864  121338 wrap.go:47] PUT /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-39/status: (2.63677ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39648]
I0110 19:49:11.597330  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-39: (2.175565ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39646]
I0110 19:49:11.600060  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-39: (1.424302ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39650]
I0110 19:49:11.600390  121338 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0110 19:49:11.600574  121338 wrap.go:47] PATCH /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/events/ppod-39.157894807426e1d3: (2.769624ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39646]
I0110 19:49:11.600628  121338 scheduling_queue.go:821] About to try and schedule pod preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-44
I0110 19:49:11.600651  121338 scheduler.go:454] Attempting to schedule pod: preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-44
I0110 19:49:11.600815  121338 factory.go:1070] Unable to schedule preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-44: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0110 19:49:11.600890  121338 factory.go:1175] Updating pod condition for preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-44 to (PodScheduled==False, Reason=Unschedulable)
I0110 19:49:11.603196  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-44: (1.583044ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39650]
I0110 19:49:11.603836  121338 wrap.go:47] PUT /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-44/status: (2.278471ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39648]
I0110 19:49:11.607213  121338 wrap.go:47] PATCH /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/events/ppod-44.157894807d6ac5d9: (5.496422ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39652]
I0110 19:49:11.609729  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-44: (1.531723ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39652]
I0110 19:49:11.610175  121338 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0110 19:49:11.610506  121338 scheduling_queue.go:821] About to try and schedule pod preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-42
I0110 19:49:11.610547  121338 scheduler.go:454] Attempting to schedule pod: preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-42
I0110 19:49:11.610684  121338 factory.go:1070] Unable to schedule preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-42: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0110 19:49:11.610756  121338 factory.go:1175] Updating pod condition for preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-42 to (PodScheduled==False, Reason=Unschedulable)
I0110 19:49:11.614245  121338 wrap.go:47] PUT /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-42/status: (3.00588ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39652]
I0110 19:49:11.615096  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-42: (3.911067ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39650]
I0110 19:49:11.616224  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-42: (1.3385ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39652]
I0110 19:49:11.616477  121338 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0110 19:49:11.616741  121338 scheduling_queue.go:821] About to try and schedule pod preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-40
I0110 19:49:11.616758  121338 scheduler.go:454] Attempting to schedule pod: preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-40
I0110 19:49:11.616903  121338 factory.go:1070] Unable to schedule preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-40: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0110 19:49:11.616947  121338 factory.go:1175] Updating pod condition for preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-40 to (PodScheduled==False, Reason=Unschedulable)
I0110 19:49:11.618079  121338 wrap.go:47] POST /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/events: (1.78374ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39650]
I0110 19:49:11.622445  121338 wrap.go:47] POST /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/events: (3.982961ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39650]
I0110 19:49:11.622454  121338 wrap.go:47] PUT /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-40/status: (4.340342ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39654]
I0110 19:49:11.622919  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-40: (5.174671ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39652]
I0110 19:49:11.625416  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-40: (1.437524ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39652]
I0110 19:49:11.625754  121338 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0110 19:49:11.625924  121338 scheduling_queue.go:821] About to try and schedule pod preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-42
I0110 19:49:11.625940  121338 scheduler.go:454] Attempting to schedule pod: preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-42
I0110 19:49:11.626078  121338 factory.go:1070] Unable to schedule preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-42: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0110 19:49:11.626122  121338 factory.go:1175] Updating pod condition for preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-42 to (PodScheduled==False, Reason=Unschedulable)
I0110 19:49:11.628219  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-42: (1.233189ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39652]
I0110 19:49:11.630038  121338 wrap.go:47] PUT /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-42/status: (3.10884ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39650]
I0110 19:49:11.631689  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-42: (1.19986ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39650]
I0110 19:49:11.632079  121338 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0110 19:49:11.632268  121338 scheduling_queue.go:821] About to try and schedule pod preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-37
I0110 19:49:11.632299  121338 scheduler.go:454] Attempting to schedule pod: preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-37
I0110 19:49:11.632391  121338 factory.go:1070] Unable to schedule preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-37: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0110 19:49:11.632429  121338 factory.go:1175] Updating pod condition for preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-37 to (PodScheduled==False, Reason=Unschedulable)
I0110 19:49:11.634780  121338 wrap.go:47] PUT /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-37/status: (2.081306ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39650]
I0110 19:49:11.635552  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-37: (2.759784ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39652]
I0110 19:49:11.637202  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-37: (1.320536ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39650]
I0110 19:49:11.637444  121338 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0110 19:49:11.637630  121338 scheduling_queue.go:821] About to try and schedule pod preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-38
I0110 19:49:11.637645  121338 scheduler.go:454] Attempting to schedule pod: preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-38
I0110 19:49:11.637734  121338 factory.go:1070] Unable to schedule preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-38: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0110 19:49:11.637770  121338 factory.go:1175] Updating pod condition for preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-38 to (PodScheduled==False, Reason=Unschedulable)
I0110 19:49:11.639719  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-38: (1.149919ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39652]
I0110 19:49:11.640863  121338 wrap.go:47] PATCH /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/events/ppod-42.157894807f179868: (12.642ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39656]
I0110 19:49:11.644571  121338 wrap.go:47] PATCH /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/events/ppod-37.157894807398a556: (3.104079ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39656]
I0110 19:49:11.645764  121338 wrap.go:47] PUT /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-38/status: (7.703835ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39650]
I0110 19:49:11.648516  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-38: (1.734342ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39650]
I0110 19:49:11.648843  121338 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0110 19:49:11.649045  121338 scheduling_queue.go:821] About to try and schedule pod preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-35
I0110 19:49:11.649081  121338 scheduler.go:454] Attempting to schedule pod: preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-35
I0110 19:49:11.649194  121338 factory.go:1070] Unable to schedule preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-35: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0110 19:49:11.649271  121338 factory.go:1175] Updating pod condition for preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-35 to (PodScheduled==False, Reason=Unschedulable)
I0110 19:49:11.658701  121338 wrap.go:47] PUT /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-35/status: (8.700152ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39650]
I0110 19:49:11.659176  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-35: (9.502199ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39652]
I0110 19:49:11.663257  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-35: (3.397634ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39650]
I0110 19:49:11.663769  121338 wrap.go:47] POST /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/events: (17.637415ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39656]
I0110 19:49:11.664104  121338 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0110 19:49:11.666639  121338 scheduling_queue.go:821] About to try and schedule pod preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-38
I0110 19:49:11.666668  121338 scheduler.go:454] Attempting to schedule pod: preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-38
I0110 19:49:11.666807  121338 factory.go:1070] Unable to schedule preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-38: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0110 19:49:11.666865  121338 factory.go:1175] Updating pod condition for preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-38 to (PodScheduled==False, Reason=Unschedulable)
I0110 19:49:11.670062  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-38: (2.710358ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39652]
I0110 19:49:11.672413  121338 wrap.go:47] PATCH /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/events/ppod-35.15789480730630b8: (4.324041ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39656]
I0110 19:49:11.678151  121338 wrap.go:47] PATCH /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/events/ppod-38.1578948080b3d55c: (4.847902ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39656]
I0110 19:49:11.678541  121338 wrap.go:47] PUT /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-38/status: (10.726727ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39650]
I0110 19:49:11.678583  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/preemptor-pod: (3.915439ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39652]
I0110 19:49:11.678960  121338 preemption_test.go:583] Check unschedulable pods still exists and were never scheduled...
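
preemption_test.go:583 marks the verification phase: the burst of GET ppod-0, ppod-1, ... requests below is the test walking every low-priority pod to confirm it still exists and was never bound to a node. A hedged reconstruction of such a check; cs, ns, and numInitialPods are assumed names, and the real helper in preemption_test.go may differ:

    package scheduler

    import (
        "fmt"
        "testing"

        metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
        "k8s.io/client-go/kubernetes"
    )

    // checkUnschedulablePods asserts each ppod-N still exists and has no node.
    func checkUnschedulablePods(t *testing.T, cs kubernetes.Interface, ns string, numInitialPods int) {
        for i := 0; i < numInitialPods; i++ {
            name := fmt.Sprintf("ppod-%d", i)
            pod, err := cs.CoreV1().Pods(ns).Get(name, metav1.GetOptions{})
            if err != nil {
                t.Fatalf("low-priority pod %s should still exist: %v", name, err)
            }
            if pod.Spec.NodeName != "" {
                t.Fatalf("low-priority pod %s was bound to %q; expected it to stay pending", name, pod.Spec.NodeName)
            }
        }
    }
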
I0110 19:49:11.680750  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-38: (1.269809ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39652]
I0110 19:49:11.681114  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-0: (1.980075ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39656]
I0110 19:49:11.681642  121338 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0110 19:49:11.684013  121338 scheduling_queue.go:821] About to try and schedule pod preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-32
I0110 19:49:11.684039  121338 scheduler.go:454] Attempting to schedule pod: preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-32
I0110 19:49:11.684192  121338 factory.go:1070] Unable to schedule preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-32: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0110 19:49:11.684271  121338 factory.go:1175] Updating pod condition for preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-32 to (PodScheduled==False, Reason=Unschedulable)
I0110 19:49:11.686286  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-1: (4.499499ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39656]
I0110 19:49:11.690203  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-32: (3.269696ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39656]
I0110 19:49:11.690546  121338 wrap.go:47] PUT /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-32/status: (4.925471ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39652]
I0110 19:49:11.690881  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-2: (3.89994ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39662]
I0110 19:49:11.693166  121338 wrap.go:47] PATCH /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/events/ppod-32.15789480727e1e76: (6.383612ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39660]
I0110 19:49:11.693179  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-3: (1.333795ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39656]
I0110 19:49:11.694884  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-32: (2.964378ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39658]
I0110 19:49:11.695202  121338 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0110 19:49:11.695517  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-4: (1.948824ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39656]
I0110 19:49:11.696052  121338 scheduling_queue.go:821] About to try and schedule pod preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-36
I0110 19:49:11.696080  121338 scheduler.go:454] Attempting to schedule pod: preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-36
I0110 19:49:11.696216  121338 factory.go:1070] Unable to schedule preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-36: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0110 19:49:11.696276  121338 factory.go:1175] Updating pod condition for preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-36 to (PodScheduled==False, Reason=Unschedulable)
I0110 19:49:11.699706  121338 wrap.go:47] PUT /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-36/status: (2.520802ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39660]
I0110 19:49:11.700072  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-36: (2.897229ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39664]
I0110 19:49:11.701920  121338 wrap.go:47] POST /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/events: (4.22584ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39666]
I0110 19:49:11.702262  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-36: (1.328406ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39660]
I0110 19:49:11.702748  121338 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0110 19:49:11.702880  121338 scheduling_queue.go:821] About to try and schedule pod preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-34
I0110 19:49:11.702918  121338 scheduler.go:454] Attempting to schedule pod: preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-34
I0110 19:49:11.703039  121338 factory.go:1070] Unable to schedule preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-34: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0110 19:49:11.703124  121338 factory.go:1175] Updating pod condition for preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-34 to (PodScheduled==False, Reason=Unschedulable)
I0110 19:49:11.703729  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-5: (1.353462ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39658]
I0110 19:49:11.705679  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-34: (1.62818ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39664]
I0110 19:49:11.706832  121338 wrap.go:47] POST /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/events: (2.131972ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39668]
I0110 19:49:11.706882  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-6: (1.873436ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39658]
I0110 19:49:11.708539  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-7: (1.225846ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39658]
I0110 19:49:11.709706  121338 wrap.go:47] PUT /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-34/status: (5.501941ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39666]
I0110 19:49:11.710746  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-8: (1.700619ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39658]
I0110 19:49:11.712702  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-34: (2.313929ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39666]
I0110 19:49:11.712952  121338 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0110 19:49:11.713859  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-9: (2.720637ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39658]
I0110 19:49:11.715707  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-10: (1.431095ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39666]
I0110 19:49:11.717945  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-11: (1.867232ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39666]
I0110 19:49:11.719018  121338 scheduling_queue.go:821] About to try and schedule pod preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-36
I0110 19:49:11.719034  121338 scheduler.go:454] Attempting to schedule pod: preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-36
I0110 19:49:11.719286  121338 factory.go:1070] Unable to schedule preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-36: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0110 19:49:11.719349  121338 factory.go:1175] Updating pod condition for preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-36 to (PodScheduled==False, Reason=Unschedulable)
I0110 19:49:11.719941  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-12: (1.412876ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39666]
I0110 19:49:11.722139  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-36: (2.502442ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39664]
I0110 19:49:11.722820  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-13: (2.291438ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39666]
I0110 19:49:11.722952  121338 reflector.go:215] k8s.io/client-go/informers/factory.go:132: forcing resync
I0110 19:49:11.723330  121338 wrap.go:47] PUT /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-36/status: (3.384616ms) 409 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39670]
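
The 409 on PUT .../pods/ppod-36/status above is an optimistic-concurrency conflict: the PUT carried a stale resourceVersion because the pod's status had just been written by another path. The log shows the scheduler simply re-reads (the GET ... 200 that follows) and retries on its own cadence; the standard client-go pattern for a writer that must win such races is a conflict-retry loop, sketched here with assumed names cs, ns, and updatePodStatus:

    package main

    import (
        metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
        "k8s.io/client-go/kubernetes"
        "k8s.io/client-go/util/retry"
    )

    // updatePodStatus re-fetches the pod and retries the write whenever the
    // API server answers 409 Conflict (stale resourceVersion).
    func updatePodStatus(cs kubernetes.Interface, ns, name string) error {
        return retry.RetryOnConflict(retry.DefaultRetry, func() error {
            pod, err := cs.CoreV1().Pods(ns).Get(name, metav1.GetOptions{})
            if err != nil {
                return err
            }
            // ...re-apply the PodScheduled condition on the fresh object here...
            _, err = cs.CoreV1().Pods(ns).UpdateStatus(pod)
            return err
        })
    }
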
I0110 19:49:11.724175  121338 reflector.go:215] k8s.io/client-go/informers/factory.go:132: forcing resync
I0110 19:49:11.724904  121338 wrap.go:47] PATCH /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/events/ppod-36.15789480843089e3: (4.918062ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39672]
I0110 19:49:11.724972  121338 reflector.go:215] k8s.io/client-go/informers/factory.go:132: forcing resync
I0110 19:49:11.725385  121338 reflector.go:215] k8s.io/client-go/informers/factory.go:132: forcing resync
I0110 19:49:11.726294  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-36: (1.80462ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39670]
I0110 19:49:11.726658  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-14: (2.145698ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39666]
I0110 19:49:11.727246  121338 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0110 19:49:11.728029  121338 reflector.go:215] k8s.io/client-go/informers/factory.go:132: forcing resync
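
The interleaved "forcing resync" lines come from the shared informers backing the test's caches: a SharedInformerFactory built with a non-zero resync period periodically replays its store to registered handlers, independent of the scheduling traffic around it. Minimal wiring sketch; the 30-second period is an illustrative assumption, not the interval this test configures:

    package main

    import (
        "time"

        "k8s.io/client-go/informers"
        "k8s.io/client-go/kubernetes"
        "k8s.io/client-go/tools/cache"
    )

    // newPodInformer returns a pod informer whose handlers get a full replay of
    // the cache every resync period (the source of the "forcing resync" lines).
    func newPodInformer(cs kubernetes.Interface) cache.SharedIndexInformer {
        factory := informers.NewSharedInformerFactory(cs, 30*time.Second)
        return factory.Core().V1().Pods().Informer()
    }
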
I0110 19:49:11.728099  121338 scheduling_queue.go:821] About to try and schedule pod preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-34
I0110 19:49:11.728110  121338 scheduler.go:454] Attempting to schedule pod: preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-34
I0110 19:49:11.728223  121338 factory.go:1070] Unable to schedule preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-34: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0110 19:49:11.728288  121338 factory.go:1175] Updating pod condition for preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-34 to (PodScheduled==False, Reason=Unschedulable)
I0110 19:49:11.736465  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-34: (6.484644ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39664]
I0110 19:49:11.736992  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-15: (9.604043ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39672]
I0110 19:49:11.737270  121338 wrap.go:47] PUT /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-34/status: (7.266626ms) 409 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39670]
I0110 19:49:11.739666  121338 wrap.go:47] PATCH /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/events/ppod-34.157894808498ec75: (10.318945ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39674]
I0110 19:49:11.740722  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-16: (1.888215ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39672]
I0110 19:49:11.741003  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-34: (1.318806ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39670]
I0110 19:49:11.741280  121338 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0110 19:49:11.741622  121338 scheduling_queue.go:821] About to try and schedule pod preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-33
I0110 19:49:11.741634  121338 scheduler.go:454] Attempting to schedule pod: preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-33
I0110 19:49:11.741736  121338 factory.go:1070] Unable to schedule preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-33: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0110 19:49:11.741796  121338 factory.go:1175] Updating pod condition for preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-33 to (PodScheduled==False, Reason=Unschedulable)
I0110 19:49:11.743588  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-17: (2.485128ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39674]
I0110 19:49:11.747551  121338 wrap.go:47] POST /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/events: (3.264419ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39674]
I0110 19:49:11.747824  121338 wrap.go:47] PUT /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-33/status: (4.166967ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39664]
I0110 19:49:11.748096  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-33: (4.892626ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39670]
I0110 19:49:11.748253  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-18: (3.212381ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39678]
I0110 19:49:11.752338  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-19: (3.595195ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39678]
I0110 19:49:11.752443  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-33: (3.87167ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39674]
I0110 19:49:11.752848  121338 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0110 19:49:11.753158  121338 scheduling_queue.go:821] About to try and schedule pod preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-31
I0110 19:49:11.753202  121338 scheduler.go:454] Attempting to schedule pod: preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-31
I0110 19:49:11.753368  121338 factory.go:1070] Unable to schedule preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-31: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0110 19:49:11.753451  121338 factory.go:1175] Updating pod condition for preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-31 to (PodScheduled==False, Reason=Unschedulable)
I0110 19:49:11.754440  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-20: (1.607667ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39678]
I0110 19:49:11.755095  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-31: (1.324619ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39676]
I0110 19:49:11.757525  121338 wrap.go:47] PUT /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-31/status: (2.784105ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39682]
I0110 19:49:11.758089  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-21: (2.665633ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39676]
I0110 19:49:11.759217  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-31: (1.236748ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39682]
I0110 19:49:11.759315  121338 wrap.go:47] POST /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/events: (3.830225ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39680]
I0110 19:49:11.759555  121338 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0110 19:49:11.759769  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-22: (1.191424ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39676]
I0110 19:49:11.759790  121338 scheduling_queue.go:821] About to try and schedule pod preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-28
I0110 19:49:11.759803  121338 scheduler.go:454] Attempting to schedule pod: preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-28
I0110 19:49:11.759892  121338 factory.go:1070] Unable to schedule preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-28: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0110 19:49:11.759929  121338 factory.go:1175] Updating pod condition for preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-28 to (PodScheduled==False, Reason=Unschedulable)
I0110 19:49:11.761245  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-23: (1.056865ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39678]
I0110 19:49:11.763274  121338 wrap.go:47] PATCH /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/events/ppod-28.1578948071a15130: (2.437661ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39686]
I0110 19:49:11.765042  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-24: (3.252959ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39678]
I0110 19:49:11.765063  121338 wrap.go:47] PUT /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-28/status: (4.780299ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39682]
I0110 19:49:11.766548  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-28: (5.694208ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39684]
I0110 19:49:11.766979  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-25: (1.487417ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39678]
I0110 19:49:11.767321  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-28: (1.77975ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39686]
I0110 19:49:11.767639  121338 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0110 19:49:11.767810  121338 scheduling_queue.go:821] About to try and schedule pod preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-31
I0110 19:49:11.767827  121338 scheduler.go:454] Attempting to schedule pod: preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-31
I0110 19:49:11.767950  121338 factory.go:1070] Unable to schedule preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-31: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0110 19:49:11.768001  121338 factory.go:1175] Updating pod condition for preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-31 to (PodScheduled==False, Reason=Unschedulable)
I0110 19:49:11.769037  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-26: (1.55543ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39678]
I0110 19:49:11.770652  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-27: (1.183718ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39688]
I0110 19:49:11.770679  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-31: (2.233832ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39686]
I0110 19:49:11.772389  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-28: (1.288512ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39686]
I0110 19:49:11.774645  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-29: (1.732545ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39686]
I0110 19:49:11.775470  121338 wrap.go:47] PATCH /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/events/ppod-31.157894808798cef2: (6.883192ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39684]
I0110 19:49:11.775671  121338 wrap.go:47] PUT /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-31/status: (2.733439ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39678]
I0110 19:49:11.777223  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-30: (2.193409ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39686]
I0110 19:49:11.779076  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-31: (1.461882ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39686]
I0110 19:49:11.779865  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-31: (3.72904ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39678]
I0110 19:49:11.780185  121338 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0110 19:49:11.780408  121338 scheduling_queue.go:821] About to try and schedule pod preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-36
I0110 19:49:11.780434  121338 scheduler.go:454] Attempting to schedule pod: preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-36
I0110 19:49:11.780560  121338 factory.go:1070] Unable to schedule preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-36: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0110 19:49:11.780632  121338 factory.go:1175] Updating pod condition for preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-36 to (PodScheduled==False, Reason=Unschedulable)
I0110 19:49:11.782115  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-32: (2.32095ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39686]
I0110 19:49:11.782695  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-36: (1.369064ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39678]
I0110 19:49:11.783059  121338 wrap.go:47] PUT /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-36/status: (2.172102ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39688]
I0110 19:49:11.784044  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-33: (1.271688ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39686]
I0110 19:49:11.785327  121338 wrap.go:47] PATCH /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/events/ppod-36.15789480843089e3: (3.628588ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39690]
I0110 19:49:11.786148  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-34: (1.217065ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39686]
I0110 19:49:11.786265  121338 wrap.go:47] GET /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/pods/ppod-36: (1.325482ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39688]
I0110 19:49:11.786563  121338 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0110 19:49:11.786779  121338 scheduling_queue.go:821] About to try and schedule pod preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-30
I0110 19:49:11.786802  121338 scheduler.go:454] Attempting to schedule pod: preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-30
I0110 19:49:11.786911  121338 factory.go:1070] Unable to schedule preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-30: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0110 19:49:11.786961  121338 factory.go:1175] Updating pod condition for preemption-raceca1e3aa9-1510-11e9-acdb-0242ac110002/ppod-30 to (PodScheduled==False, Reason=Unschedulable)
I0110 19:49:11.789392  121338 wrap.go:47] PUT /api/v1/namespaces/preemption-raceca1e3aa9-1510-11e9-acdb-0242a