This job view page is being replaced by Spyglass soon. Check out the new job view.
PR jiayingz: Update nvidia-gpu-device-plugin addon.
Result: FAILURE
Tests: 1 failed / 622 succeeded
Started: 2019-02-11 23:54
Elapsed: 27m33s
Revision:
Builder: gke-prow-containerd-pool-99179761-xlfp
Refs: master:805a9e70
73940:52e92ab4
pod: 50c850f8-2e58-11e9-aa96-0a580a6c0714
infra-commit: 49d8112d8
pod: 50c850f8-2e58-11e9-aa96-0a580a6c0714
repo: k8s.io/kubernetes
repo-commit: cb059fb69b122f6fd0f92e3effe568d61f4fc3bd
repos: {u'k8s.io/kubernetes': u'master:805a9e703698d0a8a86f405f861f9e3fd91b29c6,73940:52e92ab4b9f4d1e868c96090c49485edfad4d72d'}

Test Failures


k8s.io/kubernetes/test/integration/scheduler TestPreemptionRaces 22s

go test -v k8s.io/kubernetes/test/integration/scheduler -run TestPreemptionRaces$
I0212 00:14:43.232529  123634 services.go:33] Network range for service cluster IPs is unspecified. Defaulting to {10.0.0.0 ffffff00}.
I0212 00:14:43.232586  123634 services.go:45] Setting service IP to "10.0.0.1" (read-write).
I0212 00:14:43.232599  123634 master.go:272] Node port range unspecified. Defaulting to 30000-32767.
I0212 00:14:43.232610  123634 master.go:228] Using reconciler: 
I0212 00:14:43.234478  123634 storage_factory.go:285] storing podtemplates in v1, reading as __internal from storagebackend.Config{Type:"", Prefix:"e6f02706-8e02-4007-9275-a272efff67ce", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Quorum:false, Paging:true, Codec:runtime.Codec(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0212 00:14:43.234656  123634 clientconn.go:551] parsed scheme: ""
I0212 00:14:43.234686  123634 clientconn.go:557] scheme "" not registered, fallback to default scheme
I0212 00:14:43.234742  123634 resolver_conn_wrapper.go:116] ccResolverWrapper: sending new addresses to cc: [{127.0.0.1:2379 0  <nil>}]
I0212 00:14:43.234811  123634 balancer_v1_wrapper.go:125] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0212 00:14:43.235247  123634 balancer_v1_wrapper.go:245] clientv3/balancer: pin "127.0.0.1:2379"
I0212 00:14:43.235314  123634 balancer_v1_wrapper.go:125] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0212 00:14:43.235872  123634 store.go:1310] Monitoring podtemplates count at <storage-prefix>//podtemplates
I0212 00:14:43.235943  123634 storage_factory.go:285] storing events in v1, reading as __internal from storagebackend.Config{Type:"", Prefix:"e6f02706-8e02-4007-9275-a272efff67ce", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Quorum:false, Paging:true, Codec:runtime.Codec(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0212 00:14:43.236012  123634 reflector.go:170] Listing and watching *core.PodTemplate from storage/cacher.go:/podtemplates
I0212 00:14:43.236184  123634 clientconn.go:551] parsed scheme: ""
I0212 00:14:43.236213  123634 clientconn.go:557] scheme "" not registered, fallback to default scheme
I0212 00:14:43.236268  123634 resolver_conn_wrapper.go:116] ccResolverWrapper: sending new addresses to cc: [{127.0.0.1:2379 0  <nil>}]
I0212 00:14:43.236322  123634 balancer_v1_wrapper.go:125] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0212 00:14:43.236656  123634 balancer_v1_wrapper.go:245] clientv3/balancer: pin "127.0.0.1:2379"
I0212 00:14:43.236698  123634 store.go:1310] Monitoring events count at <storage-prefix>//events
I0212 00:14:43.236724  123634 storage_factory.go:285] storing limitranges in v1, reading as __internal from storagebackend.Config{Type:"", Prefix:"e6f02706-8e02-4007-9275-a272efff67ce", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Quorum:false, Paging:true, Codec:runtime.Codec(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0212 00:14:43.236756  123634 balancer_v1_wrapper.go:125] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0212 00:14:43.236788  123634 clientconn.go:551] parsed scheme: ""
I0212 00:14:43.236818  123634 clientconn.go:557] scheme "" not registered, fallback to default scheme
I0212 00:14:43.236852  123634 resolver_conn_wrapper.go:116] ccResolverWrapper: sending new addresses to cc: [{127.0.0.1:2379 0  <nil>}]
I0212 00:14:43.236973  123634 balancer_v1_wrapper.go:125] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0212 00:14:43.237380  123634 balancer_v1_wrapper.go:245] clientv3/balancer: pin "127.0.0.1:2379"
I0212 00:14:43.237463  123634 balancer_v1_wrapper.go:125] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0212 00:14:43.237792  123634 store.go:1310] Monitoring limitranges count at <storage-prefix>//limitranges
I0212 00:14:43.237827  123634 storage_factory.go:285] storing resourcequotas in v1, reading as __internal from storagebackend.Config{Type:"", Prefix:"e6f02706-8e02-4007-9275-a272efff67ce", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Quorum:false, Paging:true, Codec:runtime.Codec(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0212 00:14:43.237844  123634 reflector.go:170] Listing and watching *core.LimitRange from storage/cacher.go:/limitranges
I0212 00:14:43.237897  123634 clientconn.go:551] parsed scheme: ""
I0212 00:14:43.237910  123634 clientconn.go:557] scheme "" not registered, fallback to default scheme
I0212 00:14:43.237939  123634 resolver_conn_wrapper.go:116] ccResolverWrapper: sending new addresses to cc: [{127.0.0.1:2379 0  <nil>}]
I0212 00:14:43.237990  123634 balancer_v1_wrapper.go:125] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0212 00:14:43.238283  123634 balancer_v1_wrapper.go:245] clientv3/balancer: pin "127.0.0.1:2379"
I0212 00:14:43.238514  123634 balancer_v1_wrapper.go:125] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0212 00:14:43.238620  123634 store.go:1310] Monitoring resourcequotas count at <storage-prefix>//resourcequotas
I0212 00:14:43.238688  123634 reflector.go:170] Listing and watching *core.ResourceQuota from storage/cacher.go:/resourcequotas
I0212 00:14:43.238777  123634 storage_factory.go:285] storing secrets in v1, reading as __internal from storagebackend.Config{Type:"", Prefix:"e6f02706-8e02-4007-9275-a272efff67ce", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Quorum:false, Paging:true, Codec:runtime.Codec(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0212 00:14:43.238846  123634 clientconn.go:551] parsed scheme: ""
I0212 00:14:43.238859  123634 clientconn.go:557] scheme "" not registered, fallback to default scheme
I0212 00:14:43.238901  123634 resolver_conn_wrapper.go:116] ccResolverWrapper: sending new addresses to cc: [{127.0.0.1:2379 0  <nil>}]
I0212 00:14:43.238947  123634 balancer_v1_wrapper.go:125] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0212 00:14:43.239193  123634 balancer_v1_wrapper.go:245] clientv3/balancer: pin "127.0.0.1:2379"
I0212 00:14:43.239440  123634 balancer_v1_wrapper.go:125] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0212 00:14:43.239496  123634 store.go:1310] Monitoring secrets count at <storage-prefix>//secrets
I0212 00:14:43.239566  123634 reflector.go:170] Listing and watching *core.Secret from storage/cacher.go:/secrets
I0212 00:14:43.239678  123634 storage_factory.go:285] storing persistentvolumes in v1, reading as __internal from storagebackend.Config{Type:"", Prefix:"e6f02706-8e02-4007-9275-a272efff67ce", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Quorum:false, Paging:true, Codec:runtime.Codec(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0212 00:14:43.239764  123634 clientconn.go:551] parsed scheme: ""
I0212 00:14:43.239792  123634 clientconn.go:557] scheme "" not registered, fallback to default scheme
I0212 00:14:43.239825  123634 resolver_conn_wrapper.go:116] ccResolverWrapper: sending new addresses to cc: [{127.0.0.1:2379 0  <nil>}]
I0212 00:14:43.239896  123634 balancer_v1_wrapper.go:125] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0212 00:14:43.240331  123634 balancer_v1_wrapper.go:245] clientv3/balancer: pin "127.0.0.1:2379"
I0212 00:14:43.240379  123634 balancer_v1_wrapper.go:125] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0212 00:14:43.240672  123634 store.go:1310] Monitoring persistentvolumes count at <storage-prefix>//persistentvolumes
I0212 00:14:43.240702  123634 reflector.go:170] Listing and watching *core.PersistentVolume from storage/cacher.go:/persistentvolumes
I0212 00:14:43.240809  123634 storage_factory.go:285] storing persistentvolumeclaims in v1, reading as __internal from storagebackend.Config{Type:"", Prefix:"e6f02706-8e02-4007-9275-a272efff67ce", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Quorum:false, Paging:true, Codec:runtime.Codec(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0212 00:14:43.240875  123634 clientconn.go:551] parsed scheme: ""
I0212 00:14:43.240887  123634 clientconn.go:557] scheme "" not registered, fallback to default scheme
I0212 00:14:43.240915  123634 resolver_conn_wrapper.go:116] ccResolverWrapper: sending new addresses to cc: [{127.0.0.1:2379 0  <nil>}]
I0212 00:14:43.240952  123634 balancer_v1_wrapper.go:125] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0212 00:14:43.241282  123634 balancer_v1_wrapper.go:245] clientv3/balancer: pin "127.0.0.1:2379"
I0212 00:14:43.241315  123634 balancer_v1_wrapper.go:125] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0212 00:14:43.241613  123634 store.go:1310] Monitoring persistentvolumeclaims count at <storage-prefix>//persistentvolumeclaims
I0212 00:14:43.241640  123634 reflector.go:170] Listing and watching *core.PersistentVolumeClaim from storage/cacher.go:/persistentvolumeclaims
I0212 00:14:43.241732  123634 storage_factory.go:285] storing configmaps in v1, reading as __internal from storagebackend.Config{Type:"", Prefix:"e6f02706-8e02-4007-9275-a272efff67ce", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Quorum:false, Paging:true, Codec:runtime.Codec(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0212 00:14:43.241796  123634 clientconn.go:551] parsed scheme: ""
I0212 00:14:43.241808  123634 clientconn.go:557] scheme "" not registered, fallback to default scheme
I0212 00:14:43.241837  123634 resolver_conn_wrapper.go:116] ccResolverWrapper: sending new addresses to cc: [{127.0.0.1:2379 0  <nil>}]
I0212 00:14:43.241885  123634 balancer_v1_wrapper.go:125] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0212 00:14:43.242206  123634 balancer_v1_wrapper.go:245] clientv3/balancer: pin "127.0.0.1:2379"
I0212 00:14:43.242350  123634 balancer_v1_wrapper.go:125] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0212 00:14:43.242538  123634 store.go:1310] Monitoring configmaps count at <storage-prefix>//configmaps
I0212 00:14:43.242647  123634 reflector.go:170] Listing and watching *core.ConfigMap from storage/cacher.go:/configmaps
I0212 00:14:43.242685  123634 storage_factory.go:285] storing namespaces in v1, reading as __internal from storagebackend.Config{Type:"", Prefix:"e6f02706-8e02-4007-9275-a272efff67ce", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Quorum:false, Paging:true, Codec:runtime.Codec(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0212 00:14:43.242749  123634 clientconn.go:551] parsed scheme: ""
I0212 00:14:43.242761  123634 clientconn.go:557] scheme "" not registered, fallback to default scheme
I0212 00:14:43.242789  123634 resolver_conn_wrapper.go:116] ccResolverWrapper: sending new addresses to cc: [{127.0.0.1:2379 0  <nil>}]
I0212 00:14:43.242822  123634 balancer_v1_wrapper.go:125] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0212 00:14:43.243098  123634 balancer_v1_wrapper.go:245] clientv3/balancer: pin "127.0.0.1:2379"
I0212 00:14:43.243425  123634 store.go:1310] Monitoring namespaces count at <storage-prefix>//namespaces
I0212 00:14:43.243591  123634 storage_factory.go:285] storing endpoints in v1, reading as __internal from storagebackend.Config{Type:"", Prefix:"e6f02706-8e02-4007-9275-a272efff67ce", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Quorum:false, Paging:true, Codec:runtime.Codec(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0212 00:14:43.243616  123634 balancer_v1_wrapper.go:125] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0212 00:14:43.243648  123634 reflector.go:170] Listing and watching *core.Namespace from storage/cacher.go:/namespaces
I0212 00:14:43.243659  123634 clientconn.go:551] parsed scheme: ""
I0212 00:14:43.243670  123634 clientconn.go:557] scheme "" not registered, fallback to default scheme
I0212 00:14:43.243699  123634 resolver_conn_wrapper.go:116] ccResolverWrapper: sending new addresses to cc: [{127.0.0.1:2379 0  <nil>}]
I0212 00:14:43.243746  123634 balancer_v1_wrapper.go:125] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0212 00:14:43.244003  123634 balancer_v1_wrapper.go:245] clientv3/balancer: pin "127.0.0.1:2379"
I0212 00:14:43.244074  123634 balancer_v1_wrapper.go:125] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0212 00:14:43.244731  123634 store.go:1310] Monitoring endpoints count at <storage-prefix>//endpoints
I0212 00:14:43.244809  123634 reflector.go:170] Listing and watching *core.Endpoints from storage/cacher.go:/endpoints
I0212 00:14:43.244883  123634 storage_factory.go:285] storing nodes in v1, reading as __internal from storagebackend.Config{Type:"", Prefix:"e6f02706-8e02-4007-9275-a272efff67ce", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Quorum:false, Paging:true, Codec:runtime.Codec(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0212 00:14:43.244966  123634 clientconn.go:551] parsed scheme: ""
I0212 00:14:43.244990  123634 clientconn.go:557] scheme "" not registered, fallback to default scheme
I0212 00:14:43.245031  123634 resolver_conn_wrapper.go:116] ccResolverWrapper: sending new addresses to cc: [{127.0.0.1:2379 0  <nil>}]
I0212 00:14:43.245108  123634 balancer_v1_wrapper.go:125] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0212 00:14:43.245791  123634 balancer_v1_wrapper.go:245] clientv3/balancer: pin "127.0.0.1:2379"
I0212 00:14:43.245858  123634 balancer_v1_wrapper.go:125] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0212 00:14:43.246200  123634 store.go:1310] Monitoring nodes count at <storage-prefix>//nodes
I0212 00:14:43.246295  123634 reflector.go:170] Listing and watching *core.Node from storage/cacher.go:/nodes
I0212 00:14:43.246349  123634 storage_factory.go:285] storing pods in v1, reading as __internal from storagebackend.Config{Type:"", Prefix:"e6f02706-8e02-4007-9275-a272efff67ce", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Quorum:false, Paging:true, Codec:runtime.Codec(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0212 00:14:43.246444  123634 clientconn.go:551] parsed scheme: ""
I0212 00:14:43.246469  123634 clientconn.go:557] scheme "" not registered, fallback to default scheme
I0212 00:14:43.246520  123634 resolver_conn_wrapper.go:116] ccResolverWrapper: sending new addresses to cc: [{127.0.0.1:2379 0  <nil>}]
I0212 00:14:43.246603  123634 balancer_v1_wrapper.go:125] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0212 00:14:43.246860  123634 balancer_v1_wrapper.go:245] clientv3/balancer: pin "127.0.0.1:2379"
I0212 00:14:43.246956  123634 balancer_v1_wrapper.go:125] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0212 00:14:43.247342  123634 store.go:1310] Monitoring pods count at <storage-prefix>//pods
I0212 00:14:43.247415  123634 reflector.go:170] Listing and watching *core.Pod from storage/cacher.go:/pods
I0212 00:14:43.247470  123634 storage_factory.go:285] storing serviceaccounts in v1, reading as __internal from storagebackend.Config{Type:"", Prefix:"e6f02706-8e02-4007-9275-a272efff67ce", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Quorum:false, Paging:true, Codec:runtime.Codec(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0212 00:14:43.247597  123634 clientconn.go:551] parsed scheme: ""
I0212 00:14:43.247619  123634 clientconn.go:557] scheme "" not registered, fallback to default scheme
I0212 00:14:43.247648  123634 resolver_conn_wrapper.go:116] ccResolverWrapper: sending new addresses to cc: [{127.0.0.1:2379 0  <nil>}]
I0212 00:14:43.247702  123634 balancer_v1_wrapper.go:125] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0212 00:14:43.248153  123634 balancer_v1_wrapper.go:245] clientv3/balancer: pin "127.0.0.1:2379"
I0212 00:14:43.248498  123634 store.go:1310] Monitoring serviceaccounts count at <storage-prefix>//serviceaccounts
I0212 00:14:43.248665  123634 reflector.go:170] Listing and watching *core.ServiceAccount from storage/cacher.go:/serviceaccounts
I0212 00:14:43.248708  123634 storage_factory.go:285] storing services in v1, reading as __internal from storagebackend.Config{Type:"", Prefix:"e6f02706-8e02-4007-9275-a272efff67ce", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Quorum:false, Paging:true, Codec:runtime.Codec(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0212 00:14:43.248786  123634 clientconn.go:551] parsed scheme: ""
I0212 00:14:43.248815  123634 clientconn.go:557] scheme "" not registered, fallback to default scheme
I0212 00:14:43.248848  123634 resolver_conn_wrapper.go:116] ccResolverWrapper: sending new addresses to cc: [{127.0.0.1:2379 0  <nil>}]
I0212 00:14:43.248910  123634 balancer_v1_wrapper.go:125] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0212 00:14:43.248512  123634 balancer_v1_wrapper.go:125] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0212 00:14:43.249142  123634 balancer_v1_wrapper.go:245] clientv3/balancer: pin "127.0.0.1:2379"
I0212 00:14:43.249398  123634 balancer_v1_wrapper.go:125] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0212 00:14:43.249571  123634 store.go:1310] Monitoring services count at <storage-prefix>//services
I0212 00:14:43.249618  123634 storage_factory.go:285] storing services in v1, reading as __internal from storagebackend.Config{Type:"", Prefix:"e6f02706-8e02-4007-9275-a272efff67ce", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Quorum:false, Paging:true, Codec:runtime.Codec(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0212 00:14:43.249657  123634 reflector.go:170] Listing and watching *core.Service from storage/cacher.go:/services
I0212 00:14:43.249736  123634 clientconn.go:551] parsed scheme: ""
I0212 00:14:43.249760  123634 clientconn.go:557] scheme "" not registered, fallback to default scheme
I0212 00:14:43.249820  123634 resolver_conn_wrapper.go:116] ccResolverWrapper: sending new addresses to cc: [{127.0.0.1:2379 0  <nil>}]
I0212 00:14:43.249872  123634 balancer_v1_wrapper.go:125] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0212 00:14:43.250122  123634 balancer_v1_wrapper.go:245] clientv3/balancer: pin "127.0.0.1:2379"
I0212 00:14:43.250162  123634 balancer_v1_wrapper.go:125] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0212 00:14:43.250226  123634 clientconn.go:551] parsed scheme: ""
I0212 00:14:43.250248  123634 clientconn.go:557] scheme "" not registered, fallback to default scheme
I0212 00:14:43.250287  123634 resolver_conn_wrapper.go:116] ccResolverWrapper: sending new addresses to cc: [{127.0.0.1:2379 0  <nil>}]
I0212 00:14:43.250363  123634 balancer_v1_wrapper.go:125] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0212 00:14:43.251530  123634 balancer_v1_wrapper.go:245] clientv3/balancer: pin "127.0.0.1:2379"
I0212 00:14:43.251584  123634 balancer_v1_wrapper.go:125] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0212 00:14:43.251737  123634 storage_factory.go:285] storing replicationcontrollers in v1, reading as __internal from storagebackend.Config{Type:"", Prefix:"e6f02706-8e02-4007-9275-a272efff67ce", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Quorum:false, Paging:true, Codec:runtime.Codec(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0212 00:14:43.251821  123634 clientconn.go:551] parsed scheme: ""
I0212 00:14:43.251836  123634 clientconn.go:557] scheme "" not registered, fallback to default scheme
I0212 00:14:43.251862  123634 resolver_conn_wrapper.go:116] ccResolverWrapper: sending new addresses to cc: [{127.0.0.1:2379 0  <nil>}]
I0212 00:14:43.251907  123634 balancer_v1_wrapper.go:125] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0212 00:14:43.252128  123634 balancer_v1_wrapper.go:245] clientv3/balancer: pin "127.0.0.1:2379"
I0212 00:14:43.252209  123634 balancer_v1_wrapper.go:125] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0212 00:14:43.252449  123634 store.go:1310] Monitoring replicationcontrollers count at <storage-prefix>//replicationcontrollers
I0212 00:14:43.252801  123634 reflector.go:170] Listing and watching *core.ReplicationController from storage/cacher.go:/replicationcontrollers
I0212 00:14:43.263926  123634 master.go:407] Skipping disabled API group "auditregistration.k8s.io".
I0212 00:14:43.263966  123634 master.go:415] Enabling API group "authentication.k8s.io".
I0212 00:14:43.263981  123634 master.go:415] Enabling API group "authorization.k8s.io".
I0212 00:14:43.264148  123634 storage_factory.go:285] storing horizontalpodautoscalers.autoscaling in autoscaling/v1, reading as autoscaling/__internal from storagebackend.Config{Type:"", Prefix:"e6f02706-8e02-4007-9275-a272efff67ce", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Quorum:false, Paging:true, Codec:runtime.Codec(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0212 00:14:43.264288  123634 clientconn.go:551] parsed scheme: ""
I0212 00:14:43.264315  123634 clientconn.go:557] scheme "" not registered, fallback to default scheme
I0212 00:14:43.264361  123634 resolver_conn_wrapper.go:116] ccResolverWrapper: sending new addresses to cc: [{127.0.0.1:2379 0  <nil>}]
I0212 00:14:43.264429  123634 balancer_v1_wrapper.go:125] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0212 00:14:43.264814  123634 balancer_v1_wrapper.go:245] clientv3/balancer: pin "127.0.0.1:2379"
I0212 00:14:43.264901  123634 balancer_v1_wrapper.go:125] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0212 00:14:43.265152  123634 store.go:1310] Monitoring horizontalpodautoscalers.autoscaling count at <storage-prefix>//horizontalpodautoscalers
I0212 00:14:43.265208  123634 reflector.go:170] Listing and watching *autoscaling.HorizontalPodAutoscaler from storage/cacher.go:/horizontalpodautoscalers
I0212 00:14:43.265298  123634 storage_factory.go:285] storing horizontalpodautoscalers.autoscaling in autoscaling/v1, reading as autoscaling/__internal from storagebackend.Config{Type:"", Prefix:"e6f02706-8e02-4007-9275-a272efff67ce", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Quorum:false, Paging:true, Codec:runtime.Codec(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0212 00:14:43.265405  123634 clientconn.go:551] parsed scheme: ""
I0212 00:14:43.265446  123634 clientconn.go:557] scheme "" not registered, fallback to default scheme
I0212 00:14:43.265512  123634 resolver_conn_wrapper.go:116] ccResolverWrapper: sending new addresses to cc: [{127.0.0.1:2379 0  <nil>}]
I0212 00:14:43.265609  123634 balancer_v1_wrapper.go:125] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0212 00:14:43.265826  123634 balancer_v1_wrapper.go:245] clientv3/balancer: pin "127.0.0.1:2379"
I0212 00:14:43.265873  123634 balancer_v1_wrapper.go:125] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0212 00:14:43.266207  123634 store.go:1310] Monitoring horizontalpodautoscalers.autoscaling count at <storage-prefix>//horizontalpodautoscalers
I0212 00:14:43.266348  123634 storage_factory.go:285] storing horizontalpodautoscalers.autoscaling in autoscaling/v1, reading as autoscaling/__internal from storagebackend.Config{Type:"", Prefix:"e6f02706-8e02-4007-9275-a272efff67ce", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Quorum:false, Paging:true, Codec:runtime.Codec(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0212 00:14:43.266397  123634 reflector.go:170] Listing and watching *autoscaling.HorizontalPodAutoscaler from storage/cacher.go:/horizontalpodautoscalers
I0212 00:14:43.266430  123634 clientconn.go:551] parsed scheme: ""
I0212 00:14:43.266445  123634 clientconn.go:557] scheme "" not registered, fallback to default scheme
I0212 00:14:43.266471  123634 resolver_conn_wrapper.go:116] ccResolverWrapper: sending new addresses to cc: [{127.0.0.1:2379 0  <nil>}]
I0212 00:14:43.266540  123634 balancer_v1_wrapper.go:125] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0212 00:14:43.266876  123634 balancer_v1_wrapper.go:245] clientv3/balancer: pin "127.0.0.1:2379"
I0212 00:14:43.266941  123634 balancer_v1_wrapper.go:125] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0212 00:14:43.267158  123634 store.go:1310] Monitoring horizontalpodautoscalers.autoscaling count at <storage-prefix>//horizontalpodautoscalers
I0212 00:14:43.267177  123634 master.go:415] Enabling API group "autoscaling".
I0212 00:14:43.267198  123634 reflector.go:170] Listing and watching *autoscaling.HorizontalPodAutoscaler from storage/cacher.go:/horizontalpodautoscalers
I0212 00:14:43.267297  123634 storage_factory.go:285] storing jobs.batch in batch/v1, reading as batch/__internal from storagebackend.Config{Type:"", Prefix:"e6f02706-8e02-4007-9275-a272efff67ce", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Quorum:false, Paging:true, Codec:runtime.Codec(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0212 00:14:43.267358  123634 clientconn.go:551] parsed scheme: ""
I0212 00:14:43.267369  123634 clientconn.go:557] scheme "" not registered, fallback to default scheme
I0212 00:14:43.267396  123634 resolver_conn_wrapper.go:116] ccResolverWrapper: sending new addresses to cc: [{127.0.0.1:2379 0  <nil>}]
I0212 00:14:43.267892  123634 balancer_v1_wrapper.go:125] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0212 00:14:43.268233  123634 balancer_v1_wrapper.go:245] clientv3/balancer: pin "127.0.0.1:2379"
I0212 00:14:43.268260  123634 balancer_v1_wrapper.go:125] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0212 00:14:43.268722  123634 store.go:1310] Monitoring jobs.batch count at <storage-prefix>//jobs
I0212 00:14:43.268861  123634 storage_factory.go:285] storing cronjobs.batch in batch/v1beta1, reading as batch/__internal from storagebackend.Config{Type:"", Prefix:"e6f02706-8e02-4007-9275-a272efff67ce", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Quorum:false, Paging:true, Codec:runtime.Codec(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0212 00:14:43.268910  123634 reflector.go:170] Listing and watching *batch.Job from storage/cacher.go:/jobs
I0212 00:14:43.268947  123634 clientconn.go:551] parsed scheme: ""
I0212 00:14:43.268960  123634 clientconn.go:557] scheme "" not registered, fallback to default scheme
I0212 00:14:43.268992  123634 resolver_conn_wrapper.go:116] ccResolverWrapper: sending new addresses to cc: [{127.0.0.1:2379 0  <nil>}]
I0212 00:14:43.269032  123634 balancer_v1_wrapper.go:125] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0212 00:14:43.269321  123634 balancer_v1_wrapper.go:245] clientv3/balancer: pin "127.0.0.1:2379"
I0212 00:14:43.269652  123634 store.go:1310] Monitoring cronjobs.batch count at <storage-prefix>//cronjobs
I0212 00:14:43.269686  123634 master.go:415] Enabling API group "batch".
I0212 00:14:43.269836  123634 storage_factory.go:285] storing certificatesigningrequests.certificates.k8s.io in certificates.k8s.io/v1beta1, reading as certificates.k8s.io/__internal from storagebackend.Config{Type:"", Prefix:"e6f02706-8e02-4007-9275-a272efff67ce", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Quorum:false, Paging:true, Codec:runtime.Codec(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0212 00:14:43.269913  123634 clientconn.go:551] parsed scheme: ""
I0212 00:14:43.269938  123634 clientconn.go:557] scheme "" not registered, fallback to default scheme
I0212 00:14:43.269983  123634 resolver_conn_wrapper.go:116] ccResolverWrapper: sending new addresses to cc: [{127.0.0.1:2379 0  <nil>}]
I0212 00:14:43.270090  123634 balancer_v1_wrapper.go:125] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0212 00:14:43.270163  123634 reflector.go:170] Listing and watching *batch.CronJob from storage/cacher.go:/cronjobs
I0212 00:14:43.270351  123634 balancer_v1_wrapper.go:125] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0212 00:14:43.270612  123634 balancer_v1_wrapper.go:245] clientv3/balancer: pin "127.0.0.1:2379"
I0212 00:14:43.270841  123634 store.go:1310] Monitoring certificatesigningrequests.certificates.k8s.io count at <storage-prefix>//certificatesigningrequests
I0212 00:14:43.270861  123634 master.go:415] Enabling API group "certificates.k8s.io".
I0212 00:14:43.270980  123634 storage_factory.go:285] storing leases.coordination.k8s.io in coordination.k8s.io/v1beta1, reading as coordination.k8s.io/__internal from storagebackend.Config{Type:"", Prefix:"e6f02706-8e02-4007-9275-a272efff67ce", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Quorum:false, Paging:true, Codec:runtime.Codec(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0212 00:14:43.271063  123634 clientconn.go:551] parsed scheme: ""
I0212 00:14:43.271077  123634 clientconn.go:557] scheme "" not registered, fallback to default scheme
I0212 00:14:43.271105  123634 resolver_conn_wrapper.go:116] ccResolverWrapper: sending new addresses to cc: [{127.0.0.1:2379 0  <nil>}]
I0212 00:14:43.271174  123634 balancer_v1_wrapper.go:125] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0212 00:14:43.271204  123634 reflector.go:170] Listing and watching *certificates.CertificateSigningRequest from storage/cacher.go:/certificatesigningrequests
I0212 00:14:43.271332  123634 balancer_v1_wrapper.go:125] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0212 00:14:43.271569  123634 balancer_v1_wrapper.go:245] clientv3/balancer: pin "127.0.0.1:2379"
I0212 00:14:43.271617  123634 balancer_v1_wrapper.go:125] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0212 00:14:43.271793  123634 store.go:1310] Monitoring leases.coordination.k8s.io count at <storage-prefix>//leases
I0212 00:14:43.271914  123634 storage_factory.go:285] storing leases.coordination.k8s.io in coordination.k8s.io/v1beta1, reading as coordination.k8s.io/__internal from storagebackend.Config{Type:"", Prefix:"e6f02706-8e02-4007-9275-a272efff67ce", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Quorum:false, Paging:true, Codec:runtime.Codec(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0212 00:14:43.271971  123634 clientconn.go:551] parsed scheme: ""
I0212 00:14:43.271984  123634 clientconn.go:557] scheme "" not registered, fallback to default scheme
I0212 00:14:43.272010  123634 resolver_conn_wrapper.go:116] ccResolverWrapper: sending new addresses to cc: [{127.0.0.1:2379 0  <nil>}]
I0212 00:14:43.272072  123634 reflector.go:170] Listing and watching *coordination.Lease from storage/cacher.go:/leases
I0212 00:14:43.272183  123634 balancer_v1_wrapper.go:125] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0212 00:14:43.272419  123634 balancer_v1_wrapper.go:245] clientv3/balancer: pin "127.0.0.1:2379"
I0212 00:14:43.272697  123634 store.go:1310] Monitoring leases.coordination.k8s.io count at <storage-prefix>//leases
I0212 00:14:43.272714  123634 master.go:415] Enabling API group "coordination.k8s.io".
I0212 00:14:43.272854  123634 storage_factory.go:285] storing replicationcontrollers in v1, reading as __internal from storagebackend.Config{Type:"", Prefix:"e6f02706-8e02-4007-9275-a272efff67ce", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Quorum:false, Paging:true, Codec:runtime.Codec(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0212 00:14:43.272919  123634 clientconn.go:551] parsed scheme: ""
I0212 00:14:43.272931  123634 clientconn.go:557] scheme "" not registered, fallback to default scheme
I0212 00:14:43.272960  123634 resolver_conn_wrapper.go:116] ccResolverWrapper: sending new addresses to cc: [{127.0.0.1:2379 0  <nil>}]
I0212 00:14:43.273031  123634 balancer_v1_wrapper.go:125] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0212 00:14:43.273063  123634 reflector.go:170] Listing and watching *coordination.Lease from storage/cacher.go:/leases
I0212 00:14:43.273205  123634 balancer_v1_wrapper.go:125] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0212 00:14:43.275567  123634 balancer_v1_wrapper.go:245] clientv3/balancer: pin "127.0.0.1:2379"
I0212 00:14:43.275713  123634 balancer_v1_wrapper.go:125] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0212 00:14:43.275813  123634 store.go:1310] Monitoring replicationcontrollers count at <storage-prefix>//replicationcontrollers
I0212 00:14:43.275875  123634 reflector.go:170] Listing and watching *core.ReplicationController from storage/cacher.go:/replicationcontrollers
I0212 00:14:43.275967  123634 storage_factory.go:285] storing daemonsets.apps in apps/v1, reading as apps/__internal from storagebackend.Config{Type:"", Prefix:"e6f02706-8e02-4007-9275-a272efff67ce", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Quorum:false, Paging:true, Codec:runtime.Codec(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0212 00:14:43.276058  123634 clientconn.go:551] parsed scheme: ""
I0212 00:14:43.276074  123634 clientconn.go:557] scheme "" not registered, fallback to default scheme
I0212 00:14:43.276103  123634 resolver_conn_wrapper.go:116] ccResolverWrapper: sending new addresses to cc: [{127.0.0.1:2379 0  <nil>}]
I0212 00:14:43.276155  123634 balancer_v1_wrapper.go:125] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0212 00:14:43.276596  123634 balancer_v1_wrapper.go:245] clientv3/balancer: pin "127.0.0.1:2379"
I0212 00:14:43.276637  123634 balancer_v1_wrapper.go:125] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0212 00:14:43.277097  123634 store.go:1310] Monitoring daemonsets.apps count at <storage-prefix>//daemonsets
I0212 00:14:43.277137  123634 reflector.go:170] Listing and watching *apps.DaemonSet from storage/cacher.go:/daemonsets
I0212 00:14:43.277245  123634 storage_factory.go:285] storing deployments.apps in apps/v1, reading as apps/__internal from storagebackend.Config{Type:"", Prefix:"e6f02706-8e02-4007-9275-a272efff67ce", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Quorum:false, Paging:true, Codec:runtime.Codec(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0212 00:14:43.277311  123634 clientconn.go:551] parsed scheme: ""
I0212 00:14:43.277325  123634 clientconn.go:557] scheme "" not registered, fallback to default scheme
I0212 00:14:43.277355  123634 resolver_conn_wrapper.go:116] ccResolverWrapper: sending new addresses to cc: [{127.0.0.1:2379 0  <nil>}]
I0212 00:14:43.277403  123634 balancer_v1_wrapper.go:125] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0212 00:14:43.277862  123634 balancer_v1_wrapper.go:245] clientv3/balancer: pin "127.0.0.1:2379"
I0212 00:14:43.277923  123634 balancer_v1_wrapper.go:125] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0212 00:14:43.278282  123634 store.go:1310] Monitoring deployments.apps count at <storage-prefix>//deployments
I0212 00:14:43.278325  123634 reflector.go:170] Listing and watching *apps.Deployment from storage/cacher.go:/deployments
I0212 00:14:43.278427  123634 storage_factory.go:285] storing ingresses.extensions in extensions/v1beta1, reading as extensions/__internal from storagebackend.Config{Type:"", Prefix:"e6f02706-8e02-4007-9275-a272efff67ce", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Quorum:false, Paging:true, Codec:runtime.Codec(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0212 00:14:43.278512  123634 clientconn.go:551] parsed scheme: ""
I0212 00:14:43.278527  123634 clientconn.go:557] scheme "" not registered, fallback to default scheme
I0212 00:14:43.278585  123634 resolver_conn_wrapper.go:116] ccResolverWrapper: sending new addresses to cc: [{127.0.0.1:2379 0  <nil>}]
I0212 00:14:43.278626  123634 balancer_v1_wrapper.go:125] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0212 00:14:43.278901  123634 balancer_v1_wrapper.go:245] clientv3/balancer: pin "127.0.0.1:2379"
I0212 00:14:43.279147  123634 store.go:1310] Monitoring ingresses.extensions count at <storage-prefix>//ingresses
I0212 00:14:43.279190  123634 reflector.go:170] Listing and watching *extensions.Ingress from storage/cacher.go:/ingresses
I0212 00:14:43.279148  123634 balancer_v1_wrapper.go:125] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0212 00:14:43.279292  123634 storage_factory.go:285] storing podsecuritypolicies.policy in policy/v1beta1, reading as policy/__internal from storagebackend.Config{Type:"", Prefix:"e6f02706-8e02-4007-9275-a272efff67ce", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Quorum:false, Paging:true, Codec:runtime.Codec(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0212 00:14:43.279363  123634 clientconn.go:551] parsed scheme: ""
I0212 00:14:43.279374  123634 clientconn.go:557] scheme "" not registered, fallback to default scheme
I0212 00:14:43.279402  123634 resolver_conn_wrapper.go:116] ccResolverWrapper: sending new addresses to cc: [{127.0.0.1:2379 0  <nil>}]
I0212 00:14:43.279462  123634 balancer_v1_wrapper.go:125] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0212 00:14:43.279836  123634 balancer_v1_wrapper.go:245] clientv3/balancer: pin "127.0.0.1:2379"
I0212 00:14:43.280115  123634 balancer_v1_wrapper.go:125] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0212 00:14:43.280181  123634 store.go:1310] Monitoring podsecuritypolicies.policy count at <storage-prefix>//podsecuritypolicies
I0212 00:14:43.280236  123634 reflector.go:170] Listing and watching *policy.PodSecurityPolicy from storage/cacher.go:/podsecuritypolicies
I0212 00:14:43.280308  123634 storage_factory.go:285] storing replicasets.apps in apps/v1, reading as apps/__internal from storagebackend.Config{Type:"", Prefix:"e6f02706-8e02-4007-9275-a272efff67ce", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Quorum:false, Paging:true, Codec:runtime.Codec(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0212 00:14:43.280371  123634 clientconn.go:551] parsed scheme: ""
I0212 00:14:43.280380  123634 clientconn.go:557] scheme "" not registered, fallback to default scheme
I0212 00:14:43.280400  123634 resolver_conn_wrapper.go:116] ccResolverWrapper: sending new addresses to cc: [{127.0.0.1:2379 0  <nil>}]
I0212 00:14:43.280459  123634 balancer_v1_wrapper.go:125] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0212 00:14:43.280744  123634 balancer_v1_wrapper.go:245] clientv3/balancer: pin "127.0.0.1:2379"
I0212 00:14:43.280794  123634 balancer_v1_wrapper.go:125] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0212 00:14:43.281138  123634 store.go:1310] Monitoring replicasets.apps count at <storage-prefix>//replicasets
I0212 00:14:43.281215  123634 reflector.go:170] Listing and watching *apps.ReplicaSet from storage/cacher.go:/replicasets
I0212 00:14:43.281279  123634 storage_factory.go:285] storing networkpolicies.networking.k8s.io in networking.k8s.io/v1, reading as networking.k8s.io/__internal from storagebackend.Config{Type:"", Prefix:"e6f02706-8e02-4007-9275-a272efff67ce", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Quorum:false, Paging:true, Codec:runtime.Codec(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0212 00:14:43.281359  123634 clientconn.go:551] parsed scheme: ""
I0212 00:14:43.281382  123634 clientconn.go:557] scheme "" not registered, fallback to default scheme
I0212 00:14:43.281421  123634 resolver_conn_wrapper.go:116] ccResolverWrapper: sending new addresses to cc: [{127.0.0.1:2379 0  <nil>}]
I0212 00:14:43.281505  123634 balancer_v1_wrapper.go:125] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0212 00:14:43.281832  123634 balancer_v1_wrapper.go:245] clientv3/balancer: pin "127.0.0.1:2379"
I0212 00:14:43.281896  123634 balancer_v1_wrapper.go:125] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0212 00:14:43.282095  123634 store.go:1310] Monitoring networkpolicies.networking.k8s.io count at <storage-prefix>//networkpolicies
I0212 00:14:43.282131  123634 master.go:415] Enabling API group "extensions".
I0212 00:14:43.282143  123634 reflector.go:170] Listing and watching *networking.NetworkPolicy from storage/cacher.go:/networkpolicies
I0212 00:14:43.282252  123634 storage_factory.go:285] storing networkpolicies.networking.k8s.io in networking.k8s.io/v1, reading as networking.k8s.io/__internal from storagebackend.Config{Type:"", Prefix:"e6f02706-8e02-4007-9275-a272efff67ce", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Quorum:false, Paging:true, Codec:runtime.Codec(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0212 00:14:43.282337  123634 clientconn.go:551] parsed scheme: ""
I0212 00:14:43.282363  123634 clientconn.go:557] scheme "" not registered, fallback to default scheme
I0212 00:14:43.282405  123634 resolver_conn_wrapper.go:116] ccResolverWrapper: sending new addresses to cc: [{127.0.0.1:2379 0  <nil>}]
I0212 00:14:43.282455  123634 balancer_v1_wrapper.go:125] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0212 00:14:43.282707  123634 balancer_v1_wrapper.go:245] clientv3/balancer: pin "127.0.0.1:2379"
I0212 00:14:43.282779  123634 balancer_v1_wrapper.go:125] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0212 00:14:43.283073  123634 store.go:1310] Monitoring networkpolicies.networking.k8s.io count at <storage-prefix>//networkpolicies
I0212 00:14:43.283117  123634 master.go:415] Enabling API group "networking.k8s.io".
I0212 00:14:43.283172  123634 reflector.go:170] Listing and watching *networking.NetworkPolicy from storage/cacher.go:/networkpolicies
I0212 00:14:43.283271  123634 storage_factory.go:285] storing poddisruptionbudgets.policy in policy/v1beta1, reading as policy/__internal from storagebackend.Config{Type:"", Prefix:"e6f02706-8e02-4007-9275-a272efff67ce", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Quorum:false, Paging:true, Codec:runtime.Codec(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0212 00:14:43.283348  123634 clientconn.go:551] parsed scheme: ""
I0212 00:14:43.283363  123634 clientconn.go:557] scheme "" not registered, fallback to default scheme
I0212 00:14:43.283410  123634 resolver_conn_wrapper.go:116] ccResolverWrapper: sending new addresses to cc: [{127.0.0.1:2379 0  <nil>}]
I0212 00:14:43.283461  123634 balancer_v1_wrapper.go:125] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0212 00:14:43.283772  123634 balancer_v1_wrapper.go:245] clientv3/balancer: pin "127.0.0.1:2379"
I0212 00:14:43.283827  123634 balancer_v1_wrapper.go:125] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0212 00:14:43.284297  123634 store.go:1310] Monitoring poddisruptionbudgets.policy count at <storage-prefix>//poddisruptionbudgets
I0212 00:14:43.284377  123634 reflector.go:170] Listing and watching *policy.PodDisruptionBudget from storage/cacher.go:/poddisruptionbudgets
I0212 00:14:43.284432  123634 storage_factory.go:285] storing podsecuritypolicies.policy in policy/v1beta1, reading as policy/__internal from storagebackend.Config{Type:"", Prefix:"e6f02706-8e02-4007-9275-a272efff67ce", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Quorum:false, Paging:true, Codec:runtime.Codec(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0212 00:14:43.284531  123634 clientconn.go:551] parsed scheme: ""
I0212 00:14:43.284572  123634 clientconn.go:557] scheme "" not registered, fallback to default scheme
I0212 00:14:43.284606  123634 resolver_conn_wrapper.go:116] ccResolverWrapper: sending new addresses to cc: [{127.0.0.1:2379 0  <nil>}]
I0212 00:14:43.284670  123634 balancer_v1_wrapper.go:125] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0212 00:14:43.290783  123634 balancer_v1_wrapper.go:245] clientv3/balancer: pin "127.0.0.1:2379"
I0212 00:14:43.290874  123634 balancer_v1_wrapper.go:125] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0212 00:14:43.291329  123634 store.go:1310] Monitoring podsecuritypolicies.policy count at <storage-prefix>//podsecuritypolicies
I0212 00:14:43.291402  123634 master.go:415] Enabling API group "policy".
I0212 00:14:43.291453  123634 reflector.go:170] Listing and watching *policy.PodSecurityPolicy from storage/cacher.go:/podsecuritypolicies
I0212 00:14:43.291505  123634 storage_factory.go:285] storing roles.rbac.authorization.k8s.io in rbac.authorization.k8s.io/v1, reading as rbac.authorization.k8s.io/__internal from storagebackend.Config{Type:"", Prefix:"e6f02706-8e02-4007-9275-a272efff67ce", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Quorum:false, Paging:true, Codec:runtime.Codec(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0212 00:14:43.291633  123634 clientconn.go:551] parsed scheme: ""
I0212 00:14:43.291676  123634 clientconn.go:557] scheme "" not registered, fallback to default scheme
I0212 00:14:43.291722  123634 resolver_conn_wrapper.go:116] ccResolverWrapper: sending new addresses to cc: [{127.0.0.1:2379 0  <nil>}]
I0212 00:14:43.291805  123634 balancer_v1_wrapper.go:125] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0212 00:14:43.292112  123634 balancer_v1_wrapper.go:245] clientv3/balancer: pin "127.0.0.1:2379"
I0212 00:14:43.292187  123634 balancer_v1_wrapper.go:125] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0212 00:14:43.292521  123634 store.go:1310] Monitoring roles.rbac.authorization.k8s.io count at <storage-prefix>//roles
I0212 00:14:43.292625  123634 reflector.go:170] Listing and watching *rbac.Role from storage/cacher.go:/roles
I0212 00:14:43.292785  123634 storage_factory.go:285] storing rolebindings.rbac.authorization.k8s.io in rbac.authorization.k8s.io/v1, reading as rbac.authorization.k8s.io/__internal from storagebackend.Config{Type:"", Prefix:"e6f02706-8e02-4007-9275-a272efff67ce", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Quorum:false, Paging:true, Codec:runtime.Codec(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0212 00:14:43.293010  123634 clientconn.go:551] parsed scheme: ""
I0212 00:14:43.293032  123634 clientconn.go:557] scheme "" not registered, fallback to default scheme
I0212 00:14:43.293065  123634 resolver_conn_wrapper.go:116] ccResolverWrapper: sending new addresses to cc: [{127.0.0.1:2379 0  <nil>}]
I0212 00:14:43.293108  123634 balancer_v1_wrapper.go:125] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0212 00:14:43.293330  123634 balancer_v1_wrapper.go:245] clientv3/balancer: pin "127.0.0.1:2379"
I0212 00:14:43.293422  123634 balancer_v1_wrapper.go:125] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0212 00:14:43.293658  123634 store.go:1310] Monitoring rolebindings.rbac.authorization.k8s.io count at <storage-prefix>//rolebindings
I0212 00:14:43.293715  123634 reflector.go:170] Listing and watching *rbac.RoleBinding from storage/cacher.go:/rolebindings
I0212 00:14:43.293702  123634 storage_factory.go:285] storing clusterroles.rbac.authorization.k8s.io in rbac.authorization.k8s.io/v1, reading as rbac.authorization.k8s.io/__internal from storagebackend.Config{Type:"", Prefix:"e6f02706-8e02-4007-9275-a272efff67ce", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Quorum:false, Paging:true, Codec:runtime.Codec(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0212 00:14:43.293923  123634 clientconn.go:551] parsed scheme: ""
I0212 00:14:43.293946  123634 clientconn.go:557] scheme "" not registered, fallback to default scheme
I0212 00:14:43.293989  123634 resolver_conn_wrapper.go:116] ccResolverWrapper: sending new addresses to cc: [{127.0.0.1:2379 0  <nil>}]
I0212 00:14:43.294133  123634 balancer_v1_wrapper.go:125] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0212 00:14:43.296143  123634 balancer_v1_wrapper.go:245] clientv3/balancer: pin "127.0.0.1:2379"
I0212 00:14:43.296214  123634 balancer_v1_wrapper.go:125] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0212 00:14:43.296404  123634 store.go:1310] Monitoring clusterroles.rbac.authorization.k8s.io count at <storage-prefix>//clusterroles
I0212 00:14:43.296478  123634 reflector.go:170] Listing and watching *rbac.ClusterRole from storage/cacher.go:/clusterroles
I0212 00:14:43.296586  123634 storage_factory.go:285] storing clusterrolebindings.rbac.authorization.k8s.io in rbac.authorization.k8s.io/v1, reading as rbac.authorization.k8s.io/__internal from storagebackend.Config{Type:"", Prefix:"e6f02706-8e02-4007-9275-a272efff67ce", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Quorum:false, Paging:true, Codec:runtime.Codec(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0212 00:14:43.296675  123634 clientconn.go:551] parsed scheme: ""
I0212 00:14:43.296700  123634 clientconn.go:557] scheme "" not registered, fallback to default scheme
I0212 00:14:43.296746  123634 resolver_conn_wrapper.go:116] ccResolverWrapper: sending new addresses to cc: [{127.0.0.1:2379 0  <nil>}]
I0212 00:14:43.296853  123634 balancer_v1_wrapper.go:125] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0212 00:14:43.297118  123634 balancer_v1_wrapper.go:245] clientv3/balancer: pin "127.0.0.1:2379"
I0212 00:14:43.297208  123634 balancer_v1_wrapper.go:125] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0212 00:14:43.297613  123634 store.go:1310] Monitoring clusterrolebindings.rbac.authorization.k8s.io count at <storage-prefix>//clusterrolebindings
I0212 00:14:43.297649  123634 reflector.go:170] Listing and watching *rbac.ClusterRoleBinding from storage/cacher.go:/clusterrolebindings
I0212 00:14:43.297712  123634 storage_factory.go:285] storing roles.rbac.authorization.k8s.io in rbac.authorization.k8s.io/v1, reading as rbac.authorization.k8s.io/__internal from storagebackend.Config{Type:"", Prefix:"e6f02706-8e02-4007-9275-a272efff67ce", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Quorum:false, Paging:true, Codec:runtime.Codec(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0212 00:14:43.297822  123634 clientconn.go:551] parsed scheme: ""
I0212 00:14:43.297844  123634 clientconn.go:557] scheme "" not registered, fallback to default scheme
I0212 00:14:43.297877  123634 resolver_conn_wrapper.go:116] ccResolverWrapper: sending new addresses to cc: [{127.0.0.1:2379 0  <nil>}]
I0212 00:14:43.297928  123634 balancer_v1_wrapper.go:125] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0212 00:14:43.298179  123634 balancer_v1_wrapper.go:245] clientv3/balancer: pin "127.0.0.1:2379"
I0212 00:14:43.298305  123634 balancer_v1_wrapper.go:125] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0212 00:14:43.298396  123634 store.go:1310] Monitoring roles.rbac.authorization.k8s.io count at <storage-prefix>//roles
I0212 00:14:43.298450  123634 reflector.go:170] Listing and watching *rbac.Role from storage/cacher.go:/roles
I0212 00:14:43.298629  123634 storage_factory.go:285] storing rolebindings.rbac.authorization.k8s.io in rbac.authorization.k8s.io/v1, reading as rbac.authorization.k8s.io/__internal from storagebackend.Config{Type:"", Prefix:"e6f02706-8e02-4007-9275-a272efff67ce", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Quorum:false, Paging:true, Codec:runtime.Codec(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0212 00:14:43.298746  123634 clientconn.go:551] parsed scheme: ""
I0212 00:14:43.298774  123634 clientconn.go:557] scheme "" not registered, fallback to default scheme
I0212 00:14:43.298812  123634 resolver_conn_wrapper.go:116] ccResolverWrapper: sending new addresses to cc: [{127.0.0.1:2379 0  <nil>}]
I0212 00:14:43.298991  123634 balancer_v1_wrapper.go:125] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0212 00:14:43.299329  123634 balancer_v1_wrapper.go:245] clientv3/balancer: pin "127.0.0.1:2379"
I0212 00:14:43.299432  123634 balancer_v1_wrapper.go:125] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0212 00:14:43.299647  123634 store.go:1310] Monitoring rolebindings.rbac.authorization.k8s.io count at <storage-prefix>//rolebindings
I0212 00:14:43.299698  123634 reflector.go:170] Listing and watching *rbac.RoleBinding from storage/cacher.go:/rolebindings
I0212 00:14:43.299692  123634 storage_factory.go:285] storing clusterroles.rbac.authorization.k8s.io in rbac.authorization.k8s.io/v1, reading as rbac.authorization.k8s.io/__internal from storagebackend.Config{Type:"", Prefix:"e6f02706-8e02-4007-9275-a272efff67ce", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Quorum:false, Paging:true, Codec:runtime.Codec(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0212 00:14:43.299853  123634 clientconn.go:551] parsed scheme: ""
I0212 00:14:43.299887  123634 clientconn.go:557] scheme "" not registered, fallback to default scheme
I0212 00:14:43.299946  123634 resolver_conn_wrapper.go:116] ccResolverWrapper: sending new addresses to cc: [{127.0.0.1:2379 0  <nil>}]
I0212 00:14:43.300048  123634 balancer_v1_wrapper.go:125] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0212 00:14:43.300422  123634 balancer_v1_wrapper.go:245] clientv3/balancer: pin "127.0.0.1:2379"
I0212 00:14:43.300566  123634 balancer_v1_wrapper.go:125] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0212 00:14:43.300868  123634 store.go:1310] Monitoring clusterroles.rbac.authorization.k8s.io count at <storage-prefix>//clusterroles
I0212 00:14:43.300955  123634 reflector.go:170] Listing and watching *rbac.ClusterRole from storage/cacher.go:/clusterroles
I0212 00:14:43.301091  123634 storage_factory.go:285] storing clusterrolebindings.rbac.authorization.k8s.io in rbac.authorization.k8s.io/v1, reading as rbac.authorization.k8s.io/__internal from storagebackend.Config{Type:"", Prefix:"e6f02706-8e02-4007-9275-a272efff67ce", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Quorum:false, Paging:true, Codec:runtime.Codec(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0212 00:14:43.301191  123634 clientconn.go:551] parsed scheme: ""
I0212 00:14:43.301214  123634 clientconn.go:557] scheme "" not registered, fallback to default scheme
I0212 00:14:43.301256  123634 resolver_conn_wrapper.go:116] ccResolverWrapper: sending new addresses to cc: [{127.0.0.1:2379 0  <nil>}]
I0212 00:14:43.301405  123634 balancer_v1_wrapper.go:125] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0212 00:14:43.301660  123634 balancer_v1_wrapper.go:245] clientv3/balancer: pin "127.0.0.1:2379"
I0212 00:14:43.301857  123634 balancer_v1_wrapper.go:125] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0212 00:14:43.301891  123634 store.go:1310] Monitoring clusterrolebindings.rbac.authorization.k8s.io count at <storage-prefix>//clusterrolebindings
I0212 00:14:43.301919  123634 master.go:415] Enabling API group "rbac.authorization.k8s.io".
I0212 00:14:43.301964  123634 reflector.go:170] Listing and watching *rbac.ClusterRoleBinding from storage/cacher.go:/clusterrolebindings
I0212 00:14:43.304008  123634 storage_factory.go:285] storing priorityclasses.scheduling.k8s.io in scheduling.k8s.io/v1beta1, reading as scheduling.k8s.io/__internal from storagebackend.Config{Type:"", Prefix:"e6f02706-8e02-4007-9275-a272efff67ce", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Quorum:false, Paging:true, Codec:runtime.Codec(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0212 00:14:43.304106  123634 clientconn.go:551] parsed scheme: ""
I0212 00:14:43.304174  123634 clientconn.go:557] scheme "" not registered, fallback to default scheme
I0212 00:14:43.304222  123634 resolver_conn_wrapper.go:116] ccResolverWrapper: sending new addresses to cc: [{127.0.0.1:2379 0  <nil>}]
I0212 00:14:43.304298  123634 balancer_v1_wrapper.go:125] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0212 00:14:43.304624  123634 balancer_v1_wrapper.go:245] clientv3/balancer: pin "127.0.0.1:2379"
I0212 00:14:43.304719  123634 balancer_v1_wrapper.go:125] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0212 00:14:43.305016  123634 store.go:1310] Monitoring priorityclasses.scheduling.k8s.io count at <storage-prefix>//priorityclasses
I0212 00:14:43.305044  123634 master.go:415] Enabling API group "scheduling.k8s.io".
I0212 00:14:43.305098  123634 reflector.go:170] Listing and watching *scheduling.PriorityClass from storage/cacher.go:/priorityclasses
I0212 00:14:43.305288  123634 master.go:407] Skipping disabled API group "settings.k8s.io".
I0212 00:14:43.305433  123634 storage_factory.go:285] storing storageclasses.storage.k8s.io in storage.k8s.io/v1beta1, reading as storage.k8s.io/__internal from storagebackend.Config{Type:"", Prefix:"e6f02706-8e02-4007-9275-a272efff67ce", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Quorum:false, Paging:true, Codec:runtime.Codec(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0212 00:14:43.305595  123634 clientconn.go:551] parsed scheme: ""
I0212 00:14:43.305879  123634 clientconn.go:557] scheme "" not registered, fallback to default scheme
I0212 00:14:43.305982  123634 resolver_conn_wrapper.go:116] ccResolverWrapper: sending new addresses to cc: [{127.0.0.1:2379 0  <nil>}]
I0212 00:14:43.306066  123634 balancer_v1_wrapper.go:125] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0212 00:14:43.306328  123634 balancer_v1_wrapper.go:245] clientv3/balancer: pin "127.0.0.1:2379"
I0212 00:14:43.306404  123634 balancer_v1_wrapper.go:125] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0212 00:14:43.306623  123634 store.go:1310] Monitoring storageclasses.storage.k8s.io count at <storage-prefix>//storageclasses
I0212 00:14:43.306663  123634 storage_factory.go:285] storing volumeattachments.storage.k8s.io in storage.k8s.io/v1beta1, reading as storage.k8s.io/__internal from storagebackend.Config{Type:"", Prefix:"e6f02706-8e02-4007-9275-a272efff67ce", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Quorum:false, Paging:true, Codec:runtime.Codec(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0212 00:14:43.306729  123634 clientconn.go:551] parsed scheme: ""
I0212 00:14:43.306742  123634 reflector.go:170] Listing and watching *storage.StorageClass from storage/cacher.go:/storageclasses
I0212 00:14:43.306766  123634 clientconn.go:557] scheme "" not registered, fallback to default scheme
I0212 00:14:43.306855  123634 resolver_conn_wrapper.go:116] ccResolverWrapper: sending new addresses to cc: [{127.0.0.1:2379 0  <nil>}]
I0212 00:14:43.306904  123634 balancer_v1_wrapper.go:125] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0212 00:14:43.307167  123634 balancer_v1_wrapper.go:245] clientv3/balancer: pin "127.0.0.1:2379"
I0212 00:14:43.307236  123634 balancer_v1_wrapper.go:125] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0212 00:14:43.307526  123634 store.go:1310] Monitoring volumeattachments.storage.k8s.io count at <storage-prefix>//volumeattachments
I0212 00:14:43.307619  123634 reflector.go:170] Listing and watching *storage.VolumeAttachment from storage/cacher.go:/volumeattachments
I0212 00:14:43.307806  123634 storage_factory.go:285] storing storageclasses.storage.k8s.io in storage.k8s.io/v1beta1, reading as storage.k8s.io/__internal from storagebackend.Config{Type:"", Prefix:"e6f02706-8e02-4007-9275-a272efff67ce", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Quorum:false, Paging:true, Codec:runtime.Codec(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0212 00:14:43.307899  123634 clientconn.go:551] parsed scheme: ""
I0212 00:14:43.307909  123634 clientconn.go:557] scheme "" not registered, fallback to default scheme
I0212 00:14:43.307964  123634 resolver_conn_wrapper.go:116] ccResolverWrapper: sending new addresses to cc: [{127.0.0.1:2379 0  <nil>}]
I0212 00:14:43.308083  123634 balancer_v1_wrapper.go:125] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0212 00:14:43.308684  123634 balancer_v1_wrapper.go:245] clientv3/balancer: pin "127.0.0.1:2379"
I0212 00:14:43.308985  123634 store.go:1310] Monitoring storageclasses.storage.k8s.io count at <storage-prefix>//storageclasses
I0212 00:14:43.309036  123634 storage_factory.go:285] storing volumeattachments.storage.k8s.io in storage.k8s.io/v1beta1, reading as storage.k8s.io/__internal from storagebackend.Config{Type:"", Prefix:"e6f02706-8e02-4007-9275-a272efff67ce", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Quorum:false, Paging:true, Codec:runtime.Codec(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0212 00:14:43.309114  123634 clientconn.go:551] parsed scheme: ""
I0212 00:14:43.309137  123634 clientconn.go:557] scheme "" not registered, fallback to default scheme
I0212 00:14:43.309176  123634 resolver_conn_wrapper.go:116] ccResolverWrapper: sending new addresses to cc: [{127.0.0.1:2379 0  <nil>}]
I0212 00:14:43.309264  123634 balancer_v1_wrapper.go:125] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0212 00:14:43.309301  123634 reflector.go:170] Listing and watching *storage.StorageClass from storage/cacher.go:/storageclasses
I0212 00:14:43.309431  123634 balancer_v1_wrapper.go:125] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0212 00:14:43.309883  123634 balancer_v1_wrapper.go:245] clientv3/balancer: pin "127.0.0.1:2379"
I0212 00:14:43.310097  123634 balancer_v1_wrapper.go:125] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0212 00:14:43.310600  123634 store.go:1310] Monitoring volumeattachments.storage.k8s.io count at <storage-prefix>//volumeattachments
I0212 00:14:43.310633  123634 master.go:415] Enabling API group "storage.k8s.io".
I0212 00:14:43.310774  123634 storage_factory.go:285] storing deployments.apps in apps/v1, reading as apps/__internal from storagebackend.Config{Type:"", Prefix:"e6f02706-8e02-4007-9275-a272efff67ce", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Quorum:false, Paging:true, Codec:runtime.Codec(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0212 00:14:43.310833  123634 reflector.go:170] Listing and watching *storage.VolumeAttachment from storage/cacher.go:/volumeattachments
I0212 00:14:43.310854  123634 clientconn.go:551] parsed scheme: ""
I0212 00:14:43.310866  123634 clientconn.go:557] scheme "" not registered, fallback to default scheme
I0212 00:14:43.310903  123634 resolver_conn_wrapper.go:116] ccResolverWrapper: sending new addresses to cc: [{127.0.0.1:2379 0  <nil>}]
I0212 00:14:43.311037  123634 balancer_v1_wrapper.go:125] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0212 00:14:43.311238  123634 balancer_v1_wrapper.go:245] clientv3/balancer: pin "127.0.0.1:2379"
I0212 00:14:43.311670  123634 store.go:1310] Monitoring deployments.apps count at <storage-prefix>//deployments
I0212 00:14:43.311810  123634 balancer_v1_wrapper.go:125] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0212 00:14:43.311903  123634 reflector.go:170] Listing and watching *apps.Deployment from storage/cacher.go:/deployments
I0212 00:14:43.311907  123634 storage_factory.go:285] storing statefulsets.apps in apps/v1, reading as apps/__internal from storagebackend.Config{Type:"", Prefix:"e6f02706-8e02-4007-9275-a272efff67ce", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Quorum:false, Paging:true, Codec:runtime.Codec(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0212 00:14:43.312091  123634 clientconn.go:551] parsed scheme: ""
I0212 00:14:43.312130  123634 clientconn.go:557] scheme "" not registered, fallback to default scheme
I0212 00:14:43.312174  123634 resolver_conn_wrapper.go:116] ccResolverWrapper: sending new addresses to cc: [{127.0.0.1:2379 0  <nil>}]
I0212 00:14:43.312302  123634 balancer_v1_wrapper.go:125] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0212 00:14:43.312975  123634 balancer_v1_wrapper.go:245] clientv3/balancer: pin "127.0.0.1:2379"
I0212 00:14:43.313006  123634 balancer_v1_wrapper.go:125] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0212 00:14:43.313454  123634 store.go:1310] Monitoring statefulsets.apps count at <storage-prefix>//statefulsets
I0212 00:14:43.313614  123634 reflector.go:170] Listing and watching *apps.StatefulSet from storage/cacher.go:/statefulsets
I0212 00:14:43.313683  123634 storage_factory.go:285] storing controllerrevisions.apps in apps/v1, reading as apps/__internal from storagebackend.Config{Type:"", Prefix:"e6f02706-8e02-4007-9275-a272efff67ce", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Quorum:false, Paging:true, Codec:runtime.Codec(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0212 00:14:43.313779  123634 clientconn.go:551] parsed scheme: ""
I0212 00:14:43.313791  123634 clientconn.go:557] scheme "" not registered, fallback to default scheme
I0212 00:14:43.313818  123634 resolver_conn_wrapper.go:116] ccResolverWrapper: sending new addresses to cc: [{127.0.0.1:2379 0  <nil>}]
I0212 00:14:43.313872  123634 balancer_v1_wrapper.go:125] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0212 00:14:43.314281  123634 balancer_v1_wrapper.go:245] clientv3/balancer: pin "127.0.0.1:2379"
I0212 00:14:43.314397  123634 balancer_v1_wrapper.go:125] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0212 00:14:43.314812  123634 store.go:1310] Monitoring controllerrevisions.apps count at <storage-prefix>//controllerrevisions
I0212 00:14:43.314869  123634 reflector.go:170] Listing and watching *apps.ControllerRevision from storage/cacher.go:/controllerrevisions
I0212 00:14:43.314961  123634 storage_factory.go:285] storing deployments.apps in apps/v1, reading as apps/__internal from storagebackend.Config{Type:"", Prefix:"e6f02706-8e02-4007-9275-a272efff67ce", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Quorum:false, Paging:true, Codec:runtime.Codec(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0212 00:14:43.315041  123634 clientconn.go:551] parsed scheme: ""
I0212 00:14:43.315054  123634 clientconn.go:557] scheme "" not registered, fallback to default scheme
I0212 00:14:43.315095  123634 resolver_conn_wrapper.go:116] ccResolverWrapper: sending new addresses to cc: [{127.0.0.1:2379 0  <nil>}]
I0212 00:14:43.315138  123634 balancer_v1_wrapper.go:125] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0212 00:14:43.315607  123634 balancer_v1_wrapper.go:245] clientv3/balancer: pin "127.0.0.1:2379"
I0212 00:14:43.315660  123634 balancer_v1_wrapper.go:125] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0212 00:14:43.315929  123634 store.go:1310] Monitoring deployments.apps count at <storage-prefix>//deployments
I0212 00:14:43.316037  123634 reflector.go:170] Listing and watching *apps.Deployment from storage/cacher.go:/deployments
I0212 00:14:43.316102  123634 storage_factory.go:285] storing statefulsets.apps in apps/v1, reading as apps/__internal from storagebackend.Config{Type:"", Prefix:"e6f02706-8e02-4007-9275-a272efff67ce", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Quorum:false, Paging:true, Codec:runtime.Codec(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0212 00:14:43.316217  123634 clientconn.go:551] parsed scheme: ""
I0212 00:14:43.316230  123634 clientconn.go:557] scheme "" not registered, fallback to default scheme
I0212 00:14:43.316299  123634 resolver_conn_wrapper.go:116] ccResolverWrapper: sending new addresses to cc: [{127.0.0.1:2379 0  <nil>}]
I0212 00:14:43.316335  123634 balancer_v1_wrapper.go:125] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0212 00:14:43.316745  123634 balancer_v1_wrapper.go:245] clientv3/balancer: pin "127.0.0.1:2379"
I0212 00:14:43.317053  123634 store.go:1310] Monitoring statefulsets.apps count at <storage-prefix>//statefulsets
I0212 00:14:43.317232  123634 storage_factory.go:285] storing daemonsets.apps in apps/v1, reading as apps/__internal from storagebackend.Config{Type:"", Prefix:"e6f02706-8e02-4007-9275-a272efff67ce", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Quorum:false, Paging:true, Codec:runtime.Codec(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0212 00:14:43.317310  123634 clientconn.go:551] parsed scheme: ""
I0212 00:14:43.317323  123634 clientconn.go:557] scheme "" not registered, fallback to default scheme
I0212 00:14:43.317352  123634 resolver_conn_wrapper.go:116] ccResolverWrapper: sending new addresses to cc: [{127.0.0.1:2379 0  <nil>}]
I0212 00:14:43.317429  123634 balancer_v1_wrapper.go:125] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0212 00:14:43.317455  123634 reflector.go:170] Listing and watching *apps.StatefulSet from storage/cacher.go:/statefulsets
I0212 00:14:43.317640  123634 balancer_v1_wrapper.go:125] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0212 00:14:43.317939  123634 balancer_v1_wrapper.go:245] clientv3/balancer: pin "127.0.0.1:2379"
I0212 00:14:43.318199  123634 store.go:1310] Monitoring daemonsets.apps count at <storage-prefix>//daemonsets
I0212 00:14:43.318360  123634 storage_factory.go:285] storing replicasets.apps in apps/v1, reading as apps/__internal from storagebackend.Config{Type:"", Prefix:"e6f02706-8e02-4007-9275-a272efff67ce", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Quorum:false, Paging:true, Codec:runtime.Codec(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0212 00:14:43.318431  123634 clientconn.go:551] parsed scheme: ""
I0212 00:14:43.318444  123634 clientconn.go:557] scheme "" not registered, fallback to default scheme
I0212 00:14:43.318466  123634 balancer_v1_wrapper.go:125] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0212 00:14:43.318471  123634 resolver_conn_wrapper.go:116] ccResolverWrapper: sending new addresses to cc: [{127.0.0.1:2379 0  <nil>}]
I0212 00:14:43.318514  123634 reflector.go:170] Listing and watching *apps.DaemonSet from storage/cacher.go:/daemonsets
I0212 00:14:43.318643  123634 balancer_v1_wrapper.go:125] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0212 00:14:43.318923  123634 balancer_v1_wrapper.go:245] clientv3/balancer: pin "127.0.0.1:2379"
I0212 00:14:43.319226  123634 balancer_v1_wrapper.go:125] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0212 00:14:43.319244  123634 store.go:1310] Monitoring replicasets.apps count at <storage-prefix>//replicasets
I0212 00:14:43.319291  123634 reflector.go:170] Listing and watching *apps.ReplicaSet from storage/cacher.go:/replicasets
I0212 00:14:43.319382  123634 storage_factory.go:285] storing controllerrevisions.apps in apps/v1, reading as apps/__internal from storagebackend.Config{Type:"", Prefix:"e6f02706-8e02-4007-9275-a272efff67ce", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Quorum:false, Paging:true, Codec:runtime.Codec(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0212 00:14:43.319474  123634 clientconn.go:551] parsed scheme: ""
I0212 00:14:43.319512  123634 clientconn.go:557] scheme "" not registered, fallback to default scheme
I0212 00:14:43.319587  123634 resolver_conn_wrapper.go:116] ccResolverWrapper: sending new addresses to cc: [{127.0.0.1:2379 0  <nil>}]
I0212 00:14:43.319781  123634 balancer_v1_wrapper.go:125] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0212 00:14:43.320499  123634 balancer_v1_wrapper.go:245] clientv3/balancer: pin "127.0.0.1:2379"
I0212 00:14:43.320636  123634 balancer_v1_wrapper.go:125] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0212 00:14:43.320763  123634 store.go:1310] Monitoring controllerrevisions.apps count at <storage-prefix>//controllerrevisions
I0212 00:14:43.320852  123634 reflector.go:170] Listing and watching *apps.ControllerRevision from storage/cacher.go:/controllerrevisions
I0212 00:14:43.320944  123634 storage_factory.go:285] storing deployments.apps in apps/v1, reading as apps/__internal from storagebackend.Config{Type:"", Prefix:"e6f02706-8e02-4007-9275-a272efff67ce", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Quorum:false, Paging:true, Codec:runtime.Codec(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0212 00:14:43.321136  123634 clientconn.go:551] parsed scheme: ""
I0212 00:14:43.321164  123634 clientconn.go:557] scheme "" not registered, fallback to default scheme
I0212 00:14:43.321199  123634 resolver_conn_wrapper.go:116] ccResolverWrapper: sending new addresses to cc: [{127.0.0.1:2379 0  <nil>}]
I0212 00:14:43.321351  123634 balancer_v1_wrapper.go:125] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0212 00:14:43.321661  123634 balancer_v1_wrapper.go:245] clientv3/balancer: pin "127.0.0.1:2379"
I0212 00:14:43.321738  123634 balancer_v1_wrapper.go:125] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0212 00:14:43.321897  123634 store.go:1310] Monitoring deployments.apps count at <storage-prefix>//deployments
I0212 00:14:43.321941  123634 reflector.go:170] Listing and watching *apps.Deployment from storage/cacher.go:/deployments
I0212 00:14:43.322047  123634 storage_factory.go:285] storing statefulsets.apps in apps/v1, reading as apps/__internal from storagebackend.Config{Type:"", Prefix:"e6f02706-8e02-4007-9275-a272efff67ce", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Quorum:false, Paging:true, Codec:runtime.Codec(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0212 00:14:43.322143  123634 clientconn.go:551] parsed scheme: ""
I0212 00:14:43.322173  123634 clientconn.go:557] scheme "" not registered, fallback to default scheme
I0212 00:14:43.322212  123634 resolver_conn_wrapper.go:116] ccResolverWrapper: sending new addresses to cc: [{127.0.0.1:2379 0  <nil>}]
I0212 00:14:43.322284  123634 balancer_v1_wrapper.go:125] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0212 00:14:43.322624  123634 balancer_v1_wrapper.go:245] clientv3/balancer: pin "127.0.0.1:2379"
I0212 00:14:43.322769  123634 balancer_v1_wrapper.go:125] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0212 00:14:43.322894  123634 store.go:1310] Monitoring statefulsets.apps count at <storage-prefix>//statefulsets
I0212 00:14:43.322923  123634 reflector.go:170] Listing and watching *apps.StatefulSet from storage/cacher.go:/statefulsets
I0212 00:14:43.323036  123634 storage_factory.go:285] storing daemonsets.apps in apps/v1, reading as apps/__internal from storagebackend.Config{Type:"", Prefix:"e6f02706-8e02-4007-9275-a272efff67ce", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Quorum:false, Paging:true, Codec:runtime.Codec(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0212 00:14:43.323110  123634 clientconn.go:551] parsed scheme: ""
I0212 00:14:43.323132  123634 clientconn.go:557] scheme "" not registered, fallback to default scheme
I0212 00:14:43.323162  123634 resolver_conn_wrapper.go:116] ccResolverWrapper: sending new addresses to cc: [{127.0.0.1:2379 0  <nil>}]
I0212 00:14:43.323215  123634 balancer_v1_wrapper.go:125] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0212 00:14:43.323425  123634 balancer_v1_wrapper.go:245] clientv3/balancer: pin "127.0.0.1:2379"
I0212 00:14:43.323514  123634 balancer_v1_wrapper.go:125] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0212 00:14:43.323683  123634 store.go:1310] Monitoring daemonsets.apps count at <storage-prefix>//daemonsets
I0212 00:14:43.323804  123634 storage_factory.go:285] storing replicasets.apps in apps/v1, reading as apps/__internal from storagebackend.Config{Type:"", Prefix:"e6f02706-8e02-4007-9275-a272efff67ce", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Quorum:false, Paging:true, Codec:runtime.Codec(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0212 00:14:43.323860  123634 reflector.go:170] Listing and watching *apps.DaemonSet from storage/cacher.go:/daemonsets
I0212 00:14:43.323888  123634 clientconn.go:551] parsed scheme: ""
I0212 00:14:43.323898  123634 clientconn.go:557] scheme "" not registered, fallback to default scheme
I0212 00:14:43.323924  123634 resolver_conn_wrapper.go:116] ccResolverWrapper: sending new addresses to cc: [{127.0.0.1:2379 0  <nil>}]
I0212 00:14:43.324853  123634 balancer_v1_wrapper.go:125] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0212 00:14:43.325077  123634 balancer_v1_wrapper.go:245] clientv3/balancer: pin "127.0.0.1:2379"
I0212 00:14:43.325182  123634 balancer_v1_wrapper.go:125] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0212 00:14:43.325294  123634 store.go:1310] Monitoring replicasets.apps count at <storage-prefix>//replicasets
I0212 00:14:43.325383  123634 reflector.go:170] Listing and watching *apps.ReplicaSet from storage/cacher.go:/replicasets
I0212 00:14:43.325443  123634 storage_factory.go:285] storing controllerrevisions.apps in apps/v1, reading as apps/__internal from storagebackend.Config{Type:"", Prefix:"e6f02706-8e02-4007-9275-a272efff67ce", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Quorum:false, Paging:true, Codec:runtime.Codec(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0212 00:14:43.325568  123634 clientconn.go:551] parsed scheme: ""
I0212 00:14:43.325595  123634 clientconn.go:557] scheme "" not registered, fallback to default scheme
I0212 00:14:43.325625  123634 resolver_conn_wrapper.go:116] ccResolverWrapper: sending new addresses to cc: [{127.0.0.1:2379 0  <nil>}]
I0212 00:14:43.325678  123634 balancer_v1_wrapper.go:125] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0212 00:14:43.325918  123634 balancer_v1_wrapper.go:245] clientv3/balancer: pin "127.0.0.1:2379"
I0212 00:14:43.326109  123634 store.go:1310] Monitoring controllerrevisions.apps count at <storage-prefix>//controllerrevisions
I0212 00:14:43.326137  123634 master.go:415] Enabling API group "apps".
I0212 00:14:43.326169  123634 storage_factory.go:285] storing validatingwebhookconfigurations.admissionregistration.k8s.io in admissionregistration.k8s.io/v1beta1, reading as admissionregistration.k8s.io/__internal from storagebackend.Config{Type:"", Prefix:"e6f02706-8e02-4007-9275-a272efff67ce", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Quorum:false, Paging:true, Codec:runtime.Codec(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0212 00:14:43.326211  123634 reflector.go:170] Listing and watching *apps.ControllerRevision from storage/cacher.go:/controllerrevisions
I0212 00:14:43.326232  123634 clientconn.go:551] parsed scheme: ""
I0212 00:14:43.326252  123634 clientconn.go:557] scheme "" not registered, fallback to default scheme
I0212 00:14:43.326297  123634 resolver_conn_wrapper.go:116] ccResolverWrapper: sending new addresses to cc: [{127.0.0.1:2379 0  <nil>}]
I0212 00:14:43.326113  123634 balancer_v1_wrapper.go:125] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0212 00:14:43.326383  123634 balancer_v1_wrapper.go:125] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0212 00:14:43.326661  123634 balancer_v1_wrapper.go:245] clientv3/balancer: pin "127.0.0.1:2379"
I0212 00:14:43.326836  123634 balancer_v1_wrapper.go:125] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0212 00:14:43.326839  123634 store.go:1310] Monitoring validatingwebhookconfigurations.admissionregistration.k8s.io count at <storage-prefix>//validatingwebhookconfigurations
I0212 00:14:43.326857  123634 reflector.go:170] Listing and watching *admissionregistration.ValidatingWebhookConfiguration from storage/cacher.go:/validatingwebhookconfigurations
I0212 00:14:43.326890  123634 storage_factory.go:285] storing mutatingwebhookconfigurations.admissionregistration.k8s.io in admissionregistration.k8s.io/v1beta1, reading as admissionregistration.k8s.io/__internal from storagebackend.Config{Type:"", Prefix:"e6f02706-8e02-4007-9275-a272efff67ce", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Quorum:false, Paging:true, Codec:runtime.Codec(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0212 00:14:43.326988  123634 clientconn.go:551] parsed scheme: ""
I0212 00:14:43.327013  123634 clientconn.go:557] scheme "" not registered, fallback to default scheme
I0212 00:14:43.327059  123634 resolver_conn_wrapper.go:116] ccResolverWrapper: sending new addresses to cc: [{127.0.0.1:2379 0  <nil>}]
I0212 00:14:43.327176  123634 balancer_v1_wrapper.go:125] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0212 00:14:43.327581  123634 balancer_v1_wrapper.go:245] clientv3/balancer: pin "127.0.0.1:2379"
I0212 00:14:43.327731  123634 balancer_v1_wrapper.go:125] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0212 00:14:43.327812  123634 store.go:1310] Monitoring mutatingwebhookconfigurations.admissionregistration.k8s.io count at <storage-prefix>//mutatingwebhookconfigurations
I0212 00:14:43.327834  123634 master.go:415] Enabling API group "admissionregistration.k8s.io".
I0212 00:14:43.327875  123634 storage_factory.go:285] storing events in v1, reading as __internal from storagebackend.Config{Type:"", Prefix:"e6f02706-8e02-4007-9275-a272efff67ce", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Quorum:false, Paging:true, Codec:runtime.Codec(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0212 00:14:43.327910  123634 reflector.go:170] Listing and watching *admissionregistration.MutatingWebhookConfiguration from storage/cacher.go:/mutatingwebhookconfigurations
I0212 00:14:43.328066  123634 clientconn.go:551] parsed scheme: ""
I0212 00:14:43.328089  123634 clientconn.go:557] scheme "" not registered, fallback to default scheme
I0212 00:14:43.328120  123634 resolver_conn_wrapper.go:116] ccResolverWrapper: sending new addresses to cc: [{127.0.0.1:2379 0  <nil>}]
I0212 00:14:43.328351  123634 balancer_v1_wrapper.go:125] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0212 00:14:43.328652  123634 balancer_v1_wrapper.go:245] clientv3/balancer: pin "127.0.0.1:2379"
I0212 00:14:43.328698  123634 store.go:1310] Monitoring events count at <storage-prefix>//events
I0212 00:14:43.328698  123634 balancer_v1_wrapper.go:125] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0212 00:14:43.328712  123634 master.go:415] Enabling API group "events.k8s.io".
W0212 00:14:43.334025  123634 genericapiserver.go:330] Skipping API batch/v2alpha1 because it has no resources.
W0212 00:14:43.346886  123634 genericapiserver.go:330] Skipping API rbac.authorization.k8s.io/v1alpha1 because it has no resources.
W0212 00:14:43.347442  123634 genericapiserver.go:330] Skipping API scheduling.k8s.io/v1alpha1 because it has no resources.
W0212 00:14:43.349426  123634 genericapiserver.go:330] Skipping API storage.k8s.io/v1alpha1 because it has no resources.
I0212 00:14:43.363193  123634 healthz.go:170] healthz check etcd failed: etcd client connection not yet established
I0212 00:14:43.363230  123634 healthz.go:170] healthz check poststarthook/bootstrap-controller failed: not finished
I0212 00:14:43.363238  123634 healthz.go:170] healthz check poststarthook/rbac/bootstrap-roles failed: not finished
I0212 00:14:43.363245  123634 healthz.go:170] healthz check poststarthook/scheduling/bootstrap-system-priority-classes failed: not finished
I0212 00:14:43.363250  123634 healthz.go:170] healthz check poststarthook/ca-registration failed: not finished
I0212 00:14:43.363376  123634 wrap.go:47] GET /healthz: (282.537µs) 500
goroutine 28318 [running]:
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/httplog.(*respLogger).recordStatus(0xc00d3601c0, 0x1f4)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/httplog.(*respLogger).WriteHeader(0xc00d3601c0, 0x1f4)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.(*baseTimeoutWriter).WriteHeader(0xc00db38360, 0x1f4)
net/http.Error(0x7f5686c12a48, 0xc003438450, 0xc00005cb60, 0x18a, 0x1f4)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/healthz.handleRootHealthz.func1(0x7f5686c12a48, 0xc003438450, 0xc006b85600)
net/http.HandlerFunc.ServeHTTP(0xc00df07280, 0x7f5686c12a48, 0xc003438450, 0xc006b85600)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/mux.(*pathHandler).ServeHTTP(0xc0181b8e00, 0x7f5686c12a48, 0xc003438450, 0xc006b85600)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/mux.(*PathRecorderMux).ServeHTTP(0xc005560fc0, 0x7f5686c12a48, 0xc003438450, 0xc006b85600)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server.director.ServeHTTP(0x41c1830, 0xe, 0xc015c88090, 0xc005560fc0, 0x7f5686c12a48, 0xc003438450, 0xc006b85600)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/filters.WithAuthorization.func1(0x7f5686c12a48, 0xc003438450, 0xc006b85600)
net/http.HandlerFunc.ServeHTTP(0xc00d46ca40, 0x7f5686c12a48, 0xc003438450, 0xc006b85600)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.WithMaxInFlightLimit.func1(0x7f5686c12a48, 0xc003438450, 0xc006b85600)
net/http.HandlerFunc.ServeHTTP(0xc00e356ea0, 0x7f5686c12a48, 0xc003438450, 0xc006b85600)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/filters.WithImpersonation.func1(0x7f5686c12a48, 0xc003438450, 0xc006b85600)
net/http.HandlerFunc.ServeHTTP(0xc00d46ca80, 0x7f5686c12a48, 0xc003438450, 0xc006b85600)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/filters.WithAuthentication.func1(0x7f5686c12a48, 0xc003438450, 0xc006b85500)
net/http.HandlerFunc.ServeHTTP(0xc014aa8820, 0x7f5686c12a48, 0xc003438450, 0xc006b85500)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.(*timeoutHandler).ServeHTTP.func1(0xc00a1a8840, 0xc00d0a8f00, 0x60decc0, 0xc003438450, 0xc006b85500)
created by k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.(*timeoutHandler).ServeHTTP

logging error output: "[+]ping ok\n[+]log ok\n[-]etcd failed: reason withheld\n[+]poststarthook/generic-apiserver-start-informers ok\n[-]poststarthook/bootstrap-controller failed: reason withheld\n[-]poststarthook/rbac/bootstrap-roles failed: reason withheld\n[-]poststarthook/scheduling/bootstrap-system-priority-classes failed: reason withheld\n[-]poststarthook/ca-registration failed: reason withheld\nhealthz check failed\n"
 [Go-http-client/1.1 127.0.0.1:38378]
I0212 00:14:43.365113  123634 wrap.go:47] GET /api/v1/services: (1.213003ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38378]
I0212 00:14:43.369917  123634 wrap.go:47] GET /api/v1/services: (1.091898ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38378]
I0212 00:14:43.373227  123634 wrap.go:47] GET /api/v1/namespaces/default: (1.217427ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38378]
I0212 00:14:43.375236  123634 wrap.go:47] POST /api/v1/namespaces: (1.541733ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38378]
I0212 00:14:43.376633  123634 wrap.go:47] GET /api/v1/namespaces/default/services/kubernetes: (957.393µs) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38378]
I0212 00:14:43.380693  123634 wrap.go:47] POST /api/v1/namespaces/default/services: (3.538371ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38378]
I0212 00:14:43.382040  123634 wrap.go:47] GET /api/v1/namespaces/default/endpoints/kubernetes: (942.346µs) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38378]
I0212 00:14:43.384991  123634 wrap.go:47] POST /api/v1/namespaces/default/endpoints: (1.69426ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38378]
I0212 00:14:43.386569  123634 wrap.go:47] GET /api/v1/namespaces/kube-system: (1.010097ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38378]
I0212 00:14:43.386934  123634 wrap.go:47] GET /api/v1/namespaces/default: (1.03149ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38380]
I0212 00:14:43.388302  123634 wrap.go:47] POST /api/v1/namespaces: (1.274382ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38378]
I0212 00:14:43.389084  123634 wrap.go:47] GET /api/v1/services: (2.326014ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38382]
I0212 00:14:43.389134  123634 wrap.go:47] GET /api/v1/namespaces/default/services/kubernetes: (1.841208ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38380]
I0212 00:14:43.389990  123634 wrap.go:47] GET /api/v1/services: (1.464359ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38378]
I0212 00:14:43.390130  123634 wrap.go:47] GET /api/v1/namespaces/kube-public: (1.426873ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38384]
I0212 00:14:43.390653  123634 wrap.go:47] GET /api/v1/namespaces/default/endpoints/kubernetes: (917.67µs) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38382]
I0212 00:14:43.392316  123634 wrap.go:47] POST /api/v1/namespaces: (1.130154ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38378]
I0212 00:14:43.393578  123634 wrap.go:47] GET /api/v1/namespaces/kube-node-lease: (876.612µs) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38382]
I0212 00:14:43.395263  123634 wrap.go:47] POST /api/v1/namespaces: (1.332592ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38382]
I0212 00:14:43.464195  123634 healthz.go:170] healthz check etcd failed: etcd client connection not yet established
I0212 00:14:43.464234  123634 healthz.go:170] healthz check poststarthook/rbac/bootstrap-roles failed: not finished
I0212 00:14:43.464242  123634 healthz.go:170] healthz check poststarthook/scheduling/bootstrap-system-priority-classes failed: not finished
I0212 00:14:43.464247  123634 healthz.go:170] healthz check poststarthook/ca-registration failed: not finished
I0212 00:14:43.464416  123634 wrap.go:47] GET /healthz: (345.055µs) 500
goroutine 28159 [running]:
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/httplog.(*respLogger).recordStatus(0xc00d33ff80, 0x1f4)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/httplog.(*respLogger).WriteHeader(0xc00d33ff80, 0x1f4)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.(*baseTimeoutWriter).WriteHeader(0xc00e019880, 0x1f4)
net/http.Error(0x7f5686c12a48, 0xc0013289a0, 0xc0025ed200, 0x175, 0x1f4)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/healthz.handleRootHealthz.func1(0x7f5686c12a48, 0xc0013289a0, 0xc00dc4dc00)
net/http.HandlerFunc.ServeHTTP(0xc00df07280, 0x7f5686c12a48, 0xc0013289a0, 0xc00dc4dc00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/mux.(*pathHandler).ServeHTTP(0xc0181b8e00, 0x7f5686c12a48, 0xc0013289a0, 0xc00dc4dc00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/mux.(*PathRecorderMux).ServeHTTP(0xc005560fc0, 0x7f5686c12a48, 0xc0013289a0, 0xc00dc4dc00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server.director.ServeHTTP(0x41c1830, 0xe, 0xc015c88090, 0xc005560fc0, 0x7f5686c12a48, 0xc0013289a0, 0xc00dc4dc00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/filters.WithAuthorization.func1(0x7f5686c12a48, 0xc0013289a0, 0xc00dc4dc00)
net/http.HandlerFunc.ServeHTTP(0xc00d46ca40, 0x7f5686c12a48, 0xc0013289a0, 0xc00dc4dc00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.WithMaxInFlightLimit.func1(0x7f5686c12a48, 0xc0013289a0, 0xc00dc4dc00)
net/http.HandlerFunc.ServeHTTP(0xc00e356ea0, 0x7f5686c12a48, 0xc0013289a0, 0xc00dc4dc00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/filters.WithImpersonation.func1(0x7f5686c12a48, 0xc0013289a0, 0xc00dc4dc00)
net/http.HandlerFunc.ServeHTTP(0xc00d46ca80, 0x7f5686c12a48, 0xc0013289a0, 0xc00dc4dc00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/filters.WithAuthentication.func1(0x7f5686c12a48, 0xc0013289a0, 0xc00dc4db00)
net/http.HandlerFunc.ServeHTTP(0xc014aa8820, 0x7f5686c12a48, 0xc0013289a0, 0xc00dc4db00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.(*timeoutHandler).ServeHTTP.func1(0xc00dc4ac00, 0xc00d0a8f00, 0x60decc0, 0xc0013289a0, 0xc00dc4db00)
created by k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.(*timeoutHandler).ServeHTTP

logging error output: "[+]ping ok\n[+]log ok\n[-]etcd failed: reason withheld\n[+]poststarthook/generic-apiserver-start-informers ok\n[+]poststarthook/bootstrap-controller ok\n[-]poststarthook/rbac/bootstrap-roles failed: reason withheld\n[-]poststarthook/scheduling/bootstrap-system-priority-classes failed: reason withheld\n[-]poststarthook/ca-registration failed: reason withheld\nhealthz check failed\n"
 [Go-http-client/1.1 127.0.0.1:38382]
I0212 00:14:43.564243  123634 healthz.go:170] healthz check etcd failed: etcd client connection not yet established
I0212 00:14:43.564286  123634 healthz.go:170] healthz check poststarthook/rbac/bootstrap-roles failed: not finished
I0212 00:14:43.564298  123634 healthz.go:170] healthz check poststarthook/scheduling/bootstrap-system-priority-classes failed: not finished
I0212 00:14:43.564317  123634 healthz.go:170] healthz check poststarthook/ca-registration failed: not finished
I0212 00:14:43.564511  123634 wrap.go:47] GET /healthz: (398.791µs) 500
goroutine 28395 [running]:
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/httplog.(*respLogger).recordStatus(0xc019b23ab0, 0x1f4)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/httplog.(*respLogger).WriteHeader(0xc019b23ab0, 0x1f4)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.(*baseTimeoutWriter).WriteHeader(0xc00dffd4a0, 0x1f4)
net/http.Error(0x7f5686c12a48, 0xc00d2402c8, 0xc00dd97680, 0x175, 0x1f4)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/healthz.handleRootHealthz.func1(0x7f5686c12a48, 0xc00d2402c8, 0xc01a054200)
net/http.HandlerFunc.ServeHTTP(0xc00df07280, 0x7f5686c12a48, 0xc00d2402c8, 0xc01a054200)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/mux.(*pathHandler).ServeHTTP(0xc0181b8e00, 0x7f5686c12a48, 0xc00d2402c8, 0xc01a054200)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/mux.(*PathRecorderMux).ServeHTTP(0xc005560fc0, 0x7f5686c12a48, 0xc00d2402c8, 0xc01a054200)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server.director.ServeHTTP(0x41c1830, 0xe, 0xc015c88090, 0xc005560fc0, 0x7f5686c12a48, 0xc00d2402c8, 0xc01a054200)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/filters.WithAuthorization.func1(0x7f5686c12a48, 0xc00d2402c8, 0xc01a054200)
net/http.HandlerFunc.ServeHTTP(0xc00d46ca40, 0x7f5686c12a48, 0xc00d2402c8, 0xc01a054200)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.WithMaxInFlightLimit.func1(0x7f5686c12a48, 0xc00d2402c8, 0xc01a054200)
net/http.HandlerFunc.ServeHTTP(0xc00e356ea0, 0x7f5686c12a48, 0xc00d2402c8, 0xc01a054200)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/filters.WithImpersonation.func1(0x7f5686c12a48, 0xc00d2402c8, 0xc01a054200)
net/http.HandlerFunc.ServeHTTP(0xc00d46ca80, 0x7f5686c12a48, 0xc00d2402c8, 0xc01a054200)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/filters.WithAuthentication.func1(0x7f5686c12a48, 0xc00d2402c8, 0xc01a054100)
net/http.HandlerFunc.ServeHTTP(0xc014aa8820, 0x7f5686c12a48, 0xc00d2402c8, 0xc01a054100)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.(*timeoutHandler).ServeHTTP.func1(0xc019b15500, 0xc00d0a8f00, 0x60decc0, 0xc00d2402c8, 0xc01a054100)
created by k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.(*timeoutHandler).ServeHTTP

logging error output: "[+]ping ok\n[+]log ok\n[-]etcd failed: reason withheld\n[+]poststarthook/generic-apiserver-start-informers ok\n[+]poststarthook/bootstrap-controller ok\n[-]poststarthook/rbac/bootstrap-roles failed: reason withheld\n[-]poststarthook/scheduling/bootstrap-system-priority-classes failed: reason withheld\n[-]poststarthook/ca-registration failed: reason withheld\nhealthz check failed\n"
 [Go-http-client/1.1 127.0.0.1:38382]
I0212 00:14:43.664121  123634 healthz.go:170] healthz check etcd failed: etcd client connection not yet established
I0212 00:14:43.664155  123634 healthz.go:170] healthz check poststarthook/rbac/bootstrap-roles failed: not finished
I0212 00:14:43.664163  123634 healthz.go:170] healthz check poststarthook/scheduling/bootstrap-system-priority-classes failed: not finished
I0212 00:14:43.664170  123634 healthz.go:170] healthz check poststarthook/ca-registration failed: not finished
I0212 00:14:43.664312  123634 wrap.go:47] GET /healthz: (324.629µs) 500
goroutine 28397 [running]:
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/httplog.(*respLogger).recordStatus(0xc019b23c00, 0x1f4)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/httplog.(*respLogger).WriteHeader(0xc019b23c00, 0x1f4)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.(*baseTimeoutWriter).WriteHeader(0xc00dffd720, 0x1f4)
net/http.Error(0x7f5686c12a48, 0xc00d2402f0, 0xc00dd97c80, 0x175, 0x1f4)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/healthz.handleRootHealthz.func1(0x7f5686c12a48, 0xc00d2402f0, 0xc01a054800)
net/http.HandlerFunc.ServeHTTP(0xc00df07280, 0x7f5686c12a48, 0xc00d2402f0, 0xc01a054800)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/mux.(*pathHandler).ServeHTTP(0xc0181b8e00, 0x7f5686c12a48, 0xc00d2402f0, 0xc01a054800)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/mux.(*PathRecorderMux).ServeHTTP(0xc005560fc0, 0x7f5686c12a48, 0xc00d2402f0, 0xc01a054800)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server.director.ServeHTTP(0x41c1830, 0xe, 0xc015c88090, 0xc005560fc0, 0x7f5686c12a48, 0xc00d2402f0, 0xc01a054800)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/filters.WithAuthorization.func1(0x7f5686c12a48, 0xc00d2402f0, 0xc01a054800)
net/http.HandlerFunc.ServeHTTP(0xc00d46ca40, 0x7f5686c12a48, 0xc00d2402f0, 0xc01a054800)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.WithMaxInFlightLimit.func1(0x7f5686c12a48, 0xc00d2402f0, 0xc01a054800)
net/http.HandlerFunc.ServeHTTP(0xc00e356ea0, 0x7f5686c12a48, 0xc00d2402f0, 0xc01a054800)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/filters.WithImpersonation.func1(0x7f5686c12a48, 0xc00d2402f0, 0xc01a054800)
net/http.HandlerFunc.ServeHTTP(0xc00d46ca80, 0x7f5686c12a48, 0xc00d2402f0, 0xc01a054800)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/filters.WithAuthentication.func1(0x7f5686c12a48, 0xc00d2402f0, 0xc01a054700)
net/http.HandlerFunc.ServeHTTP(0xc014aa8820, 0x7f5686c12a48, 0xc00d2402f0, 0xc01a054700)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.(*timeoutHandler).ServeHTTP.func1(0xc019b156e0, 0xc00d0a8f00, 0x60decc0, 0xc00d2402f0, 0xc01a054700)
created by k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.(*timeoutHandler).ServeHTTP

logging error output: "[+]ping ok\n[+]log ok\n[-]etcd failed: reason withheld\n[+]poststarthook/generic-apiserver-start-informers ok\n[+]poststarthook/bootstrap-controller ok\n[-]poststarthook/rbac/bootstrap-roles failed: reason withheld\n[-]poststarthook/scheduling/bootstrap-system-priority-classes failed: reason withheld\n[-]poststarthook/ca-registration failed: reason withheld\nhealthz check failed\n"
 [Go-http-client/1.1 127.0.0.1:38382]
I0212 00:14:43.764207  123634 healthz.go:170] healthz check etcd failed: etcd client connection not yet established
I0212 00:14:43.764248  123634 healthz.go:170] healthz check poststarthook/rbac/bootstrap-roles failed: not finished
I0212 00:14:43.764259  123634 healthz.go:170] healthz check poststarthook/scheduling/bootstrap-system-priority-classes failed: not finished
I0212 00:14:43.764266  123634 healthz.go:170] healthz check poststarthook/ca-registration failed: not finished
I0212 00:14:43.764429  123634 wrap.go:47] GET /healthz: (374.338µs) 500
goroutine 28399 [running]:
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/httplog.(*respLogger).recordStatus(0xc019b23ce0, 0x1f4)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/httplog.(*respLogger).WriteHeader(0xc019b23ce0, 0x1f4)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.(*baseTimeoutWriter).WriteHeader(0xc00dffd7c0, 0x1f4)
net/http.Error(0x7f5686c12a48, 0xc00d2402f8, 0xc01907a300, 0x175, 0x1f4)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/healthz.handleRootHealthz.func1(0x7f5686c12a48, 0xc00d2402f8, 0xc01a054c00)
net/http.HandlerFunc.ServeHTTP(0xc00df07280, 0x7f5686c12a48, 0xc00d2402f8, 0xc01a054c00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/mux.(*pathHandler).ServeHTTP(0xc0181b8e00, 0x7f5686c12a48, 0xc00d2402f8, 0xc01a054c00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/mux.(*PathRecorderMux).ServeHTTP(0xc005560fc0, 0x7f5686c12a48, 0xc00d2402f8, 0xc01a054c00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server.director.ServeHTTP(0x41c1830, 0xe, 0xc015c88090, 0xc005560fc0, 0x7f5686c12a48, 0xc00d2402f8, 0xc01a054c00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/filters.WithAuthorization.func1(0x7f5686c12a48, 0xc00d2402f8, 0xc01a054c00)
net/http.HandlerFunc.ServeHTTP(0xc00d46ca40, 0x7f5686c12a48, 0xc00d2402f8, 0xc01a054c00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.WithMaxInFlightLimit.func1(0x7f5686c12a48, 0xc00d2402f8, 0xc01a054c00)
net/http.HandlerFunc.ServeHTTP(0xc00e356ea0, 0x7f5686c12a48, 0xc00d2402f8, 0xc01a054c00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/filters.WithImpersonation.func1(0x7f5686c12a48, 0xc00d2402f8, 0xc01a054c00)
net/http.HandlerFunc.ServeHTTP(0xc00d46ca80, 0x7f5686c12a48, 0xc00d2402f8, 0xc01a054c00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/filters.WithAuthentication.func1(0x7f5686c12a48, 0xc00d2402f8, 0xc01a054b00)
net/http.HandlerFunc.ServeHTTP(0xc014aa8820, 0x7f5686c12a48, 0xc00d2402f8, 0xc01a054b00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.(*timeoutHandler).ServeHTTP.func1(0xc019b157a0, 0xc00d0a8f00, 0x60decc0, 0xc00d2402f8, 0xc01a054b00)
created by k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.(*timeoutHandler).ServeHTTP

logging error output: "[+]ping ok\n[+]log ok\n[-]etcd failed: reason withheld\n[+]poststarthook/generic-apiserver-start-informers ok\n[+]poststarthook/bootstrap-controller ok\n[-]poststarthook/rbac/bootstrap-roles failed: reason withheld\n[-]poststarthook/scheduling/bootstrap-system-priority-classes failed: reason withheld\n[-]poststarthook/ca-registration failed: reason withheld\nhealthz check failed\n"
 [Go-http-client/1.1 127.0.0.1:38382]
I0212 00:14:43.864197  123634 healthz.go:170] healthz check etcd failed: etcd client connection not yet established
I0212 00:14:43.864236  123634 healthz.go:170] healthz check poststarthook/rbac/bootstrap-roles failed: not finished
I0212 00:14:43.864246  123634 healthz.go:170] healthz check poststarthook/scheduling/bootstrap-system-priority-classes failed: not finished
I0212 00:14:43.864253  123634 healthz.go:170] healthz check poststarthook/ca-registration failed: not finished
I0212 00:14:43.864407  123634 wrap.go:47] GET /healthz: (344.652µs) 500
goroutine 28161 [running]:
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/httplog.(*respLogger).recordStatus(0xc019ffc150, 0x1f4)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/httplog.(*respLogger).WriteHeader(0xc019ffc150, 0x1f4)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.(*baseTimeoutWriter).WriteHeader(0xc00e019d20, 0x1f4)
net/http.Error(0x7f5686c12a48, 0xc001328a00, 0xc0025ed980, 0x175, 0x1f4)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/healthz.handleRootHealthz.func1(0x7f5686c12a48, 0xc001328a00, 0xc01a094400)
net/http.HandlerFunc.ServeHTTP(0xc00df07280, 0x7f5686c12a48, 0xc001328a00, 0xc01a094400)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/mux.(*pathHandler).ServeHTTP(0xc0181b8e00, 0x7f5686c12a48, 0xc001328a00, 0xc01a094400)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/mux.(*PathRecorderMux).ServeHTTP(0xc005560fc0, 0x7f5686c12a48, 0xc001328a00, 0xc01a094400)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server.director.ServeHTTP(0x41c1830, 0xe, 0xc015c88090, 0xc005560fc0, 0x7f5686c12a48, 0xc001328a00, 0xc01a094400)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/filters.WithAuthorization.func1(0x7f5686c12a48, 0xc001328a00, 0xc01a094400)
net/http.HandlerFunc.ServeHTTP(0xc00d46ca40, 0x7f5686c12a48, 0xc001328a00, 0xc01a094400)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.WithMaxInFlightLimit.func1(0x7f5686c12a48, 0xc001328a00, 0xc01a094400)
net/http.HandlerFunc.ServeHTTP(0xc00e356ea0, 0x7f5686c12a48, 0xc001328a00, 0xc01a094400)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/filters.WithImpersonation.func1(0x7f5686c12a48, 0xc001328a00, 0xc01a094400)
net/http.HandlerFunc.ServeHTTP(0xc00d46ca80, 0x7f5686c12a48, 0xc001328a00, 0xc01a094400)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/filters.WithAuthentication.func1(0x7f5686c12a48, 0xc001328a00, 0xc01a094300)
net/http.HandlerFunc.ServeHTTP(0xc014aa8820, 0x7f5686c12a48, 0xc001328a00, 0xc01a094300)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.(*timeoutHandler).ServeHTTP.func1(0xc00dc4af00, 0xc00d0a8f00, 0x60decc0, 0xc001328a00, 0xc01a094300)
created by k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.(*timeoutHandler).ServeHTTP

logging error output: "[+]ping ok\n[+]log ok\n[-]etcd failed: reason withheld\n[+]poststarthook/generic-apiserver-start-informers ok\n[+]poststarthook/bootstrap-controller ok\n[-]poststarthook/rbac/bootstrap-roles failed: reason withheld\n[-]poststarthook/scheduling/bootstrap-system-priority-classes failed: reason withheld\n[-]poststarthook/ca-registration failed: reason withheld\nhealthz check failed\n"
 [Go-http-client/1.1 127.0.0.1:38382]
I0212 00:14:43.964210  123634 healthz.go:170] healthz check etcd failed: etcd client connection not yet established
I0212 00:14:43.964258  123634 healthz.go:170] healthz check poststarthook/rbac/bootstrap-roles failed: not finished
I0212 00:14:43.964268  123634 healthz.go:170] healthz check poststarthook/scheduling/bootstrap-system-priority-classes failed: not finished
I0212 00:14:43.964275  123634 healthz.go:170] healthz check poststarthook/ca-registration failed: not finished
I0212 00:14:43.964434  123634 wrap.go:47] GET /healthz: (368.622µs) 500
goroutine 28419 [running]:
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/httplog.(*respLogger).recordStatus(0xc019ffc230, 0x1f4)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/httplog.(*respLogger).WriteHeader(0xc019ffc230, 0x1f4)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.(*baseTimeoutWriter).WriteHeader(0xc00e019e20, 0x1f4)
net/http.Error(0x7f5686c12a48, 0xc001328a40, 0xc0025ede00, 0x175, 0x1f4)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/healthz.handleRootHealthz.func1(0x7f5686c12a48, 0xc001328a40, 0xc01a094a00)
net/http.HandlerFunc.ServeHTTP(0xc00df07280, 0x7f5686c12a48, 0xc001328a40, 0xc01a094a00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/mux.(*pathHandler).ServeHTTP(0xc0181b8e00, 0x7f5686c12a48, 0xc001328a40, 0xc01a094a00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/mux.(*PathRecorderMux).ServeHTTP(0xc005560fc0, 0x7f5686c12a48, 0xc001328a40, 0xc01a094a00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server.director.ServeHTTP(0x41c1830, 0xe, 0xc015c88090, 0xc005560fc0, 0x7f5686c12a48, 0xc001328a40, 0xc01a094a00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/filters.WithAuthorization.func1(0x7f5686c12a48, 0xc001328a40, 0xc01a094a00)
net/http.HandlerFunc.ServeHTTP(0xc00d46ca40, 0x7f5686c12a48, 0xc001328a40, 0xc01a094a00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.WithMaxInFlightLimit.func1(0x7f5686c12a48, 0xc001328a40, 0xc01a094a00)
net/http.HandlerFunc.ServeHTTP(0xc00e356ea0, 0x7f5686c12a48, 0xc001328a40, 0xc01a094a00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/filters.WithImpersonation.func1(0x7f5686c12a48, 0xc001328a40, 0xc01a094a00)
net/http.HandlerFunc.ServeHTTP(0xc00d46ca80, 0x7f5686c12a48, 0xc001328a40, 0xc01a094a00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/filters.WithAuthentication.func1(0x7f5686c12a48, 0xc001328a40, 0xc01a094900)
net/http.HandlerFunc.ServeHTTP(0xc014aa8820, 0x7f5686c12a48, 0xc001328a40, 0xc01a094900)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.(*timeoutHandler).ServeHTTP.func1(0xc00dc4b080, 0xc00d0a8f00, 0x60decc0, 0xc001328a40, 0xc01a094900)
created by k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.(*timeoutHandler).ServeHTTP

logging error output: "[+]ping ok\n[+]log ok\n[-]etcd failed: reason withheld\n[+]poststarthook/generic-apiserver-start-informers ok\n[+]poststarthook/bootstrap-controller ok\n[-]poststarthook/rbac/bootstrap-roles failed: reason withheld\n[-]poststarthook/scheduling/bootstrap-system-priority-classes failed: reason withheld\n[-]poststarthook/ca-registration failed: reason withheld\nhealthz check failed\n"
 [Go-http-client/1.1 127.0.0.1:38382]
I0212 00:14:44.064288  123634 healthz.go:170] healthz check etcd failed: etcd client connection not yet established
I0212 00:14:44.064329  123634 healthz.go:170] healthz check poststarthook/rbac/bootstrap-roles failed: not finished
I0212 00:14:44.064352  123634 healthz.go:170] healthz check poststarthook/scheduling/bootstrap-system-priority-classes failed: not finished
I0212 00:14:44.064372  123634 healthz.go:170] healthz check poststarthook/ca-registration failed: not finished
I0212 00:14:44.064573  123634 wrap.go:47] GET /healthz: (387.859µs) 500
goroutine 28222 [running]:
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/httplog.(*respLogger).recordStatus(0xc00d356b60, 0x1f4)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/httplog.(*respLogger).WriteHeader(0xc00d356b60, 0x1f4)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.(*baseTimeoutWriter).WriteHeader(0xc00db035e0, 0x1f4)
net/http.Error(0x7f5686c12a48, 0xc003248188, 0xc001508a80, 0x175, 0x1f4)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/healthz.handleRootHealthz.func1(0x7f5686c12a48, 0xc003248188, 0xc019fc6800)
net/http.HandlerFunc.ServeHTTP(0xc00df07280, 0x7f5686c12a48, 0xc003248188, 0xc019fc6800)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/mux.(*pathHandler).ServeHTTP(0xc0181b8e00, 0x7f5686c12a48, 0xc003248188, 0xc019fc6800)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/mux.(*PathRecorderMux).ServeHTTP(0xc005560fc0, 0x7f5686c12a48, 0xc003248188, 0xc019fc6800)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server.director.ServeHTTP(0x41c1830, 0xe, 0xc015c88090, 0xc005560fc0, 0x7f5686c12a48, 0xc003248188, 0xc019fc6800)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/filters.WithAuthorization.func1(0x7f5686c12a48, 0xc003248188, 0xc019fc6800)
net/http.HandlerFunc.ServeHTTP(0xc00d46ca40, 0x7f5686c12a48, 0xc003248188, 0xc019fc6800)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.WithMaxInFlightLimit.func1(0x7f5686c12a48, 0xc003248188, 0xc019fc6800)
net/http.HandlerFunc.ServeHTTP(0xc00e356ea0, 0x7f5686c12a48, 0xc003248188, 0xc019fc6800)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/filters.WithImpersonation.func1(0x7f5686c12a48, 0xc003248188, 0xc019fc6800)
net/http.HandlerFunc.ServeHTTP(0xc00d46ca80, 0x7f5686c12a48, 0xc003248188, 0xc019fc6800)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/filters.WithAuthentication.func1(0x7f5686c12a48, 0xc003248188, 0xc019fc6700)
net/http.HandlerFunc.ServeHTTP(0xc014aa8820, 0x7f5686c12a48, 0xc003248188, 0xc019fc6700)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.(*timeoutHandler).ServeHTTP.func1(0xc0085948a0, 0xc00d0a8f00, 0x60decc0, 0xc003248188, 0xc019fc6700)
created by k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.(*timeoutHandler).ServeHTTP

logging error output: "[+]ping ok\n[+]log ok\n[-]etcd failed: reason withheld\n[+]poststarthook/generic-apiserver-start-informers ok\n[+]poststarthook/bootstrap-controller ok\n[-]poststarthook/rbac/bootstrap-roles failed: reason withheld\n[-]poststarthook/scheduling/bootstrap-system-priority-classes failed: reason withheld\n[-]poststarthook/ca-registration failed: reason withheld\nhealthz check failed\n"
 [Go-http-client/1.1 127.0.0.1:38382]
I0212 00:14:44.164225  123634 healthz.go:170] healthz check etcd failed: etcd client connection not yet established
I0212 00:14:44.164265  123634 healthz.go:170] healthz check poststarthook/rbac/bootstrap-roles failed: not finished
I0212 00:14:44.164275  123634 healthz.go:170] healthz check poststarthook/scheduling/bootstrap-system-priority-classes failed: not finished
I0212 00:14:44.164282  123634 healthz.go:170] healthz check poststarthook/ca-registration failed: not finished
I0212 00:14:44.164501  123634 wrap.go:47] GET /healthz: (388.555µs) 500
goroutine 28421 [running]:
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/httplog.(*respLogger).recordStatus(0xc019ffc380, 0x1f4)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/httplog.(*respLogger).WriteHeader(0xc019ffc380, 0x1f4)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.(*baseTimeoutWriter).WriteHeader(0xc00e0820e0, 0x1f4)
net/http.Error(0x7f5686c12a48, 0xc001328b78, 0xc00138c480, 0x175, 0x1f4)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/healthz.handleRootHealthz.func1(0x7f5686c12a48, 0xc001328b78, 0xc01a095000)
net/http.HandlerFunc.ServeHTTP(0xc00df07280, 0x7f5686c12a48, 0xc001328b78, 0xc01a095000)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/mux.(*pathHandler).ServeHTTP(0xc0181b8e00, 0x7f5686c12a48, 0xc001328b78, 0xc01a095000)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/mux.(*PathRecorderMux).ServeHTTP(0xc005560fc0, 0x7f5686c12a48, 0xc001328b78, 0xc01a095000)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server.director.ServeHTTP(0x41c1830, 0xe, 0xc015c88090, 0xc005560fc0, 0x7f5686c12a48, 0xc001328b78, 0xc01a095000)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/filters.WithAuthorization.func1(0x7f5686c12a48, 0xc001328b78, 0xc01a095000)
net/http.HandlerFunc.ServeHTTP(0xc00d46ca40, 0x7f5686c12a48, 0xc001328b78, 0xc01a095000)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.WithMaxInFlightLimit.func1(0x7f5686c12a48, 0xc001328b78, 0xc01a095000)
net/http.HandlerFunc.ServeHTTP(0xc00e356ea0, 0x7f5686c12a48, 0xc001328b78, 0xc01a095000)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/filters.WithImpersonation.func1(0x7f5686c12a48, 0xc001328b78, 0xc01a095000)
net/http.HandlerFunc.ServeHTTP(0xc00d46ca80, 0x7f5686c12a48, 0xc001328b78, 0xc01a095000)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/filters.WithAuthentication.func1(0x7f5686c12a48, 0xc001328b78, 0xc01a094f00)
net/http.HandlerFunc.ServeHTTP(0xc014aa8820, 0x7f5686c12a48, 0xc001328b78, 0xc01a094f00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.(*timeoutHandler).ServeHTTP.func1(0xc00dc4b260, 0xc00d0a8f00, 0x60decc0, 0xc001328b78, 0xc01a094f00)
created by k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.(*timeoutHandler).ServeHTTP

logging error output: "[+]ping ok\n[+]log ok\n[-]etcd failed: reason withheld\n[+]poststarthook/generic-apiserver-start-informers ok\n[+]poststarthook/bootstrap-controller ok\n[-]poststarthook/rbac/bootstrap-roles failed: reason withheld\n[-]poststarthook/scheduling/bootstrap-system-priority-classes failed: reason withheld\n[-]poststarthook/ca-registration failed: reason withheld\nhealthz check failed\n"
 [Go-http-client/1.1 127.0.0.1:38382]
I0212 00:14:44.232334  123634 clientconn.go:551] parsed scheme: ""
I0212 00:14:44.232371  123634 clientconn.go:557] scheme "" not registered, fallback to default scheme
I0212 00:14:44.232420  123634 resolver_conn_wrapper.go:116] ccResolverWrapper: sending new addresses to cc: [{127.0.0.1:2379 0  <nil>}]
I0212 00:14:44.232498  123634 balancer_v1_wrapper.go:125] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0212 00:14:44.233002  123634 balancer_v1_wrapper.go:245] clientv3/balancer: pin "127.0.0.1:2379"
I0212 00:14:44.233049  123634 balancer_v1_wrapper.go:125] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0212 00:14:44.265343  123634 healthz.go:170] healthz check poststarthook/rbac/bootstrap-roles failed: not finished
I0212 00:14:44.265371  123634 healthz.go:170] healthz check poststarthook/scheduling/bootstrap-system-priority-classes failed: not finished
I0212 00:14:44.265380  123634 healthz.go:170] healthz check poststarthook/ca-registration failed: not finished
I0212 00:14:44.265620  123634 wrap.go:47] GET /healthz: (1.494402ms) 500
goroutine 28224 [running]:
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/httplog.(*respLogger).recordStatus(0xc00d356c40, 0x1f4)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/httplog.(*respLogger).WriteHeader(0xc00d356c40, 0x1f4)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.(*baseTimeoutWriter).WriteHeader(0xc00db036a0, 0x1f4)
net/http.Error(0x7f5686c12a48, 0xc003248190, 0xc00a30f4a0, 0x160, 0x1f4)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/healthz.handleRootHealthz.func1(0x7f5686c12a48, 0xc003248190, 0xc019fc6c00)
net/http.HandlerFunc.ServeHTTP(0xc00df07280, 0x7f5686c12a48, 0xc003248190, 0xc019fc6c00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/mux.(*pathHandler).ServeHTTP(0xc0181b8e00, 0x7f5686c12a48, 0xc003248190, 0xc019fc6c00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/mux.(*PathRecorderMux).ServeHTTP(0xc005560fc0, 0x7f5686c12a48, 0xc003248190, 0xc019fc6c00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server.director.ServeHTTP(0x41c1830, 0xe, 0xc015c88090, 0xc005560fc0, 0x7f5686c12a48, 0xc003248190, 0xc019fc6c00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/filters.WithAuthorization.func1(0x7f5686c12a48, 0xc003248190, 0xc019fc6c00)
net/http.HandlerFunc.ServeHTTP(0xc00d46ca40, 0x7f5686c12a48, 0xc003248190, 0xc019fc6c00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.WithMaxInFlightLimit.func1(0x7f5686c12a48, 0xc003248190, 0xc019fc6c00)
net/http.HandlerFunc.ServeHTTP(0xc00e356ea0, 0x7f5686c12a48, 0xc003248190, 0xc019fc6c00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/filters.WithImpersonation.func1(0x7f5686c12a48, 0xc003248190, 0xc019fc6c00)
net/http.HandlerFunc.ServeHTTP(0xc00d46ca80, 0x7f5686c12a48, 0xc003248190, 0xc019fc6c00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/filters.WithAuthentication.func1(0x7f5686c12a48, 0xc003248190, 0xc019fc6b00)
net/http.HandlerFunc.ServeHTTP(0xc014aa8820, 0x7f5686c12a48, 0xc003248190, 0xc019fc6b00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.(*timeoutHandler).ServeHTTP.func1(0xc008594960, 0xc00d0a8f00, 0x60decc0, 0xc003248190, 0xc019fc6b00)
created by k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.(*timeoutHandler).ServeHTTP

logging error output: "[+]ping ok\n[+]log ok\n[+]etcd ok\n[+]poststarthook/generic-apiserver-start-informers ok\n[+]poststarthook/bootstrap-controller ok\n[-]poststarthook/rbac/bootstrap-roles failed: reason withheld\n[-]poststarthook/scheduling/bootstrap-system-priority-classes failed: reason withheld\n[-]poststarthook/ca-registration failed: reason withheld\nhealthz check failed\n"
 [Go-http-client/1.1 127.0.0.1:38382]
I0212 00:14:44.364530  123634 wrap.go:47] GET /apis/scheduling.k8s.io/v1beta1/priorityclasses/system-node-critical: (1.248044ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38382]
I0212 00:14:44.364678  123634 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/clusterroles: (1.206968ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38434]
I0212 00:14:44.364712  123634 wrap.go:47] GET /api/v1/namespaces/kube-system: (1.430014ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38380]
I0212 00:14:44.365079  123634 healthz.go:170] healthz check poststarthook/rbac/bootstrap-roles failed: not finished
I0212 00:14:44.365108  123634 healthz.go:170] healthz check poststarthook/scheduling/bootstrap-system-priority-classes failed: not finished
I0212 00:14:44.365117  123634 healthz.go:170] healthz check poststarthook/ca-registration failed: not finished
I0212 00:14:44.365258  123634 wrap.go:47] GET /healthz: (1.02046ms) 500
goroutine 28468 [running]:
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/httplog.(*respLogger).recordStatus(0xc019ffc700, 0x1f4)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/httplog.(*respLogger).WriteHeader(0xc019ffc700, 0x1f4)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.(*baseTimeoutWriter).WriteHeader(0xc00e082f40, 0x1f4)
net/http.Error(0x7f5686c12a48, 0xc001328c90, 0xc00d68edc0, 0x160, 0x1f4)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/healthz.handleRootHealthz.func1(0x7f5686c12a48, 0xc001328c90, 0xc01a095e00)
net/http.HandlerFunc.ServeHTTP(0xc00df07280, 0x7f5686c12a48, 0xc001328c90, 0xc01a095e00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/mux.(*pathHandler).ServeHTTP(0xc0181b8e00, 0x7f5686c12a48, 0xc001328c90, 0xc01a095e00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/mux.(*PathRecorderMux).ServeHTTP(0xc005560fc0, 0x7f5686c12a48, 0xc001328c90, 0xc01a095e00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server.director.ServeHTTP(0x41c1830, 0xe, 0xc015c88090, 0xc005560fc0, 0x7f5686c12a48, 0xc001328c90, 0xc01a095e00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/filters.WithAuthorization.func1(0x7f5686c12a48, 0xc001328c90, 0xc01a095e00)
net/http.HandlerFunc.ServeHTTP(0xc00d46ca40, 0x7f5686c12a48, 0xc001328c90, 0xc01a095e00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.WithMaxInFlightLimit.func1(0x7f5686c12a48, 0xc001328c90, 0xc01a095e00)
net/http.HandlerFunc.ServeHTTP(0xc00e356ea0, 0x7f5686c12a48, 0xc001328c90, 0xc01a095e00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/filters.WithImpersonation.func1(0x7f5686c12a48, 0xc001328c90, 0xc01a095e00)
net/http.HandlerFunc.ServeHTTP(0xc00d46ca80, 0x7f5686c12a48, 0xc001328c90, 0xc01a095e00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/filters.WithAuthentication.func1(0x7f5686c12a48, 0xc001328c90, 0xc01a095d00)
net/http.HandlerFunc.ServeHTTP(0xc014aa8820, 0x7f5686c12a48, 0xc001328c90, 0xc01a095d00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.(*timeoutHandler).ServeHTTP.func1(0xc01a430300, 0xc00d0a8f00, 0x60decc0, 0xc001328c90, 0xc01a095d00)
created by k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.(*timeoutHandler).ServeHTTP

logging error output: "[+]ping ok\n[+]log ok\n[+]etcd ok\n[+]poststarthook/generic-apiserver-start-informers ok\n[+]poststarthook/bootstrap-controller ok\n[-]poststarthook/rbac/bootstrap-roles failed: reason withheld\n[-]poststarthook/scheduling/bootstrap-system-priority-classes failed: reason withheld\n[-]poststarthook/ca-registration failed: reason withheld\nhealthz check failed\n"
 [Go-http-client/1.1 127.0.0.1:38436]
I0212 00:14:44.366945  123634 wrap.go:47] GET /api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication: (1.91751ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38382]
I0212 00:14:44.366969  123634 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/clusterrolebindings: (1.833553ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38434]
I0212 00:14:44.367062  123634 wrap.go:47] POST /apis/scheduling.k8s.io/v1beta1/priorityclasses: (1.794472ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38438]
I0212 00:14:44.367370  123634 storage_scheduling.go:91] created PriorityClass system-node-critical with value 2000001000
I0212 00:14:44.368661  123634 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/clusterroles/system:aggregate-to-admin: (1.407855ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38434]
I0212 00:14:44.368673  123634 wrap.go:47] GET /apis/scheduling.k8s.io/v1beta1/priorityclasses/system-cluster-critical: (1.184498ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38438]
I0212 00:14:44.368790  123634 wrap.go:47] POST /api/v1/namespaces/kube-system/configmaps: (1.42825ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38436]
I0212 00:14:44.370297  123634 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/clusterroles/admin: (1.298805ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38434]
I0212 00:14:44.370964  123634 wrap.go:47] POST /apis/scheduling.k8s.io/v1beta1/priorityclasses: (1.975765ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38438]
I0212 00:14:44.371159  123634 storage_scheduling.go:91] created PriorityClass system-cluster-critical with value 2000000000
I0212 00:14:44.371197  123634 storage_scheduling.go:100] all system priority classes are created successfully or already exist.
I0212 00:14:44.371381  123634 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/clusterroles/system:aggregate-to-edit: (767.955µs) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38434]
I0212 00:14:44.372442  123634 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/clusterroles/edit: (697.86µs) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38438]
I0212 00:14:44.373517  123634 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/clusterroles/system:aggregate-to-view: (711.824µs) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38438]
I0212 00:14:44.374581  123634 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/clusterroles/view: (728.41µs) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38438]
I0212 00:14:44.375726  123634 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/clusterroles/cluster-admin: (782.72µs) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38438]
I0212 00:14:44.377353  123634 wrap.go:47] POST /apis/rbac.authorization.k8s.io/v1/clusterroles: (1.328963ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38438]
I0212 00:14:44.377593  123634 storage_rbac.go:187] created clusterrole.rbac.authorization.k8s.io/cluster-admin
I0212 00:14:44.378797  123634 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/clusterroles/system:discovery: (984.747µs) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38438]
I0212 00:14:44.380589  123634 wrap.go:47] POST /apis/rbac.authorization.k8s.io/v1/clusterroles: (1.425185ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38438]
I0212 00:14:44.380775  123634 storage_rbac.go:187] created clusterrole.rbac.authorization.k8s.io/system:discovery
I0212 00:14:44.381844  123634 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/clusterroles/system:basic-user: (888.98µs) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38438]
I0212 00:14:44.383789  123634 wrap.go:47] POST /apis/rbac.authorization.k8s.io/v1/clusterroles: (1.586718ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38438]
I0212 00:14:44.384020  123634 storage_rbac.go:187] created clusterrole.rbac.authorization.k8s.io/system:basic-user
I0212 00:14:44.385183  123634 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/clusterroles/admin: (936.681µs) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38438]
I0212 00:14:44.387236  123634 wrap.go:47] POST /apis/rbac.authorization.k8s.io/v1/clusterroles: (1.642096ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38438]
I0212 00:14:44.387438  123634 storage_rbac.go:187] created clusterrole.rbac.authorization.k8s.io/admin
I0212 00:14:44.388647  123634 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/clusterroles/edit: (942.124µs) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38438]
I0212 00:14:44.390730  123634 wrap.go:47] POST /apis/rbac.authorization.k8s.io/v1/clusterroles: (1.633635ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38438]
I0212 00:14:44.390991  123634 storage_rbac.go:187] created clusterrole.rbac.authorization.k8s.io/edit
I0212 00:14:44.392103  123634 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/clusterroles/view: (907.435µs) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38438]
I0212 00:14:44.394005  123634 wrap.go:47] POST /apis/rbac.authorization.k8s.io/v1/clusterroles: (1.418935ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38438]
I0212 00:14:44.394233  123634 storage_rbac.go:187] created clusterrole.rbac.authorization.k8s.io/view
I0212 00:14:44.395377  123634 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/clusterroles/system:aggregate-to-admin: (960.037µs) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38438]
I0212 00:14:44.397240  123634 wrap.go:47] POST /apis/rbac.authorization.k8s.io/v1/clusterroles: (1.409893ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38438]
I0212 00:14:44.397517  123634 storage_rbac.go:187] created clusterrole.rbac.authorization.k8s.io/system:aggregate-to-admin
I0212 00:14:44.398618  123634 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/clusterroles/system:aggregate-to-edit: (861.383µs) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38438]
I0212 00:14:44.400964  123634 wrap.go:47] POST /apis/rbac.authorization.k8s.io/v1/clusterroles: (1.888711ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38438]
I0212 00:14:44.401316  123634 storage_rbac.go:187] created clusterrole.rbac.authorization.k8s.io/system:aggregate-to-edit
I0212 00:14:44.402671  123634 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/clusterroles/system:aggregate-to-view: (1.080894ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38438]
I0212 00:14:44.406077  123634 wrap.go:47] POST /apis/rbac.authorization.k8s.io/v1/clusterroles: (2.882457ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38438]
I0212 00:14:44.406534  123634 storage_rbac.go:187] created clusterrole.rbac.authorization.k8s.io/system:aggregate-to-view
I0212 00:14:44.407875  123634 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/clusterroles/system:heapster: (1.073487ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38438]
I0212 00:14:44.409789  123634 wrap.go:47] POST /apis/rbac.authorization.k8s.io/v1/clusterroles: (1.482646ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38438]
I0212 00:14:44.409968  123634 storage_rbac.go:187] created clusterrole.rbac.authorization.k8s.io/system:heapster
I0212 00:14:44.411273  123634 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/clusterroles/system:node: (1.091122ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38438]
I0212 00:14:44.414116  123634 wrap.go:47] POST /apis/rbac.authorization.k8s.io/v1/clusterroles: (2.320936ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38438]
I0212 00:14:44.414424  123634 storage_rbac.go:187] created clusterrole.rbac.authorization.k8s.io/system:node
I0212 00:14:44.415535  123634 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/clusterroles/system:node-problem-detector: (780.108µs) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38438]
I0212 00:14:44.417845  123634 wrap.go:47] POST /apis/rbac.authorization.k8s.io/v1/clusterroles: (1.882605ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38438]
I0212 00:14:44.418122  123634 storage_rbac.go:187] created clusterrole.rbac.authorization.k8s.io/system:node-problem-detector
I0212 00:14:44.419362  123634 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/clusterroles/system:node-proxier: (1.020338ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38438]
I0212 00:14:44.421377  123634 wrap.go:47] POST /apis/rbac.authorization.k8s.io/v1/clusterroles: (1.561923ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38438]
I0212 00:14:44.421647  123634 storage_rbac.go:187] created clusterrole.rbac.authorization.k8s.io/system:node-proxier
I0212 00:14:44.422865  123634 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/clusterroles/system:kubelet-api-admin: (1.014005ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38438]
I0212 00:14:44.424961  123634 wrap.go:47] POST /apis/rbac.authorization.k8s.io/v1/clusterroles: (1.683059ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38438]
I0212 00:14:44.425185  123634 storage_rbac.go:187] created clusterrole.rbac.authorization.k8s.io/system:kubelet-api-admin
I0212 00:14:44.426261  123634 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/clusterroles/system:node-bootstrapper: (846.549µs) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38438]
I0212 00:14:44.427944  123634 wrap.go:47] POST /apis/rbac.authorization.k8s.io/v1/clusterroles: (1.200132ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38438]
I0212 00:14:44.428168  123634 storage_rbac.go:187] created clusterrole.rbac.authorization.k8s.io/system:node-bootstrapper
I0212 00:14:44.429211  123634 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/clusterroles/system:auth-delegator: (863.162µs) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38438]
I0212 00:14:44.430970  123634 wrap.go:47] POST /apis/rbac.authorization.k8s.io/v1/clusterroles: (1.358271ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38438]
I0212 00:14:44.431155  123634 storage_rbac.go:187] created clusterrole.rbac.authorization.k8s.io/system:auth-delegator
I0212 00:14:44.432202  123634 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/clusterroles/system:kube-aggregator: (800.639µs) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38438]
I0212 00:14:44.434038  123634 wrap.go:47] POST /apis/rbac.authorization.k8s.io/v1/clusterroles: (1.392469ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38438]
I0212 00:14:44.434229  123634 storage_rbac.go:187] created clusterrole.rbac.authorization.k8s.io/system:kube-aggregator
I0212 00:14:44.435419  123634 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/clusterroles/system:kube-controller-manager: (992.322µs) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38438]
I0212 00:14:44.437777  123634 wrap.go:47] POST /apis/rbac.authorization.k8s.io/v1/clusterroles: (1.86043ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38438]
I0212 00:14:44.437982  123634 storage_rbac.go:187] created clusterrole.rbac.authorization.k8s.io/system:kube-controller-manager
I0212 00:14:44.439127  123634 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/clusterroles/system:kube-scheduler: (881.233µs) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38438]
I0212 00:14:44.441607  123634 wrap.go:47] POST /apis/rbac.authorization.k8s.io/v1/clusterroles: (1.902242ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38438]
I0212 00:14:44.441886  123634 storage_rbac.go:187] created clusterrole.rbac.authorization.k8s.io/system:kube-scheduler
I0212 00:14:44.442915  123634 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/clusterroles/system:kube-dns: (835.597µs) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38438]
I0212 00:14:44.444695  123634 wrap.go:47] POST /apis/rbac.authorization.k8s.io/v1/clusterroles: (1.430074ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38438]
I0212 00:14:44.444939  123634 storage_rbac.go:187] created clusterrole.rbac.authorization.k8s.io/system:kube-dns
I0212 00:14:44.446224  123634 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/clusterroles/system:persistent-volume-provisioner: (1.078245ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38438]
I0212 00:14:44.448157  123634 wrap.go:47] POST /apis/rbac.authorization.k8s.io/v1/clusterroles: (1.466062ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38438]
I0212 00:14:44.448409  123634 storage_rbac.go:187] created clusterrole.rbac.authorization.k8s.io/system:persistent-volume-provisioner
I0212 00:14:44.449596  123634 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/clusterroles/system:csi-external-attacher: (858.972µs) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38438]
I0212 00:14:44.452090  123634 wrap.go:47] POST /apis/rbac.authorization.k8s.io/v1/clusterroles: (1.915662ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38438]
I0212 00:14:44.452340  123634 storage_rbac.go:187] created clusterrole.rbac.authorization.k8s.io/system:csi-external-attacher
I0212 00:14:44.454205  123634 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/clusterroles/system:aws-cloud-provider: (835.013µs) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38438]
I0212 00:14:44.456069  123634 wrap.go:47] POST /apis/rbac.authorization.k8s.io/v1/clusterroles: (1.457741ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38438]
I0212 00:14:44.456257  123634 storage_rbac.go:187] created clusterrole.rbac.authorization.k8s.io/system:aws-cloud-provider
I0212 00:14:44.457370  123634 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/clusterroles/system:certificates.k8s.io:certificatesigningrequests:nodeclient: (907.651µs) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38438]
I0212 00:14:44.459321  123634 wrap.go:47] POST /apis/rbac.authorization.k8s.io/v1/clusterroles: (1.530655ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38438]
I0212 00:14:44.459619  123634 storage_rbac.go:187] created clusterrole.rbac.authorization.k8s.io/system:certificates.k8s.io:certificatesigningrequests:nodeclient
I0212 00:14:44.460680  123634 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/clusterroles/system:certificates.k8s.io:certificatesigningrequests:selfnodeclient: (856.558µs) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38438]
I0212 00:14:44.462678  123634 wrap.go:47] POST /apis/rbac.authorization.k8s.io/v1/clusterroles: (1.594299ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38438]
I0212 00:14:44.462908  123634 storage_rbac.go:187] created clusterrole.rbac.authorization.k8s.io/system:certificates.k8s.io:certificatesigningrequests:selfnodeclient
I0212 00:14:44.463972  123634 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/clusterroles/system:volume-scheduler: (844.995µs) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38438]
I0212 00:14:44.465262  123634 healthz.go:170] healthz check poststarthook/rbac/bootstrap-roles failed: not finished
I0212 00:14:44.465428  123634 wrap.go:47] GET /healthz: (1.306663ms) 500
goroutine 28602 [running]:
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/httplog.(*respLogger).recordStatus(0xc01bf7b8f0, 0x1f4)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/httplog.(*respLogger).WriteHeader(0xc01bf7b8f0, 0x1f4)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.(*baseTimeoutWriter).WriteHeader(0xc01537e440, 0x1f4)
net/http.Error(0x7f5686c12a48, 0xc00dbf20e0, 0xc0034ccc80, 0x136, 0x1f4)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/healthz.handleRootHealthz.func1(0x7f5686c12a48, 0xc00dbf20e0, 0xc01c058c00)
net/http.HandlerFunc.ServeHTTP(0xc00df07280, 0x7f5686c12a48, 0xc00dbf20e0, 0xc01c058c00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/mux.(*pathHandler).ServeHTTP(0xc0181b8e00, 0x7f5686c12a48, 0xc00dbf20e0, 0xc01c058c00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/mux.(*PathRecorderMux).ServeHTTP(0xc005560fc0, 0x7f5686c12a48, 0xc00dbf20e0, 0xc01c058c00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server.director.ServeHTTP(0x41c1830, 0xe, 0xc015c88090, 0xc005560fc0, 0x7f5686c12a48, 0xc00dbf20e0, 0xc01c058c00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/filters.WithAuthorization.func1(0x7f5686c12a48, 0xc00dbf20e0, 0xc01c058c00)
net/http.HandlerFunc.ServeHTTP(0xc00d46ca40, 0x7f5686c12a48, 0xc00dbf20e0, 0xc01c058c00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.WithMaxInFlightLimit.func1(0x7f5686c12a48, 0xc00dbf20e0, 0xc01c058c00)
net/http.HandlerFunc.ServeHTTP(0xc00e356ea0, 0x7f5686c12a48, 0xc00dbf20e0, 0xc01c058c00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/filters.WithImpersonation.func1(0x7f5686c12a48, 0xc00dbf20e0, 0xc01c058c00)
net/http.HandlerFunc.ServeHTTP(0xc00d46ca80, 0x7f5686c12a48, 0xc00dbf20e0, 0xc01c058c00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/filters.WithAuthentication.func1(0x7f5686c12a48, 0xc00dbf20e0, 0xc01c058b00)
net/http.HandlerFunc.ServeHTTP(0xc014aa8820, 0x7f5686c12a48, 0xc00dbf20e0, 0xc01c058b00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.(*timeoutHandler).ServeHTTP.func1(0xc01c05e2a0, 0xc00d0a8f00, 0x60decc0, 0xc00dbf20e0, 0xc01c058b00)
created by k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.(*timeoutHandler).ServeHTTP

logging error output: "[+]ping ok\n[+]log ok\n[+]etcd ok\n[+]poststarthook/generic-apiserver-start-informers ok\n[+]poststarthook/bootstrap-controller ok\n[-]poststarthook/rbac/bootstrap-roles failed: reason withheld\n[+]poststarthook/scheduling/bootstrap-system-priority-classes ok\n[+]poststarthook/ca-registration ok\nhealthz check failed\n"
 [Go-http-client/1.1 127.0.0.1:38436]
I0212 00:14:44.465814  123634 wrap.go:47] POST /apis/rbac.authorization.k8s.io/v1/clusterroles: (1.432574ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38438]
I0212 00:14:44.466031  123634 storage_rbac.go:187] created clusterrole.rbac.authorization.k8s.io/system:volume-scheduler
I0212 00:14:44.467039  123634 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/clusterroles/system:csi-external-provisioner: (810.608µs) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38436]
I0212 00:14:44.469208  123634 wrap.go:47] POST /apis/rbac.authorization.k8s.io/v1/clusterroles: (1.799886ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38436]
I0212 00:14:44.469684  123634 storage_rbac.go:187] created clusterrole.rbac.authorization.k8s.io/system:csi-external-provisioner
I0212 00:14:44.471009  123634 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/clusterroles/system:controller:attachdetach-controller: (1.011862ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38436]
I0212 00:14:44.473505  123634 wrap.go:47] POST /apis/rbac.authorization.k8s.io/v1/clusterroles: (2.05679ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38436]
I0212 00:14:44.473920  123634 storage_rbac.go:187] created clusterrole.rbac.authorization.k8s.io/system:controller:attachdetach-controller
I0212 00:14:44.475170  123634 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/clusterroles/system:controller:clusterrole-aggregation-controller: (973.919µs) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38436]
I0212 00:14:44.477141  123634 wrap.go:47] POST /apis/rbac.authorization.k8s.io/v1/clusterroles: (1.502807ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38436]
I0212 00:14:44.477387  123634 storage_rbac.go:187] created clusterrole.rbac.authorization.k8s.io/system:controller:clusterrole-aggregation-controller
I0212 00:14:44.478468  123634 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/clusterroles/system:controller:cronjob-controller: (860.123µs) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38436]
I0212 00:14:44.481537  123634 wrap.go:47] POST /apis/rbac.authorization.k8s.io/v1/clusterroles: (1.750025ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38436]
I0212 00:14:44.481817  123634 storage_rbac.go:187] created clusterrole.rbac.authorization.k8s.io/system:controller:cronjob-controller
I0212 00:14:44.482917  123634 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/clusterroles/system:controller:daemon-set-controller: (884.855µs) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38436]
I0212 00:14:44.484958  123634 wrap.go:47] POST /apis/rbac.authorization.k8s.io/v1/clusterroles: (1.546389ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38436]
I0212 00:14:44.485266  123634 storage_rbac.go:187] created clusterrole.rbac.authorization.k8s.io/system:controller:daemon-set-controller
I0212 00:14:44.486723  123634 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/clusterroles/system:controller:deployment-controller: (1.179292ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38436]
I0212 00:14:44.488739  123634 wrap.go:47] POST /apis/rbac.authorization.k8s.io/v1/clusterroles: (1.588159ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38436]
I0212 00:14:44.488965  123634 storage_rbac.go:187] created clusterrole.rbac.authorization.k8s.io/system:controller:deployment-controller
I0212 00:14:44.490054  123634 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/clusterroles/system:controller:disruption-controller: (809.827µs) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38436]
I0212 00:14:44.492136  123634 wrap.go:47] POST /apis/rbac.authorization.k8s.io/v1/clusterroles: (1.654129ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38436]
I0212 00:14:44.492421  123634 storage_rbac.go:187] created clusterrole.rbac.authorization.k8s.io/system:controller:disruption-controller
I0212 00:14:44.493669  123634 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/clusterroles/system:controller:endpoint-controller: (893.256µs) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38436]
I0212 00:14:44.495718  123634 wrap.go:47] POST /apis/rbac.authorization.k8s.io/v1/clusterroles: (1.612994ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38436]
I0212 00:14:44.495961  123634 storage_rbac.go:187] created clusterrole.rbac.authorization.k8s.io/system:controller:endpoint-controller
I0212 00:14:44.497038  123634 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/clusterroles/system:controller:expand-controller: (840.162µs) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38436]
I0212 00:14:44.498877  123634 wrap.go:47] POST /apis/rbac.authorization.k8s.io/v1/clusterroles: (1.43462ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38436]
I0212 00:14:44.499101  123634 storage_rbac.go:187] created clusterrole.rbac.authorization.k8s.io/system:controller:expand-controller
I0212 00:14:44.500065  123634 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/clusterroles/system:controller:generic-garbage-collector: (744.042µs) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38436]
I0212 00:14:44.501965  123634 wrap.go:47] POST /apis/rbac.authorization.k8s.io/v1/clusterroles: (1.479218ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38436]
I0212 00:14:44.502183  123634 storage_rbac.go:187] created clusterrole.rbac.authorization.k8s.io/system:controller:generic-garbage-collector
I0212 00:14:44.503226  123634 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/clusterroles/system:controller:horizontal-pod-autoscaler: (841.931µs) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38436]
I0212 00:14:44.505317  123634 wrap.go:47] POST /apis/rbac.authorization.k8s.io/v1/clusterroles: (1.608821ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38436]
I0212 00:14:44.505715  123634 storage_rbac.go:187] created clusterrole.rbac.authorization.k8s.io/system:controller:horizontal-pod-autoscaler
I0212 00:14:44.506726  123634 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/clusterroles/system:controller:job-controller: (833.244µs) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38436]
I0212 00:14:44.508672  123634 wrap.go:47] POST /apis/rbac.authorization.k8s.io/v1/clusterroles: (1.546334ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38436]
I0212 00:14:44.508892  123634 storage_rbac.go:187] created clusterrole.rbac.authorization.k8s.io/system:controller:job-controller
I0212 00:14:44.509935  123634 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/clusterroles/system:controller:namespace-controller: (832.6µs) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38436]
I0212 00:14:44.511796  123634 wrap.go:47] POST /apis/rbac.authorization.k8s.io/v1/clusterroles: (1.476562ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38436]
I0212 00:14:44.512255  123634 storage_rbac.go:187] created clusterrole.rbac.authorization.k8s.io/system:controller:namespace-controller
I0212 00:14:44.513322  123634 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/clusterroles/system:controller:node-controller: (893.823µs) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38436]
I0212 00:14:44.515271  123634 wrap.go:47] POST /apis/rbac.authorization.k8s.io/v1/clusterroles: (1.513807ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38436]
I0212 00:14:44.515512  123634 storage_rbac.go:187] created clusterrole.rbac.authorization.k8s.io/system:controller:node-controller
I0212 00:14:44.516477  123634 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/clusterroles/system:controller:persistent-volume-binder: (743.903µs) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38436]
I0212 00:14:44.518596  123634 wrap.go:47] POST /apis/rbac.authorization.k8s.io/v1/clusterroles: (1.658048ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38436]
I0212 00:14:44.518816  123634 storage_rbac.go:187] created clusterrole.rbac.authorization.k8s.io/system:controller:persistent-volume-binder
I0212 00:14:44.520633  123634 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/clusterroles/system:controller:pod-garbage-collector: (1.581901ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38436]
I0212 00:14:44.522894  123634 wrap.go:47] POST /apis/rbac.authorization.k8s.io/v1/clusterroles: (1.5663ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38436]
I0212 00:14:44.523131  123634 storage_rbac.go:187] created clusterrole.rbac.authorization.k8s.io/system:controller:pod-garbage-collector
I0212 00:14:44.524157  123634 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/clusterroles/system:controller:replicaset-controller: (827.028µs) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38436]
I0212 00:14:44.526117  123634 wrap.go:47] POST /apis/rbac.authorization.k8s.io/v1/clusterroles: (1.445573ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38436]
I0212 00:14:44.526342  123634 storage_rbac.go:187] created clusterrole.rbac.authorization.k8s.io/system:controller:replicaset-controller
I0212 00:14:44.527366  123634 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/clusterroles/system:controller:replication-controller: (780.603µs) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38436]
I0212 00:14:44.529324  123634 wrap.go:47] POST /apis/rbac.authorization.k8s.io/v1/clusterroles: (1.541984ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38436]
I0212 00:14:44.529749  123634 storage_rbac.go:187] created clusterrole.rbac.authorization.k8s.io/system:controller:replication-controller
I0212 00:14:44.531033  123634 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/clusterroles/system:controller:resourcequota-controller: (885.026µs) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38436]
I0212 00:14:44.533345  123634 wrap.go:47] POST /apis/rbac.authorization.k8s.io/v1/clusterroles: (1.543266ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38436]
I0212 00:14:44.533589  123634 storage_rbac.go:187] created clusterrole.rbac.authorization.k8s.io/system:controller:resourcequota-controller
I0212 00:14:44.534612  123634 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/clusterroles/system:controller:route-controller: (788.892µs) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38436]
I0212 00:14:44.536468  123634 wrap.go:47] POST /apis/rbac.authorization.k8s.io/v1/clusterroles: (1.443542ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38436]
I0212 00:14:44.536795  123634 storage_rbac.go:187] created clusterrole.rbac.authorization.k8s.io/system:controller:route-controller
I0212 00:14:44.537985  123634 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/clusterroles/system:controller:service-account-controller: (936.795µs) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38436]
I0212 00:14:44.539850  123634 wrap.go:47] POST /apis/rbac.authorization.k8s.io/v1/clusterroles: (1.474962ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38436]
I0212 00:14:44.540129  123634 storage_rbac.go:187] created clusterrole.rbac.authorization.k8s.io/system:controller:service-account-controller
I0212 00:14:44.541302  123634 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/clusterroles/system:controller:service-controller: (924.958µs) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38436]
I0212 00:14:44.543570  123634 wrap.go:47] POST /apis/rbac.authorization.k8s.io/v1/clusterroles: (1.664641ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38436]
I0212 00:14:44.543803  123634 storage_rbac.go:187] created clusterrole.rbac.authorization.k8s.io/system:controller:service-controller
I0212 00:14:44.544980  123634 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/clusterroles/system:controller:statefulset-controller: (975.935µs) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38436]
I0212 00:14:44.547296  123634 wrap.go:47] POST /apis/rbac.authorization.k8s.io/v1/clusterroles: (1.882001ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38436]
I0212 00:14:44.547601  123634 storage_rbac.go:187] created clusterrole.rbac.authorization.k8s.io/system:controller:statefulset-controller
I0212 00:14:44.548789  123634 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/clusterroles/system:controller:ttl-controller: (860.606µs) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38436]
I0212 00:14:44.551575  123634 wrap.go:47] POST /apis/rbac.authorization.k8s.io/v1/clusterroles: (1.685535ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38436]
I0212 00:14:44.551744  123634 storage_rbac.go:187] created clusterrole.rbac.authorization.k8s.io/system:controller:ttl-controller
I0212 00:14:44.552858  123634 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/clusterroles/system:controller:certificate-controller: (884.448µs) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38436]
I0212 00:14:44.565245  123634 healthz.go:170] healthz check poststarthook/rbac/bootstrap-roles failed: not finished
I0212 00:14:44.565418  123634 wrap.go:47] GET /healthz: (1.279024ms) 500
goroutine 28624 [running]:
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/httplog.(*respLogger).recordStatus(0xc01c184af0, 0x1f4)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/httplog.(*respLogger).WriteHeader(0xc01c184af0, 0x1f4)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.(*baseTimeoutWriter).WriteHeader(0xc01580bdc0, 0x1f4)
net/http.Error(0x7f5686c12a48, 0xc0034393e0, 0xc004bf1680, 0x136, 0x1f4)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/healthz.handleRootHealthz.func1(0x7f5686c12a48, 0xc0034393e0, 0xc01c1dee00)
net/http.HandlerFunc.ServeHTTP(0xc00df07280, 0x7f5686c12a48, 0xc0034393e0, 0xc01c1dee00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/mux.(*pathHandler).ServeHTTP(0xc0181b8e00, 0x7f5686c12a48, 0xc0034393e0, 0xc01c1dee00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/mux.(*PathRecorderMux).ServeHTTP(0xc005560fc0, 0x7f5686c12a48, 0xc0034393e0, 0xc01c1dee00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server.director.ServeHTTP(0x41c1830, 0xe, 0xc015c88090, 0xc005560fc0, 0x7f5686c12a48, 0xc0034393e0, 0xc01c1dee00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/filters.WithAuthorization.func1(0x7f5686c12a48, 0xc0034393e0, 0xc01c1dee00)
net/http.HandlerFunc.ServeHTTP(0xc00d46ca40, 0x7f5686c12a48, 0xc0034393e0, 0xc01c1dee00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.WithMaxInFlightLimit.func1(0x7f5686c12a48, 0xc0034393e0, 0xc01c1dee00)
net/http.HandlerFunc.ServeHTTP(0xc00e356ea0, 0x7f5686c12a48, 0xc0034393e0, 0xc01c1dee00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/filters.WithImpersonation.func1(0x7f5686c12a48, 0xc0034393e0, 0xc01c1dee00)
net/http.HandlerFunc.ServeHTTP(0xc00d46ca80, 0x7f5686c12a48, 0xc0034393e0, 0xc01c1dee00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/filters.WithAuthentication.func1(0x7f5686c12a48, 0xc0034393e0, 0xc01c1ded00)
net/http.HandlerFunc.ServeHTTP(0xc014aa8820, 0x7f5686c12a48, 0xc0034393e0, 0xc01c1ded00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.(*timeoutHandler).ServeHTTP.func1(0xc01c1daba0, 0xc00d0a8f00, 0x60decc0, 0xc0034393e0, 0xc01c1ded00)
created by k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.(*timeoutHandler).ServeHTTP

logging error output: "[+]ping ok\n[+]log ok\n[+]etcd ok\n[+]poststarthook/generic-apiserver-start-informers ok\n[+]poststarthook/bootstrap-controller ok\n[-]poststarthook/rbac/bootstrap-roles failed: reason withheld\n[+]poststarthook/scheduling/bootstrap-system-priority-classes ok\n[+]poststarthook/ca-registration ok\nhealthz check failed\n"
 [Go-http-client/1.1 127.0.0.1:38438]
I0212 00:14:44.565884  123634 wrap.go:47] POST /apis/rbac.authorization.k8s.io/v1/clusterroles: (2.206917ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38436]
I0212 00:14:44.566199  123634 storage_rbac.go:187] created clusterrole.rbac.authorization.k8s.io/system:controller:certificate-controller
I0212 00:14:44.585059  123634 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/clusterroles/system:controller:pvc-protection-controller: (1.401397ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38436]
I0212 00:14:44.605799  123634 wrap.go:47] POST /apis/rbac.authorization.k8s.io/v1/clusterroles: (2.160881ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38436]
I0212 00:14:44.606103  123634 storage_rbac.go:187] created clusterrole.rbac.authorization.k8s.io/system:controller:pvc-protection-controller
I0212 00:14:44.625420  123634 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/clusterroles/system:controller:pv-protection-controller: (1.629798ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38436]
I0212 00:14:44.648766  123634 wrap.go:47] POST /apis/rbac.authorization.k8s.io/v1/clusterroles: (5.055439ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38436]
I0212 00:14:44.649018  123634 storage_rbac.go:187] created clusterrole.rbac.authorization.k8s.io/system:controller:pv-protection-controller
I0212 00:14:44.664979  123634 healthz.go:170] healthz check poststarthook/rbac/bootstrap-roles failed: not finished
I0212 00:14:44.665215  123634 wrap.go:47] GET /healthz: (1.235334ms) 500
goroutine 28720 [running]:
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/httplog.(*respLogger).recordStatus(0xc01c251420, 0x1f4)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/httplog.(*respLogger).WriteHeader(0xc01c251420, 0x1f4)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.(*baseTimeoutWriter).WriteHeader(0xc015a9aba0, 0x1f4)
net/http.Error(0x7f5686c12a48, 0xc00dca4110, 0xc0034cd040, 0x136, 0x1f4)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/healthz.handleRootHealthz.func1(0x7f5686c12a48, 0xc00dca4110, 0xc01c255c00)
net/http.HandlerFunc.ServeHTTP(0xc00df07280, 0x7f5686c12a48, 0xc00dca4110, 0xc01c255c00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/mux.(*pathHandler).ServeHTTP(0xc0181b8e00, 0x7f5686c12a48, 0xc00dca4110, 0xc01c255c00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/mux.(*PathRecorderMux).ServeHTTP(0xc005560fc0, 0x7f5686c12a48, 0xc00dca4110, 0xc01c255c00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server.director.ServeHTTP(0x41c1830, 0xe, 0xc015c88090, 0xc005560fc0, 0x7f5686c12a48, 0xc00dca4110, 0xc01c255c00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/filters.WithAuthorization.func1(0x7f5686c12a48, 0xc00dca4110, 0xc01c255c00)
net/http.HandlerFunc.ServeHTTP(0xc00d46ca40, 0x7f5686c12a48, 0xc00dca4110, 0xc01c255c00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.WithMaxInFlightLimit.func1(0x7f5686c12a48, 0xc00dca4110, 0xc01c255c00)
net/http.HandlerFunc.ServeHTTP(0xc00e356ea0, 0x7f5686c12a48, 0xc00dca4110, 0xc01c255c00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/filters.WithImpersonation.func1(0x7f5686c12a48, 0xc00dca4110, 0xc01c255c00)
net/http.HandlerFunc.ServeHTTP(0xc00d46ca80, 0x7f5686c12a48, 0xc00dca4110, 0xc01c255c00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/filters.WithAuthentication.func1(0x7f5686c12a48, 0xc00dca4110, 0xc01c255b00)
net/http.HandlerFunc.ServeHTTP(0xc014aa8820, 0x7f5686c12a48, 0xc00dca4110, 0xc01c255b00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.(*timeoutHandler).ServeHTTP.func1(0xc01c267aa0, 0xc00d0a8f00, 0x60decc0, 0xc00dca4110, 0xc01c255b00)
created by k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.(*timeoutHandler).ServeHTTP

logging error output: "[+]ping ok\n[+]log ok\n[+]etcd ok\n[+]poststarthook/generic-apiserver-start-informers ok\n[+]poststarthook/bootstrap-controller ok\n[-]poststarthook/rbac/bootstrap-roles failed: reason withheld\n[+]poststarthook/scheduling/bootstrap-system-priority-classes ok\n[+]poststarthook/ca-registration ok\nhealthz check failed\n"
 [Go-http-client/1.1 127.0.0.1:38438]
I0212 00:14:44.665505  123634 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/clusterrolebindings/cluster-admin: (1.763396ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38436]
I0212 00:14:44.686435  123634 wrap.go:47] POST /apis/rbac.authorization.k8s.io/v1/clusterrolebindings: (2.760284ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38436]
I0212 00:14:44.686732  123634 storage_rbac.go:215] created clusterrolebinding.rbac.authorization.k8s.io/cluster-admin
I0212 00:14:44.705081  123634 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system:discovery: (1.472925ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38436]
I0212 00:14:44.726830  123634 wrap.go:47] POST /apis/rbac.authorization.k8s.io/v1/clusterrolebindings: (3.177791ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38436]
I0212 00:14:44.727307  123634 storage_rbac.go:215] created clusterrolebinding.rbac.authorization.k8s.io/system:discovery
I0212 00:14:44.744783  123634 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system:basic-user: (1.294917ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38436]
I0212 00:14:44.765113  123634 healthz.go:170] healthz check poststarthook/rbac/bootstrap-roles failed: not finished
I0212 00:14:44.765308  123634 wrap.go:47] GET /healthz: (1.363895ms) 500
goroutine 28787 [running]:
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/httplog.(*respLogger).recordStatus(0xc01c2b0e00, 0x1f4)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/httplog.(*respLogger).WriteHeader(0xc01c2b0e00, 0x1f4)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.(*baseTimeoutWriter).WriteHeader(0xc015b18ce0, 0x1f4)
net/http.Error(0x7f5686c12a48, 0xc0038d3e10, 0xc0000772c0, 0x136, 0x1f4)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/healthz.handleRootHealthz.func1(0x7f5686c12a48, 0xc0038d3e10, 0xc01c2e6d00)
net/http.HandlerFunc.ServeHTTP(0xc00df07280, 0x7f5686c12a48, 0xc0038d3e10, 0xc01c2e6d00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/mux.(*pathHandler).ServeHTTP(0xc0181b8e00, 0x7f5686c12a48, 0xc0038d3e10, 0xc01c2e6d00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/mux.(*PathRecorderMux).ServeHTTP(0xc005560fc0, 0x7f5686c12a48, 0xc0038d3e10, 0xc01c2e6d00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server.director.ServeHTTP(0x41c1830, 0xe, 0xc015c88090, 0xc005560fc0, 0x7f5686c12a48, 0xc0038d3e10, 0xc01c2e6d00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/filters.WithAuthorization.func1(0x7f5686c12a48, 0xc0038d3e10, 0xc01c2e6d00)
net/http.HandlerFunc.ServeHTTP(0xc00d46ca40, 0x7f5686c12a48, 0xc0038d3e10, 0xc01c2e6d00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.WithMaxInFlightLimit.func1(0x7f5686c12a48, 0xc0038d3e10, 0xc01c2e6d00)
net/http.HandlerFunc.ServeHTTP(0xc00e356ea0, 0x7f5686c12a48, 0xc0038d3e10, 0xc01c2e6d00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/filters.WithImpersonation.func1(0x7f5686c12a48, 0xc0038d3e10, 0xc01c2e6d00)
net/http.HandlerFunc.ServeHTTP(0xc00d46ca80, 0x7f5686c12a48, 0xc0038d3e10, 0xc01c2e6d00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/filters.WithAuthentication.func1(0x7f5686c12a48, 0xc0038d3e10, 0xc01c2e6c00)
net/http.HandlerFunc.ServeHTTP(0xc014aa8820, 0x7f5686c12a48, 0xc0038d3e10, 0xc01c2e6c00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.(*timeoutHandler).ServeHTTP.func1(0xc01c2beba0, 0xc00d0a8f00, 0x60decc0, 0xc0038d3e10, 0xc01c2e6c00)
created by k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.(*timeoutHandler).ServeHTTP

logging error output: "[+]ping ok\n[+]log ok\n[+]etcd ok\n[+]poststarthook/generic-apiserver-start-informers ok\n[+]poststarthook/bootstrap-controller ok\n[-]poststarthook/rbac/bootstrap-roles failed: reason withheld\n[+]poststarthook/scheduling/bootstrap-system-priority-classes ok\n[+]poststarthook/ca-registration ok\nhealthz check failed\n"
 [Go-http-client/1.1 127.0.0.1:38438]
I0212 00:14:44.765928  123634 wrap.go:47] POST /apis/rbac.authorization.k8s.io/v1/clusterrolebindings: (2.310429ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38436]
I0212 00:14:44.766119  123634 storage_rbac.go:215] created clusterrolebinding.rbac.authorization.k8s.io/system:basic-user
I0212 00:14:44.785064  123634 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system:node-proxier: (1.44031ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38436]
I0212 00:14:44.805853  123634 wrap.go:47] POST /apis/rbac.authorization.k8s.io/v1/clusterrolebindings: (2.245011ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38436]
I0212 00:14:44.806122  123634 storage_rbac.go:215] created clusterrolebinding.rbac.authorization.k8s.io/system:node-proxier
I0212 00:14:44.825041  123634 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system:kube-controller-manager: (1.37493ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38436]
I0212 00:14:44.846455  123634 wrap.go:47] POST /apis/rbac.authorization.k8s.io/v1/clusterrolebindings: (2.839871ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38436]
I0212 00:14:44.846746  123634 storage_rbac.go:215] created clusterrolebinding.rbac.authorization.k8s.io/system:kube-controller-manager
I0212 00:14:44.864950  123634 healthz.go:170] healthz check poststarthook/rbac/bootstrap-roles failed: not finished
I0212 00:14:44.865039  123634 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system:kube-dns: (1.425381ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38436]
I0212 00:14:44.865126  123634 wrap.go:47] GET /healthz: (934.099µs) 500
goroutine 28791 [running]:
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/httplog.(*respLogger).recordStatus(0xc01c2b16c0, 0x1f4)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/httplog.(*respLogger).WriteHeader(0xc01c2b16c0, 0x1f4)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.(*baseTimeoutWriter).WriteHeader(0xc015b19c80, 0x1f4)
net/http.Error(0x7f5686c12a48, 0xc0038d3fe0, 0xc0000777c0, 0x136, 0x1f4)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/healthz.handleRootHealthz.func1(0x7f5686c12a48, 0xc0038d3fe0, 0xc01c2e7e00)
net/http.HandlerFunc.ServeHTTP(0xc00df07280, 0x7f5686c12a48, 0xc0038d3fe0, 0xc01c2e7e00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/mux.(*pathHandler).ServeHTTP(0xc0181b8e00, 0x7f5686c12a48, 0xc0038d3fe0, 0xc01c2e7e00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/mux.(*PathRecorderMux).ServeHTTP(0xc005560fc0, 0x7f5686c12a48, 0xc0038d3fe0, 0xc01c2e7e00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server.director.ServeHTTP(0x41c1830, 0xe, 0xc015c88090, 0xc005560fc0, 0x7f5686c12a48, 0xc0038d3fe0, 0xc01c2e7e00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/filters.WithAuthorization.func1(0x7f5686c12a48, 0xc0038d3fe0, 0xc01c2e7e00)
net/http.HandlerFunc.ServeHTTP(0xc00d46ca40, 0x7f5686c12a48, 0xc0038d3fe0, 0xc01c2e7e00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.WithMaxInFlightLimit.func1(0x7f5686c12a48, 0xc0038d3fe0, 0xc01c2e7e00)
net/http.HandlerFunc.ServeHTTP(0xc00e356ea0, 0x7f5686c12a48, 0xc0038d3fe0, 0xc01c2e7e00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/filters.WithImpersonation.func1(0x7f5686c12a48, 0xc0038d3fe0, 0xc01c2e7e00)
net/http.HandlerFunc.ServeHTTP(0xc00d46ca80, 0x7f5686c12a48, 0xc0038d3fe0, 0xc01c2e7e00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/filters.WithAuthentication.func1(0x7f5686c12a48, 0xc0038d3fe0, 0xc01c2e7d00)
net/http.HandlerFunc.ServeHTTP(0xc014aa8820, 0x7f5686c12a48, 0xc0038d3fe0, 0xc01c2e7d00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.(*timeoutHandler).ServeHTTP.func1(0xc01c2bf500, 0xc00d0a8f00, 0x60decc0, 0xc0038d3fe0, 0xc01c2e7d00)
created by k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.(*timeoutHandler).ServeHTTP

logging error output: "[+]ping ok\n[+]log ok\n[+]etcd ok\n[+]poststarthook/generic-apiserver-start-informers ok\n[+]poststarthook/bootstrap-controller ok\n[-]poststarthook/rbac/bootstrap-roles failed: reason withheld\n[+]poststarthook/scheduling/bootstrap-system-priority-classes ok\n[+]poststarthook/ca-registration ok\nhealthz check failed\n"
 [Go-http-client/1.1 127.0.0.1:38438]
I0212 00:14:44.885649  123634 wrap.go:47] POST /apis/rbac.authorization.k8s.io/v1/clusterrolebindings: (2.215995ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38438]
I0212 00:14:44.885940  123634 storage_rbac.go:215] created clusterrolebinding.rbac.authorization.k8s.io/system:kube-dns
I0212 00:14:44.904888  123634 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system:kube-scheduler: (1.360943ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38438]
I0212 00:14:44.925790  123634 wrap.go:47] POST /apis/rbac.authorization.k8s.io/v1/clusterrolebindings: (2.251392ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38438]
I0212 00:14:44.926114  123634 storage_rbac.go:215] created clusterrolebinding.rbac.authorization.k8s.io/system:kube-scheduler
I0212 00:14:44.945303  123634 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system:aws-cloud-provider: (1.755954ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38438]
I0212 00:14:44.965246  123634 healthz.go:170] healthz check poststarthook/rbac/bootstrap-roles failed: not finished
I0212 00:14:44.965418  123634 wrap.go:47] GET /healthz: (1.350382ms) 500
goroutine 28736 [running]:
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/httplog.(*respLogger).recordStatus(0xc01c219ea0, 0x1f4)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/httplog.(*respLogger).WriteHeader(0xc01c219ea0, 0x1f4)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.(*baseTimeoutWriter).WriteHeader(0xc015ac5340, 0x1f4)
net/http.Error(0x7f5686c12a48, 0xc001329c68, 0xc0034cd540, 0x136, 0x1f4)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/healthz.handleRootHealthz.func1(0x7f5686c12a48, 0xc001329c68, 0xc01c374f00)
net/http.HandlerFunc.ServeHTTP(0xc00df07280, 0x7f5686c12a48, 0xc001329c68, 0xc01c374f00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/mux.(*pathHandler).ServeHTTP(0xc0181b8e00, 0x7f5686c12a48, 0xc001329c68, 0xc01c374f00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/mux.(*PathRecorderMux).ServeHTTP(0xc005560fc0, 0x7f5686c12a48, 0xc001329c68, 0xc01c374f00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server.director.ServeHTTP(0x41c1830, 0xe, 0xc015c88090, 0xc005560fc0, 0x7f5686c12a48, 0xc001329c68, 0xc01c374f00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/filters.WithAuthorization.func1(0x7f5686c12a48, 0xc001329c68, 0xc01c374f00)
net/http.HandlerFunc.ServeHTTP(0xc00d46ca40, 0x7f5686c12a48, 0xc001329c68, 0xc01c374f00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.WithMaxInFlightLimit.func1(0x7f5686c12a48, 0xc001329c68, 0xc01c374f00)
net/http.HandlerFunc.ServeHTTP(0xc00e356ea0, 0x7f5686c12a48, 0xc001329c68, 0xc01c374f00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/filters.WithImpersonation.func1(0x7f5686c12a48, 0xc001329c68, 0xc01c374f00)
net/http.HandlerFunc.ServeHTTP(0xc00d46ca80, 0x7f5686c12a48, 0xc001329c68, 0xc01c374f00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/filters.WithAuthentication.func1(0x7f5686c12a48, 0xc001329c68, 0xc01c374e00)
net/http.HandlerFunc.ServeHTTP(0xc014aa8820, 0x7f5686c12a48, 0xc001329c68, 0xc01c374e00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.(*timeoutHandler).ServeHTTP.func1(0xc01c388540, 0xc00d0a8f00, 0x60decc0, 0xc001329c68, 0xc01c374e00)
created by k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.(*timeoutHandler).ServeHTTP

logging error output: "[+]ping ok\n[+]log ok\n[+]etcd ok\n[+]poststarthook/generic-apiserver-start-informers ok\n[+]poststarthook/bootstrap-controller ok\n[-]poststarthook/rbac/bootstrap-roles failed: reason withheld\n[+]poststarthook/scheduling/bootstrap-system-priority-classes ok\n[+]poststarthook/ca-registration ok\nhealthz check failed\n"
 [Go-http-client/1.1 127.0.0.1:38436]
I0212 00:14:44.966071  123634 wrap.go:47] POST /apis/rbac.authorization.k8s.io/v1/clusterrolebindings: (2.30087ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38438]
I0212 00:14:44.966318  123634 storage_rbac.go:215] created clusterrolebinding.rbac.authorization.k8s.io/system:aws-cloud-provider
I0212 00:14:44.985563  123634 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system:volume-scheduler: (2.079867ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38436]
I0212 00:14:45.005898  123634 wrap.go:47] POST /apis/rbac.authorization.k8s.io/v1/clusterrolebindings: (2.276055ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38436]
I0212 00:14:45.006189  123634 storage_rbac.go:215] created clusterrolebinding.rbac.authorization.k8s.io/system:volume-scheduler
I0212 00:14:45.025150  123634 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system:node: (1.518141ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38436]
I0212 00:14:45.046050  123634 wrap.go:47] POST /apis/rbac.authorization.k8s.io/v1/clusterrolebindings: (2.384877ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38436]
I0212 00:14:45.046474  123634 storage_rbac.go:215] created clusterrolebinding.rbac.authorization.k8s.io/system:node
I0212 00:14:45.065027  123634 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system:controller:attachdetach-controller: (1.404785ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38436]
I0212 00:14:45.065128  123634 healthz.go:170] healthz check poststarthook/rbac/bootstrap-roles failed: not finished
I0212 00:14:45.065284  123634 wrap.go:47] GET /healthz: (1.32672ms) 500
goroutine 28793 [running]:
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/httplog.(*respLogger).recordStatus(0xc01c3dc310, 0x1f4)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/httplog.(*respLogger).WriteHeader(0xc01c3dc310, 0x1f4)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.(*baseTimeoutWriter).WriteHeader(0xc015b95040, 0x1f4)
net/http.Error(0x7f5686c12a48, 0xc00dd32110, 0xc0039bf040, 0x136, 0x1f4)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/healthz.handleRootHealthz.func1(0x7f5686c12a48, 0xc00dd32110, 0xc01c370a00)
net/http.HandlerFunc.ServeHTTP(0xc00df07280, 0x7f5686c12a48, 0xc00dd32110, 0xc01c370a00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/mux.(*pathHandler).ServeHTTP(0xc0181b8e00, 0x7f5686c12a48, 0xc00dd32110, 0xc01c370a00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/mux.(*PathRecorderMux).ServeHTTP(0xc005560fc0, 0x7f5686c12a48, 0xc00dd32110, 0xc01c370a00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server.director.ServeHTTP(0x41c1830, 0xe, 0xc015c88090, 0xc005560fc0, 0x7f5686c12a48, 0xc00dd32110, 0xc01c370a00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/filters.WithAuthorization.func1(0x7f5686c12a48, 0xc00dd32110, 0xc01c370a00)
net/http.HandlerFunc.ServeHTTP(0xc00d46ca40, 0x7f5686c12a48, 0xc00dd32110, 0xc01c370a00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.WithMaxInFlightLimit.func1(0x7f5686c12a48, 0xc00dd32110, 0xc01c370a00)
net/http.HandlerFunc.ServeHTTP(0xc00e356ea0, 0x7f5686c12a48, 0xc00dd32110, 0xc01c370a00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/filters.WithImpersonation.func1(0x7f5686c12a48, 0xc00dd32110, 0xc01c370a00)
net/http.HandlerFunc.ServeHTTP(0xc00d46ca80, 0x7f5686c12a48, 0xc00dd32110, 0xc01c370a00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/filters.WithAuthentication.func1(0x7f5686c12a48, 0xc00dd32110, 0xc01c370900)
net/http.HandlerFunc.ServeHTTP(0xc014aa8820, 0x7f5686c12a48, 0xc00dd32110, 0xc01c370900)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.(*timeoutHandler).ServeHTTP.func1(0xc01c2bf8c0, 0xc00d0a8f00, 0x60decc0, 0xc00dd32110, 0xc01c370900)
created by k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.(*timeoutHandler).ServeHTTP

logging error output: "[+]ping ok\n[+]log ok\n[+]etcd ok\n[+]poststarthook/generic-apiserver-start-informers ok\n[+]poststarthook/bootstrap-controller ok\n[-]poststarthook/rbac/bootstrap-roles failed: reason withheld\n[+]poststarthook/scheduling/bootstrap-system-priority-classes ok\n[+]poststarthook/ca-registration ok\nhealthz check failed\n"
 [Go-http-client/1.1 127.0.0.1:38438]
I0212 00:14:45.085631  123634 wrap.go:47] POST /apis/rbac.authorization.k8s.io/v1/clusterrolebindings: (2.059503ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38438]
I0212 00:14:45.085915  123634 storage_rbac.go:215] created clusterrolebinding.rbac.authorization.k8s.io/system:controller:attachdetach-controller
I0212 00:14:45.104766  123634 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system:controller:clusterrole-aggregation-controller: (1.219091ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38438]
I0212 00:14:45.125893  123634 wrap.go:47] POST /apis/rbac.authorization.k8s.io/v1/clusterrolebindings: (2.279611ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38438]
I0212 00:14:45.126312  123634 storage_rbac.go:215] created clusterrolebinding.rbac.authorization.k8s.io/system:controller:clusterrole-aggregation-controller
I0212 00:14:45.145116  123634 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system:controller:cronjob-controller: (1.446183ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38438]
I0212 00:14:45.165368  123634 healthz.go:170] healthz check poststarthook/rbac/bootstrap-roles failed: not finished
I0212 00:14:45.165606  123634 wrap.go:47] GET /healthz: (1.367203ms) 500
goroutine 28815 [running]:
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/httplog.(*respLogger).recordStatus(0xc01c3bb180, 0x1f4)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/httplog.(*respLogger).WriteHeader(0xc01c3bb180, 0x1f4)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.(*baseTimeoutWriter).WriteHeader(0xc015d15580, 0x1f4)
net/http.Error(0x7f5686c12a48, 0xc001329ef0, 0xc0039bf400, 0x136, 0x1f4)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/healthz.handleRootHealthz.func1(0x7f5686c12a48, 0xc001329ef0, 0xc01c3d5b00)
net/http.HandlerFunc.ServeHTTP(0xc00df07280, 0x7f5686c12a48, 0xc001329ef0, 0xc01c3d5b00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/mux.(*pathHandler).ServeHTTP(0xc0181b8e00, 0x7f5686c12a48, 0xc001329ef0, 0xc01c3d5b00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/mux.(*PathRecorderMux).ServeHTTP(0xc005560fc0, 0x7f5686c12a48, 0xc001329ef0, 0xc01c3d5b00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server.director.ServeHTTP(0x41c1830, 0xe, 0xc015c88090, 0xc005560fc0, 0x7f5686c12a48, 0xc001329ef0, 0xc01c3d5b00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/filters.WithAuthorization.func1(0x7f5686c12a48, 0xc001329ef0, 0xc01c3d5b00)
net/http.HandlerFunc.ServeHTTP(0xc00d46ca40, 0x7f5686c12a48, 0xc001329ef0, 0xc01c3d5b00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.WithMaxInFlightLimit.func1(0x7f5686c12a48, 0xc001329ef0, 0xc01c3d5b00)
net/http.HandlerFunc.ServeHTTP(0xc00e356ea0, 0x7f5686c12a48, 0xc001329ef0, 0xc01c3d5b00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/filters.WithImpersonation.func1(0x7f5686c12a48, 0xc001329ef0, 0xc01c3d5b00)
net/http.HandlerFunc.ServeHTTP(0xc00d46ca80, 0x7f5686c12a48, 0xc001329ef0, 0xc01c3d5b00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/filters.WithAuthentication.func1(0x7f5686c12a48, 0xc001329ef0, 0xc01c3d5a00)
net/http.HandlerFunc.ServeHTTP(0xc014aa8820, 0x7f5686c12a48, 0xc001329ef0, 0xc01c3d5a00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.(*timeoutHandler).ServeHTTP.func1(0xc01c4200c0, 0xc00d0a8f00, 0x60decc0, 0xc001329ef0, 0xc01c3d5a00)
created by k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.(*timeoutHandler).ServeHTTP

logging error output: "[+]ping ok\n[+]log ok\n[+]etcd ok\n[+]poststarthook/generic-apiserver-start-informers ok\n[+]poststarthook/bootstrap-controller ok\n[-]poststarthook/rbac/bootstrap-roles failed: reason withheld\n[+]poststarthook/scheduling/bootstrap-system-priority-classes ok\n[+]poststarthook/ca-registration ok\nhealthz check failed\n"
 [Go-http-client/1.1 127.0.0.1:38438]
I0212 00:14:45.166622  123634 wrap.go:47] POST /apis/rbac.authorization.k8s.io/v1/clusterrolebindings: (2.243077ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38436]
I0212 00:14:45.167179  123634 storage_rbac.go:215] created clusterrolebinding.rbac.authorization.k8s.io/system:controller:cronjob-controller
I0212 00:14:45.185379  123634 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system:controller:daemon-set-controller: (1.759331ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38436]
I0212 00:14:45.206295  123634 wrap.go:47] POST /apis/rbac.authorization.k8s.io/v1/clusterrolebindings: (2.794337ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38436]
I0212 00:14:45.206618  123634 storage_rbac.go:215] created clusterrolebinding.rbac.authorization.k8s.io/system:controller:daemon-set-controller
I0212 00:14:45.225079  123634 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system:controller:deployment-controller: (1.459035ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38436]
I0212 00:14:45.246880  123634 wrap.go:47] POST /apis/rbac.authorization.k8s.io/v1/clusterrolebindings: (3.388284ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38436]
I0212 00:14:45.247180  123634 storage_rbac.go:215] created clusterrolebinding.rbac.authorization.k8s.io/system:controller:deployment-controller
I0212 00:14:45.264809  123634 healthz.go:170] healthz check poststarthook/rbac/bootstrap-roles failed: not finished
I0212 00:14:45.264881  123634 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system:controller:disruption-controller: (1.345038ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38436]
I0212 00:14:45.264990  123634 wrap.go:47] GET /healthz: (1.030574ms) 500
goroutine 28867 [running]:
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/httplog.(*respLogger).recordStatus(0xc01c33d650, 0x1f4)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/httplog.(*respLogger).WriteHeader(0xc01c33d650, 0x1f4)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.(*baseTimeoutWriter).WriteHeader(0xc015baeac0, 0x1f4)
net/http.Error(0x7f5686c12a48, 0xc003249938, 0xc002da2dc0, 0x136, 0x1f4)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/healthz.handleRootHealthz.func1(0x7f5686c12a48, 0xc003249938, 0xc01c384f00)
net/http.HandlerFunc.ServeHTTP(0xc00df07280, 0x7f5686c12a48, 0xc003249938, 0xc01c384f00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/mux.(*pathHandler).ServeHTTP(0xc0181b8e00, 0x7f5686c12a48, 0xc003249938, 0xc01c384f00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/mux.(*PathRecorderMux).ServeHTTP(0xc005560fc0, 0x7f5686c12a48, 0xc003249938, 0xc01c384f00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server.director.ServeHTTP(0x41c1830, 0xe, 0xc015c88090, 0xc005560fc0, 0x7f5686c12a48, 0xc003249938, 0xc01c384f00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/filters.WithAuthorization.func1(0x7f5686c12a48, 0xc003249938, 0xc01c384f00)
net/http.HandlerFunc.ServeHTTP(0xc00d46ca40, 0x7f5686c12a48, 0xc003249938, 0xc01c384f00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.WithMaxInFlightLimit.func1(0x7f5686c12a48, 0xc003249938, 0xc01c384f00)
net/http.HandlerFunc.ServeHTTP(0xc00e356ea0, 0x7f5686c12a48, 0xc003249938, 0xc01c384f00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/filters.WithImpersonation.func1(0x7f5686c12a48, 0xc003249938, 0xc01c384f00)
net/http.HandlerFunc.ServeHTTP(0xc00d46ca80, 0x7f5686c12a48, 0xc003249938, 0xc01c384f00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/filters.WithAuthentication.func1(0x7f5686c12a48, 0xc003249938, 0xc01c384e00)
net/http.HandlerFunc.ServeHTTP(0xc014aa8820, 0x7f5686c12a48, 0xc003249938, 0xc01c384e00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.(*timeoutHandler).ServeHTTP.func1(0xc01c3ac3c0, 0xc00d0a8f00, 0x60decc0, 0xc003249938, 0xc01c384e00)
created by k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.(*timeoutHandler).ServeHTTP

logging error output: "[+]ping ok\n[+]log ok\n[+]etcd ok\n[+]poststarthook/generic-apiserver-start-informers ok\n[+]poststarthook/bootstrap-controller ok\n[-]poststarthook/rbac/bootstrap-roles failed: reason withheld\n[+]poststarthook/scheduling/bootstrap-system-priority-classes ok\n[+]poststarthook/ca-registration ok\nhealthz check failed\n"
 [Go-http-client/1.1 127.0.0.1:38438]
I0212 00:14:45.286146  123634 wrap.go:47] POST /apis/rbac.authorization.k8s.io/v1/clusterrolebindings: (2.501865ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38438]
I0212 00:14:45.287084  123634 storage_rbac.go:215] created clusterrolebinding.rbac.authorization.k8s.io/system:controller:disruption-controller
I0212 00:14:45.305010  123634 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system:controller:endpoint-controller: (1.396938ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38438]
I0212 00:14:45.326019  123634 wrap.go:47] POST /apis/rbac.authorization.k8s.io/v1/clusterrolebindings: (2.506549ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38438]
I0212 00:14:45.326537  123634 storage_rbac.go:215] created clusterrolebinding.rbac.authorization.k8s.io/system:controller:endpoint-controller
I0212 00:14:45.345209  123634 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system:controller:expand-controller: (1.56797ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38438]
I0212 00:14:45.364952  123634 healthz.go:170] healthz check poststarthook/rbac/bootstrap-roles failed: not finished
I0212 00:14:45.365192  123634 wrap.go:47] GET /healthz: (1.132947ms) 500
goroutine 28871 [running]:
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/httplog.(*respLogger).recordStatus(0xc01c33ddc0, 0x1f4)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/httplog.(*respLogger).WriteHeader(0xc01c33ddc0, 0x1f4)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.(*baseTimeoutWriter).WriteHeader(0xc015bafa00, 0x1f4)
net/http.Error(0x7f5686c12a48, 0xc0032499f8, 0xc01938c280, 0x136, 0x1f4)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/healthz.handleRootHealthz.func1(0x7f5686c12a48, 0xc0032499f8, 0xc01c385f00)
net/http.HandlerFunc.ServeHTTP(0xc00df07280, 0x7f5686c12a48, 0xc0032499f8, 0xc01c385f00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/mux.(*pathHandler).ServeHTTP(0xc0181b8e00, 0x7f5686c12a48, 0xc0032499f8, 0xc01c385f00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/mux.(*PathRecorderMux).ServeHTTP(0xc005560fc0, 0x7f5686c12a48, 0xc0032499f8, 0xc01c385f00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server.director.ServeHTTP(0x41c1830, 0xe, 0xc015c88090, 0xc005560fc0, 0x7f5686c12a48, 0xc0032499f8, 0xc01c385f00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/filters.WithAuthorization.func1(0x7f5686c12a48, 0xc0032499f8, 0xc01c385f00)
net/http.HandlerFunc.ServeHTTP(0xc00d46ca40, 0x7f5686c12a48, 0xc0032499f8, 0xc01c385f00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.WithMaxInFlightLimit.func1(0x7f5686c12a48, 0xc0032499f8, 0xc01c385f00)
net/http.HandlerFunc.ServeHTTP(0xc00e356ea0, 0x7f5686c12a48, 0xc0032499f8, 0xc01c385f00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/filters.WithImpersonation.func1(0x7f5686c12a48, 0xc0032499f8, 0xc01c385f00)
net/http.HandlerFunc.ServeHTTP(0xc00d46ca80, 0x7f5686c12a48, 0xc0032499f8, 0xc01c385f00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/filters.WithAuthentication.func1(0x7f5686c12a48, 0xc0032499f8, 0xc01c385e00)
net/http.HandlerFunc.ServeHTTP(0xc014aa8820, 0x7f5686c12a48, 0xc0032499f8, 0xc01c385e00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.(*timeoutHandler).ServeHTTP.func1(0xc01c3ad260, 0xc00d0a8f00, 0x60decc0, 0xc0032499f8, 0xc01c385e00)
created by k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.(*timeoutHandler).ServeHTTP

logging error output: "[+]ping ok\n[+]log ok\n[+]etcd ok\n[+]poststarthook/generic-apiserver-start-informers ok\n[+]poststarthook/bootstrap-controller ok\n[-]poststarthook/rbac/bootstrap-roles failed: reason withheld\n[+]poststarthook/scheduling/bootstrap-system-priority-classes ok\n[+]poststarthook/ca-registration ok\nhealthz check failed\n"
 [Go-http-client/1.1 127.0.0.1:38436]
I0212 00:14:45.365902  123634 wrap.go:47] POST /apis/rbac.authorization.k8s.io/v1/clusterrolebindings: (2.299953ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38438]
I0212 00:14:45.366128  123634 storage_rbac.go:215] created clusterrolebinding.rbac.authorization.k8s.io/system:controller:expand-controller
I0212 00:14:45.384801  123634 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system:controller:generic-garbage-collector: (1.267719ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38438]
I0212 00:14:45.405904  123634 wrap.go:47] POST /apis/rbac.authorization.k8s.io/v1/clusterrolebindings: (2.344379ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38438]
I0212 00:14:45.406163  123634 storage_rbac.go:215] created clusterrolebinding.rbac.authorization.k8s.io/system:controller:generic-garbage-collector
I0212 00:14:45.427494  123634 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system:controller:horizontal-pod-autoscaler: (1.431822ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38438]
I0212 00:14:45.446744  123634 wrap.go:47] POST /apis/rbac.authorization.k8s.io/v1/clusterrolebindings: (3.117405ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38438]
I0212 00:14:45.447086  123634 storage_rbac.go:215] created clusterrolebinding.rbac.authorization.k8s.io/system:controller:horizontal-pod-autoscaler
I0212 00:14:45.465521  123634 healthz.go:170] healthz check poststarthook/rbac/bootstrap-roles failed: not finished
I0212 00:14:45.465732  123634 wrap.go:47] GET /healthz: (1.426355ms) 500
goroutine 28842 [running]:
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/httplog.(*respLogger).recordStatus(0xc01c500380, 0x1f4)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/httplog.(*respLogger).WriteHeader(0xc01c500380, 0x1f4)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.(*baseTimeoutWriter).WriteHeader(0xc016a742c0, 0x1f4)
net/http.Error(0x7f5686c12a48, 0xc00dca4930, 0xc01938c780, 0x136, 0x1f4)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/healthz.handleRootHealthz.func1(0x7f5686c12a48, 0xc00dca4930, 0xc01c4ee600)
net/http.HandlerFunc.ServeHTTP(0xc00df07280, 0x7f5686c12a48, 0xc00dca4930, 0xc01c4ee600)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/mux.(*pathHandler).ServeHTTP(0xc0181b8e00, 0x7f5686c12a48, 0xc00dca4930, 0xc01c4ee600)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/mux.(*PathRecorderMux).ServeHTTP(0xc005560fc0, 0x7f5686c12a48, 0xc00dca4930, 0xc01c4ee600)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server.director.ServeHTTP(0x41c1830, 0xe, 0xc015c88090, 0xc005560fc0, 0x7f5686c12a48, 0xc00dca4930, 0xc01c4ee600)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/filters.WithAuthorization.func1(0x7f5686c12a48, 0xc00dca4930, 0xc01c4ee600)
net/http.HandlerFunc.ServeHTTP(0xc00d46ca40, 0x7f5686c12a48, 0xc00dca4930, 0xc01c4ee600)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.WithMaxInFlightLimit.func1(0x7f5686c12a48, 0xc00dca4930, 0xc01c4ee600)
net/http.HandlerFunc.ServeHTTP(0xc00e356ea0, 0x7f5686c12a48, 0xc00dca4930, 0xc01c4ee600)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/filters.WithImpersonation.func1(0x7f5686c12a48, 0xc00dca4930, 0xc01c4ee600)
net/http.HandlerFunc.ServeHTTP(0xc00d46ca80, 0x7f5686c12a48, 0xc00dca4930, 0xc01c4ee600)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/filters.WithAuthentication.func1(0x7f5686c12a48, 0xc00dca4930, 0xc01c4ee500)
net/http.HandlerFunc.ServeHTTP(0xc014aa8820, 0x7f5686c12a48, 0xc00dca4930, 0xc01c4ee500)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.(*timeoutHandler).ServeHTTP.func1(0xc01c3dfc80, 0xc00d0a8f00, 0x60decc0, 0xc00dca4930, 0xc01c4ee500)
created by k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.(*timeoutHandler).ServeHTTP

logging error output: "[+]ping ok\n[+]log ok\n[+]etcd ok\n[+]poststarthook/generic-apiserver-start-informers ok\n[+]poststarthook/bootstrap-controller ok\n[-]poststarthook/rbac/bootstrap-roles failed: reason withheld\n[+]poststarthook/scheduling/bootstrap-system-priority-classes ok\n[+]poststarthook/ca-registration ok\nhealthz check failed\n"
 [Go-http-client/1.1 127.0.0.1:38436]
I0212 00:14:45.466161  123634 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system:controller:job-controller: (2.561095ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38438]
I0212 00:14:45.486146  123634 wrap.go:47] POST /apis/rbac.authorization.k8s.io/v1/clusterrolebindings: (2.595405ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38438]
I0212 00:14:45.486803  123634 storage_rbac.go:215] created clusterrolebinding.rbac.authorization.k8s.io/system:controller:job-controller
I0212 00:14:45.504920  123634 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system:controller:namespace-controller: (1.278715ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38438]
I0212 00:14:45.525706  123634 wrap.go:47] POST /apis/rbac.authorization.k8s.io/v1/clusterrolebindings: (2.223452ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38438]
I0212 00:14:45.525984  123634 storage_rbac.go:215] created clusterrolebinding.rbac.authorization.k8s.io/system:controller:namespace-controller
I0212 00:14:45.544964  123634 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system:controller:node-controller: (1.46942ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38438]
I0212 00:14:45.564858  123634 healthz.go:170] healthz check poststarthook/rbac/bootstrap-roles failed: not finished
I0212 00:14:45.565058  123634 wrap.go:47] GET /healthz: (1.112544ms) 500
goroutine 28878 [running]:
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/httplog.(*respLogger).recordStatus(0xc01c4d6b60, 0x1f4)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/httplog.(*respLogger).WriteHeader(0xc01c4d6b60, 0x1f4)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.(*baseTimeoutWriter).WriteHeader(0xc016cfabe0, 0x1f4)
net/http.Error(0x7f5686c12a48, 0xc003249b98, 0xc005995180, 0x136, 0x1f4)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/healthz.handleRootHealthz.func1(0x7f5686c12a48, 0xc003249b98, 0xc01c4b9900)
net/http.HandlerFunc.ServeHTTP(0xc00df07280, 0x7f5686c12a48, 0xc003249b98, 0xc01c4b9900)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/mux.(*pathHandler).ServeHTTP(0xc0181b8e00, 0x7f5686c12a48, 0xc003249b98, 0xc01c4b9900)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/mux.(*PathRecorderMux).ServeHTTP(0xc005560fc0, 0x7f5686c12a48, 0xc003249b98, 0xc01c4b9900)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server.director.ServeHTTP(0x41c1830, 0xe, 0xc015c88090, 0xc005560fc0, 0x7f5686c12a48, 0xc003249b98, 0xc01c4b9900)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/filters.WithAuthorization.func1(0x7f5686c12a48, 0xc003249b98, 0xc01c4b9900)
net/http.HandlerFunc.ServeHTTP(0xc00d46ca40, 0x7f5686c12a48, 0xc003249b98, 0xc01c4b9900)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.WithMaxInFlightLimit.func1(0x7f5686c12a48, 0xc003249b98, 0xc01c4b9900)
net/http.HandlerFunc.ServeHTTP(0xc00e356ea0, 0x7f5686c12a48, 0xc003249b98, 0xc01c4b9900)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/filters.WithImpersonation.func1(0x7f5686c12a48, 0xc003249b98, 0xc01c4b9900)
net/http.HandlerFunc.ServeHTTP(0xc00d46ca80, 0x7f5686c12a48, 0xc003249b98, 0xc01c4b9900)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/filters.WithAuthentication.func1(0x7f5686c12a48, 0xc003249b98, 0xc01c4b9800)
net/http.HandlerFunc.ServeHTTP(0xc014aa8820, 0x7f5686c12a48, 0xc003249b98, 0xc01c4b9800)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.(*timeoutHandler).ServeHTTP.func1(0xc01c5165a0, 0xc00d0a8f00, 0x60decc0, 0xc003249b98, 0xc01c4b9800)
created by k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.(*timeoutHandler).ServeHTTP

logging error output: "[+]ping ok\n[+]log ok\n[+]etcd ok\n[+]poststarthook/generic-apiserver-start-informers ok\n[+]poststarthook/bootstrap-controller ok\n[-]poststarthook/rbac/bootstrap-roles failed: reason withheld\n[+]poststarthook/scheduling/bootstrap-system-priority-classes ok\n[+]poststarthook/ca-registration ok\nhealthz check failed\n"
 [Go-http-client/1.1 127.0.0.1:38436]
I0212 00:14:45.565541  123634 wrap.go:47] POST /apis/rbac.authorization.k8s.io/v1/clusterrolebindings: (1.911445ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38438]
I0212 00:14:45.565915  123634 storage_rbac.go:215] created clusterrolebinding.rbac.authorization.k8s.io/system:controller:node-controller
I0212 00:14:45.584859  123634 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system:controller:persistent-volume-binder: (1.267105ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38438]
I0212 00:14:45.605774  123634 wrap.go:47] POST /apis/rbac.authorization.k8s.io/v1/clusterrolebindings: (2.168188ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38438]
I0212 00:14:45.606073  123634 storage_rbac.go:215] created clusterrolebinding.rbac.authorization.k8s.io/system:controller:persistent-volume-binder
I0212 00:14:45.624895  123634 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system:controller:pod-garbage-collector: (1.30973ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38438]
I0212 00:14:45.649213  123634 wrap.go:47] POST /apis/rbac.authorization.k8s.io/v1/clusterrolebindings: (5.655912ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38438]
I0212 00:14:45.649679  123634 storage_rbac.go:215] created clusterrolebinding.rbac.authorization.k8s.io/system:controller:pod-garbage-collector
I0212 00:14:45.665173  123634 healthz.go:170] healthz check poststarthook/rbac/bootstrap-roles failed: not finished
I0212 00:14:45.665311  123634 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system:controller:replicaset-controller: (1.481152ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38438]
I0212 00:14:45.665352  123634 wrap.go:47] GET /healthz: (1.387119ms) 500
goroutine 28880 [running]:
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/httplog.(*respLogger).recordStatus(0xc01c4d6ee0, 0x1f4)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/httplog.(*respLogger).WriteHeader(0xc01c4d6ee0, 0x1f4)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.(*baseTimeoutWriter).WriteHeader(0xc016cfb380, 0x1f4)
net/http.Error(0x7f5686c12a48, 0xc003249bf8, 0xc01938cdc0, 0x136, 0x1f4)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/healthz.handleRootHealthz.func1(0x7f5686c12a48, 0xc003249bf8, 0xc01c560400)
net/http.HandlerFunc.ServeHTTP(0xc00df07280, 0x7f5686c12a48, 0xc003249bf8, 0xc01c560400)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/mux.(*pathHandler).ServeHTTP(0xc0181b8e00, 0x7f5686c12a48, 0xc003249bf8, 0xc01c560400)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/mux.(*PathRecorderMux).ServeHTTP(0xc005560fc0, 0x7f5686c12a48, 0xc003249bf8, 0xc01c560400)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server.director.ServeHTTP(0x41c1830, 0xe, 0xc015c88090, 0xc005560fc0, 0x7f5686c12a48, 0xc003249bf8, 0xc01c560400)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/filters.WithAuthorization.func1(0x7f5686c12a48, 0xc003249bf8, 0xc01c560400)
net/http.HandlerFunc.ServeHTTP(0xc00d46ca40, 0x7f5686c12a48, 0xc003249bf8, 0xc01c560400)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.WithMaxInFlightLimit.func1(0x7f5686c12a48, 0xc003249bf8, 0xc01c560400)
net/http.HandlerFunc.ServeHTTP(0xc00e356ea0, 0x7f5686c12a48, 0xc003249bf8, 0xc01c560400)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/filters.WithImpersonation.func1(0x7f5686c12a48, 0xc003249bf8, 0xc01c560400)
net/http.HandlerFunc.ServeHTTP(0xc00d46ca80, 0x7f5686c12a48, 0xc003249bf8, 0xc01c560400)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/filters.WithAuthentication.func1(0x7f5686c12a48, 0xc003249bf8, 0xc01c560300)
net/http.HandlerFunc.ServeHTTP(0xc014aa8820, 0x7f5686c12a48, 0xc003249bf8, 0xc01c560300)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.(*timeoutHandler).ServeHTTP.func1(0xc01c5169c0, 0xc00d0a8f00, 0x60decc0, 0xc003249bf8, 0xc01c560300)
created by k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.(*timeoutHandler).ServeHTTP

logging error output: "[+]ping ok\n[+]log ok\n[+]etcd ok\n[+]poststarthook/generic-apiserver-start-informers ok\n[+]poststarthook/bootstrap-controller ok\n[-]poststarthook/rbac/bootstrap-roles failed: reason withheld\n[+]poststarthook/scheduling/bootstrap-system-priority-classes ok\n[+]poststarthook/ca-registration ok\nhealthz check failed\n"
 [Go-http-client/1.1 127.0.0.1:38436]
I0212 00:14:45.685739  123634 wrap.go:47] POST /apis/rbac.authorization.k8s.io/v1/clusterrolebindings: (2.202692ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38436]
I0212 00:14:45.686110  123634 storage_rbac.go:215] created clusterrolebinding.rbac.authorization.k8s.io/system:controller:replicaset-controller
I0212 00:14:45.705008  123634 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system:controller:replication-controller: (1.464033ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38436]
I0212 00:14:45.725876  123634 wrap.go:47] POST /apis/rbac.authorization.k8s.io/v1/clusterrolebindings: (2.246033ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38436]
I0212 00:14:45.726154  123634 storage_rbac.go:215] created clusterrolebinding.rbac.authorization.k8s.io/system:controller:replication-controller
I0212 00:14:45.745078  123634 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system:controller:resourcequota-controller: (1.484141ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38436]
I0212 00:14:45.765226  123634 healthz.go:170] healthz check poststarthook/rbac/bootstrap-roles failed: not finished
I0212 00:14:45.765429  123634 wrap.go:47] GET /healthz: (1.164777ms) 500
goroutine 28934 [running]:
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/httplog.(*respLogger).recordStatus(0xc01c5b2380, 0x1f4)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/httplog.(*respLogger).WriteHeader(0xc01c5b2380, 0x1f4)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.(*baseTimeoutWriter).WriteHeader(0xc0171bf380, 0x1f4)
net/http.Error(0x7f5686c12a48, 0xc00dca4f08, 0xc01938d2c0, 0x136, 0x1f4)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/healthz.handleRootHealthz.func1(0x7f5686c12a48, 0xc00dca4f08, 0xc01c5b0600)
net/http.HandlerFunc.ServeHTTP(0xc00df07280, 0x7f5686c12a48, 0xc00dca4f08, 0xc01c5b0600)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/mux.(*pathHandler).ServeHTTP(0xc0181b8e00, 0x7f5686c12a48, 0xc00dca4f08, 0xc01c5b0600)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/mux.(*PathRecorderMux).ServeHTTP(0xc005560fc0, 0x7f5686c12a48, 0xc00dca4f08, 0xc01c5b0600)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server.director.ServeHTTP(0x41c1830, 0xe, 0xc015c88090, 0xc005560fc0, 0x7f5686c12a48, 0xc00dca4f08, 0xc01c5b0600)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/filters.WithAuthorization.func1(0x7f5686c12a48, 0xc00dca4f08, 0xc01c5b0600)
net/http.HandlerFunc.ServeHTTP(0xc00d46ca40, 0x7f5686c12a48, 0xc00dca4f08, 0xc01c5b0600)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.WithMaxInFlightLimit.func1(0x7f5686c12a48, 0xc00dca4f08, 0xc01c5b0600)
net/http.HandlerFunc.ServeHTTP(0xc00e356ea0, 0x7f5686c12a48, 0xc00dca4f08, 0xc01c5b0600)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/filters.WithImpersonation.func1(0x7f5686c12a48, 0xc00dca4f08, 0xc01c5b0600)
net/http.HandlerFunc.ServeHTTP(0xc00d46ca80, 0x7f5686c12a48, 0xc00dca4f08, 0xc01c5b0600)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/filters.WithAuthentication.func1(0x7f5686c12a48, 0xc00dca4f08, 0xc01c5b0500)
net/http.HandlerFunc.ServeHTTP(0xc014aa8820, 0x7f5686c12a48, 0xc00dca4f08, 0xc01c5b0500)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.(*timeoutHandler).ServeHTTP.func1(0xc01c52d200, 0xc00d0a8f00, 0x60decc0, 0xc00dca4f08, 0xc01c5b0500)
created by k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.(*timeoutHandler).ServeHTTP

logging error output: "[+]ping ok\n[+]log ok\n[+]etcd ok\n[+]poststarthook/generic-apiserver-start-informers ok\n[+]poststarthook/bootstrap-controller ok\n[-]poststarthook/rbac/bootstrap-roles failed: reason withheld\n[+]poststarthook/scheduling/bootstrap-system-priority-classes ok\n[+]poststarthook/ca-registration ok\nhealthz check failed\n"
 [Go-http-client/1.1 127.0.0.1:38438]
I0212 00:14:45.765943  123634 wrap.go:47] POST /apis/rbac.authorization.k8s.io/v1/clusterrolebindings: (2.354898ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38436]
I0212 00:14:45.766300  123634 storage_rbac.go:215] created clusterrolebinding.rbac.authorization.k8s.io/system:controller:resourcequota-controller
I0212 00:14:45.785010  123634 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system:controller:route-controller: (1.360954ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38438]
I0212 00:14:45.805663  123634 wrap.go:47] POST /apis/rbac.authorization.k8s.io/v1/clusterrolebindings: (2.05966ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38438]
I0212 00:14:45.805899  123634 storage_rbac.go:215] created clusterrolebinding.rbac.authorization.k8s.io/system:controller:route-controller
I0212 00:14:45.825906  123634 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system:controller:service-account-controller: (1.36647ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38438]
I0212 00:14:45.845794  123634 wrap.go:47] POST /apis/rbac.authorization.k8s.io/v1/clusterrolebindings: (2.257435ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38438]
I0212 00:14:45.846104  123634 storage_rbac.go:215] created clusterrolebinding.rbac.authorization.k8s.io/system:controller:service-account-controller
I0212 00:14:45.865939  123634 healthz.go:170] healthz check poststarthook/rbac/bootstrap-roles failed: not finished
I0212 00:14:45.866234  123634 wrap.go:47] GET /healthz: (1.941324ms) 500
goroutine 28938 [running]:
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/httplog.(*respLogger).recordStatus(0xc01c5b2af0, 0x1f4)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/httplog.(*respLogger).WriteHeader(0xc01c5b2af0, 0x1f4)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.(*baseTimeoutWriter).WriteHeader(0xc017b20ba0, 0x1f4)
net/http.Error(0x7f5686c12a48, 0xc00dca5018, 0xc005995680, 0x136, 0x1f4)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/healthz.handleRootHealthz.func1(0x7f5686c12a48, 0xc00dca5018, 0xc01c5b1600)
net/http.HandlerFunc.ServeHTTP(0xc00df07280, 0x7f5686c12a48, 0xc00dca5018, 0xc01c5b1600)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/mux.(*pathHandler).ServeHTTP(0xc0181b8e00, 0x7f5686c12a48, 0xc00dca5018, 0xc01c5b1600)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/mux.(*PathRecorderMux).ServeHTTP(0xc005560fc0, 0x7f5686c12a48, 0xc00dca5018, 0xc01c5b1600)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server.director.ServeHTTP(0x41c1830, 0xe, 0xc015c88090, 0xc005560fc0, 0x7f5686c12a48, 0xc00dca5018, 0xc01c5b1600)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/filters.WithAuthorization.func1(0x7f5686c12a48, 0xc00dca5018, 0xc01c5b1600)
net/http.HandlerFunc.ServeHTTP(0xc00d46ca40, 0x7f5686c12a48, 0xc00dca5018, 0xc01c5b1600)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.WithMaxInFlightLimit.func1(0x7f5686c12a48, 0xc00dca5018, 0xc01c5b1600)
net/http.HandlerFunc.ServeHTTP(0xc00e356ea0, 0x7f5686c12a48, 0xc00dca5018, 0xc01c5b1600)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/filters.WithImpersonation.func1(0x7f5686c12a48, 0xc00dca5018, 0xc01c5b1600)
net/http.HandlerFunc.ServeHTTP(0xc00d46ca80, 0x7f5686c12a48, 0xc00dca5018, 0xc01c5b1600)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/filters.WithAuthentication.func1(0x7f5686c12a48, 0xc00dca5018, 0xc01c5b1500)
net/http.HandlerFunc.ServeHTTP(0xc014aa8820, 0x7f5686c12a48, 0xc00dca5018, 0xc01c5b1500)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.(*timeoutHandler).ServeHTTP.func1(0xc01c52dc80, 0xc00d0a8f00, 0x60decc0, 0xc00dca5018, 0xc01c5b1500)
created by k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.(*timeoutHandler).ServeHTTP

logging error output: "[+]ping ok\n[+]log ok\n[+]etcd ok\n[+]poststarthook/generic-apiserver-start-informers ok\n[+]poststarthook/bootstrap-controller ok\n[-]poststarthook/rbac/bootstrap-roles failed: reason withheld\n[+]poststarthook/scheduling/bootstrap-system-priority-classes ok\n[+]poststarthook/ca-registration ok\nhealthz check failed\n"
 [Go-http-client/1.1 127.0.0.1:38436]
I0212 00:14:45.866272  123634 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system:controller:service-controller: (2.614426ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38438]
I0212 00:14:45.885859  123634 wrap.go:47] POST /apis/rbac.authorization.k8s.io/v1/clusterrolebindings: (1.891065ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38436]
I0212 00:14:45.886172  123634 storage_rbac.go:215] created clusterrolebinding.rbac.authorization.k8s.io/system:controller:service-controller
I0212 00:14:45.904787  123634 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system:controller:statefulset-controller: (1.271449ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38436]
I0212 00:14:45.925755  123634 wrap.go:47] POST /apis/rbac.authorization.k8s.io/v1/clusterrolebindings: (2.197907ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38436]
I0212 00:14:45.926068  123634 storage_rbac.go:215] created clusterrolebinding.rbac.authorization.k8s.io/system:controller:statefulset-controller
I0212 00:14:45.945513  123634 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system:controller:ttl-controller: (1.877957ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38436]
I0212 00:14:45.965132  123634 healthz.go:170] healthz check poststarthook/rbac/bootstrap-roles failed: not finished
I0212 00:14:45.965342  123634 wrap.go:47] GET /healthz: (1.334818ms) 500
goroutine 28928 [running]:
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/httplog.(*respLogger).recordStatus(0xc01c61ed90, 0x1f4)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/httplog.(*respLogger).WriteHeader(0xc01c61ed90, 0x1f4)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.(*baseTimeoutWriter).WriteHeader(0xc017ba1fc0, 0x1f4)
net/http.Error(0x7f5686c12a48, 0xc01725a0d0, 0xc0039bf900, 0x136, 0x1f4)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/healthz.handleRootHealthz.func1(0x7f5686c12a48, 0xc01725a0d0, 0xc01c5ff900)
net/http.HandlerFunc.ServeHTTP(0xc00df07280, 0x7f5686c12a48, 0xc01725a0d0, 0xc01c5ff900)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/mux.(*pathHandler).ServeHTTP(0xc0181b8e00, 0x7f5686c12a48, 0xc01725a0d0, 0xc01c5ff900)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/mux.(*PathRecorderMux).ServeHTTP(0xc005560fc0, 0x7f5686c12a48, 0xc01725a0d0, 0xc01c5ff900)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server.director.ServeHTTP(0x41c1830, 0xe, 0xc015c88090, 0xc005560fc0, 0x7f5686c12a48, 0xc01725a0d0, 0xc01c5ff900)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/filters.WithAuthorization.func1(0x7f5686c12a48, 0xc01725a0d0, 0xc01c5ff900)
net/http.HandlerFunc.ServeHTTP(0xc00d46ca40, 0x7f5686c12a48, 0xc01725a0d0, 0xc01c5ff900)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.WithMaxInFlightLimit.func1(0x7f5686c12a48, 0xc01725a0d0, 0xc01c5ff900)
net/http.HandlerFunc.ServeHTTP(0xc00e356ea0, 0x7f5686c12a48, 0xc01725a0d0, 0xc01c5ff900)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/filters.WithImpersonation.func1(0x7f5686c12a48, 0xc01725a0d0, 0xc01c5ff900)
net/http.HandlerFunc.ServeHTTP(0xc00d46ca80, 0x7f5686c12a48, 0xc01725a0d0, 0xc01c5ff900)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/filters.WithAuthentication.func1(0x7f5686c12a48, 0xc01725a0d0, 0xc01c5ff800)
net/http.HandlerFunc.ServeHTTP(0xc014aa8820, 0x7f5686c12a48, 0xc01725a0d0, 0xc01c5ff800)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.(*timeoutHandler).ServeHTTP.func1(0xc01c620a20, 0xc00d0a8f00, 0x60decc0, 0xc01725a0d0, 0xc01c5ff800)
created by k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.(*timeoutHandler).ServeHTTP

logging error output: "[+]ping ok\n[+]log ok\n[+]etcd ok\n[+]poststarthook/generic-apiserver-start-informers ok\n[+]poststarthook/bootstrap-controller ok\n[-]poststarthook/rbac/bootstrap-roles failed: reason withheld\n[+]poststarthook/scheduling/bootstrap-system-priority-classes ok\n[+]poststarthook/ca-registration ok\nhealthz check failed\n"
 [Go-http-client/1.1 127.0.0.1:38438]
I0212 00:14:45.967292  123634 wrap.go:47] POST /apis/rbac.authorization.k8s.io/v1/clusterrolebindings: (3.752299ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38436]
I0212 00:14:45.967581  123634 storage_rbac.go:215] created clusterrolebinding.rbac.authorization.k8s.io/system:controller:ttl-controller
I0212 00:14:45.984893  123634 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system:controller:certificate-controller: (1.312124ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38436]
I0212 00:14:46.005664  123634 wrap.go:47] POST /apis/rbac.authorization.k8s.io/v1/clusterrolebindings: (2.055103ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38436]
I0212 00:14:46.006302  123634 storage_rbac.go:215] created clusterrolebinding.rbac.authorization.k8s.io/system:controller:certificate-controller
I0212 00:14:46.024977  123634 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system:controller:pvc-protection-controller: (1.38544ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38436]
I0212 00:14:46.046306  123634 wrap.go:47] POST /apis/rbac.authorization.k8s.io/v1/clusterrolebindings: (2.764869ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38436]
I0212 00:14:46.046640  123634 storage_rbac.go:215] created clusterrolebinding.rbac.authorization.k8s.io/system:controller:pvc-protection-controller
I0212 00:14:46.064947  123634 healthz.go:170] healthz check poststarthook/rbac/bootstrap-roles failed: not finished
I0212 00:14:46.065094  123634 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system:controller:pv-protection-controller: (1.44601ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38436]
I0212 00:14:46.065149  123634 wrap.go:47] GET /healthz: (1.235388ms) 500
goroutine 28978 [running]:
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/httplog.(*respLogger).recordStatus(0xc01c61f0a0, 0x1f4)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/httplog.(*respLogger).WriteHeader(0xc01c61f0a0, 0x1f4)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.(*baseTimeoutWriter).WriteHeader(0xc017bfc620, 0x1f4)
net/http.Error(0x7f5686c12a48, 0xc01725a1d0, 0xc01938d7c0, 0x136, 0x1f4)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/healthz.handleRootHealthz.func1(0x7f5686c12a48, 0xc01725a1d0, 0xc01c5fff00)
net/http.HandlerFunc.ServeHTTP(0xc00df07280, 0x7f5686c12a48, 0xc01725a1d0, 0xc01c5fff00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/mux.(*pathHandler).ServeHTTP(0xc0181b8e00, 0x7f5686c12a48, 0xc01725a1d0, 0xc01c5fff00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/mux.(*PathRecorderMux).ServeHTTP(0xc005560fc0, 0x7f5686c12a48, 0xc01725a1d0, 0xc01c5fff00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server.director.ServeHTTP(0x41c1830, 0xe, 0xc015c88090, 0xc005560fc0, 0x7f5686c12a48, 0xc01725a1d0, 0xc01c5fff00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/filters.WithAuthorization.func1(0x7f5686c12a48, 0xc01725a1d0, 0xc01c5fff00)
net/http.HandlerFunc.ServeHTTP(0xc00d46ca40, 0x7f5686c12a48, 0xc01725a1d0, 0xc01c5fff00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.WithMaxInFlightLimit.func1(0x7f5686c12a48, 0xc01725a1d0, 0xc01c5fff00)
net/http.HandlerFunc.ServeHTTP(0xc00e356ea0, 0x7f5686c12a48, 0xc01725a1d0, 0xc01c5fff00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/filters.WithImpersonation.func1(0x7f5686c12a48, 0xc01725a1d0, 0xc01c5fff00)
net/http.HandlerFunc.ServeHTTP(0xc00d46ca80, 0x7f5686c12a48, 0xc01725a1d0, 0xc01c5fff00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/filters.WithAuthentication.func1(0x7f5686c12a48, 0xc01725a1d0, 0xc01c5ffe00)
net/http.HandlerFunc.ServeHTTP(0xc014aa8820, 0x7f5686c12a48, 0xc01725a1d0, 0xc01c5ffe00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.(*timeoutHandler).ServeHTTP.func1(0xc01c620e40, 0xc00d0a8f00, 0x60decc0, 0xc01725a1d0, 0xc01c5ffe00)
created by k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.(*timeoutHandler).ServeHTTP

logging error output: "[+]ping ok\n[+]log ok\n[+]etcd ok\n[+]poststarthook/generic-apiserver-start-informers ok\n[+]poststarthook/bootstrap-controller ok\n[-]poststarthook/rbac/bootstrap-roles failed: reason withheld\n[+]poststarthook/scheduling/bootstrap-system-priority-classes ok\n[+]poststarthook/ca-registration ok\nhealthz check failed\n"
 [Go-http-client/1.1 127.0.0.1:38438]
I0212 00:14:46.085843  123634 wrap.go:47] POST /apis/rbac.authorization.k8s.io/v1/clusterrolebindings: (2.314563ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38438]
I0212 00:14:46.086133  123634 storage_rbac.go:215] created clusterrolebinding.rbac.authorization.k8s.io/system:controller:pv-protection-controller
I0212 00:14:46.104763  123634 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/namespaces/kube-system/roles/extension-apiserver-authentication-reader: (1.242672ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38438]
I0212 00:14:46.106770  123634 wrap.go:47] GET /api/v1/namespaces/kube-system: (1.489235ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38438]
I0212 00:14:46.125785  123634 wrap.go:47] POST /apis/rbac.authorization.k8s.io/v1/namespaces/kube-system/roles: (2.13028ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38438]
I0212 00:14:46.126103  123634 storage_rbac.go:246] created role.rbac.authorization.k8s.io/extension-apiserver-authentication-reader in kube-system
I0212 00:14:46.145130  123634 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/namespaces/kube-system/roles/system:controller:bootstrap-signer: (1.540461ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38438]
I0212 00:14:46.147025  123634 wrap.go:47] GET /api/v1/namespaces/kube-system: (1.414932ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38438]
I0212 00:14:46.165107  123634 healthz.go:170] healthz check poststarthook/rbac/bootstrap-roles failed: not finished
I0212 00:14:46.165318  123634 wrap.go:47] GET /healthz: (1.308205ms) 500
goroutine 28994 [running]:
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/httplog.(*respLogger).recordStatus(0xc01c5b39d0, 0x1f4)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/httplog.(*respLogger).WriteHeader(0xc01c5b39d0, 0x1f4)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.(*baseTimeoutWriter).WriteHeader(0xc01814fc00, 0x1f4)
net/http.Error(0x7f5686c12a48, 0xc00dca5158, 0xc01938dcc0, 0x136, 0x1f4)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/healthz.handleRootHealthz.func1(0x7f5686c12a48, 0xc00dca5158, 0xc01c6a1100)
net/http.HandlerFunc.ServeHTTP(0xc00df07280, 0x7f5686c12a48, 0xc00dca5158, 0xc01c6a1100)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/mux.(*pathHandler).ServeHTTP(0xc0181b8e00, 0x7f5686c12a48, 0xc00dca5158, 0xc01c6a1100)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/mux.(*PathRecorderMux).ServeHTTP(0xc005560fc0, 0x7f5686c12a48, 0xc00dca5158, 0xc01c6a1100)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server.director.ServeHTTP(0x41c1830, 0xe, 0xc015c88090, 0xc005560fc0, 0x7f5686c12a48, 0xc00dca5158, 0xc01c6a1100)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/filters.WithAuthorization.func1(0x7f5686c12a48, 0xc00dca5158, 0xc01c6a1100)
net/http.HandlerFunc.ServeHTTP(0xc00d46ca40, 0x7f5686c12a48, 0xc00dca5158, 0xc01c6a1100)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.WithMaxInFlightLimit.func1(0x7f5686c12a48, 0xc00dca5158, 0xc01c6a1100)
net/http.HandlerFunc.ServeHTTP(0xc00e356ea0, 0x7f5686c12a48, 0xc00dca5158, 0xc01c6a1100)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/filters.WithImpersonation.func1(0x7f5686c12a48, 0xc00dca5158, 0xc01c6a1100)
net/http.HandlerFunc.ServeHTTP(0xc00d46ca80, 0x7f5686c12a48, 0xc00dca5158, 0xc01c6a1100)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/filters.WithAuthentication.func1(0x7f5686c12a48, 0xc00dca5158, 0xc01c6a1000)
net/http.HandlerFunc.ServeHTTP(0xc014aa8820, 0x7f5686c12a48, 0xc00dca5158, 0xc01c6a1000)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.(*timeoutHandler).ServeHTTP.func1(0xc01c62eea0, 0xc00d0a8f00, 0x60decc0, 0xc00dca5158, 0xc01c6a1000)
created by k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.(*timeoutHandler).ServeHTTP

logging error output: "[+]ping ok\n[+]log ok\n[+]etcd ok\n[+]poststarthook/generic-apiserver-start-informers ok\n[+]poststarthook/bootstrap-controller ok\n[-]poststarthook/rbac/bootstrap-roles failed: reason withheld\n[+]poststarthook/scheduling/bootstrap-system-priority-classes ok\n[+]poststarthook/ca-registration ok\nhealthz check failed\n"
 [Go-http-client/1.1 127.0.0.1:38436]
I0212 00:14:46.165936  123634 wrap.go:47] POST /apis/rbac.authorization.k8s.io/v1/namespaces/kube-system/roles: (2.272435ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38438]
I0212 00:14:46.166195  123634 storage_rbac.go:246] created role.rbac.authorization.k8s.io/system:controller:bootstrap-signer in kube-system
I0212 00:14:46.185220  123634 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/namespaces/kube-system/roles/system:controller:cloud-provider: (1.403629ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38438]
I0212 00:14:46.187256  123634 wrap.go:47] GET /api/v1/namespaces/kube-system: (1.424042ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38438]
I0212 00:14:46.206214  123634 wrap.go:47] POST /apis/rbac.authorization.k8s.io/v1/namespaces/kube-system/roles: (2.563268ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38438]
I0212 00:14:46.206457  123634 storage_rbac.go:246] created role.rbac.authorization.k8s.io/system:controller:cloud-provider in kube-system
I0212 00:14:46.225064  123634 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/namespaces/kube-system/roles/system:controller:token-cleaner: (1.460812ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38438]
I0212 00:14:46.227169  123634 wrap.go:47] GET /api/v1/namespaces/kube-system: (1.507677ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38438]
I0212 00:14:46.245956  123634 wrap.go:47] POST /apis/rbac.authorization.k8s.io/v1/namespaces/kube-system/roles: (2.315126ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38438]
I0212 00:14:46.246311  123634 storage_rbac.go:246] created role.rbac.authorization.k8s.io/system:controller:token-cleaner in kube-system
I0212 00:14:46.267367  123634 healthz.go:170] healthz check poststarthook/rbac/bootstrap-roles failed: not finished
I0212 00:14:46.267588  123634 wrap.go:47] GET /healthz: (1.655892ms) 500
goroutine 28990 [running]:
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/httplog.(*respLogger).recordStatus(0xc01c708ee0, 0x1f4)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/httplog.(*respLogger).WriteHeader(0xc01c708ee0, 0x1f4)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.(*baseTimeoutWriter).WriteHeader(0xc018ddd940, 0x1f4)
net/http.Error(0x7f5686c12a48, 0xc01725a658, 0xc004747cc0, 0x136, 0x1f4)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/healthz.handleRootHealthz.func1(0x7f5686c12a48, 0xc01725a658, 0xc01c720d00)
net/http.HandlerFunc.ServeHTTP(0xc00df07280, 0x7f5686c12a48, 0xc01725a658, 0xc01c720d00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/mux.(*pathHandler).ServeHTTP(0xc0181b8e00, 0x7f5686c12a48, 0xc01725a658, 0xc01c720d00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/mux.(*PathRecorderMux).ServeHTTP(0xc005560fc0, 0x7f5686c12a48, 0xc01725a658, 0xc01c720d00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server.director.ServeHTTP(0x41c1830, 0xe, 0xc015c88090, 0xc005560fc0, 0x7f5686c12a48, 0xc01725a658, 0xc01c720d00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/filters.WithAuthorization.func1(0x7f5686c12a48, 0xc01725a658, 0xc01c720d00)
net/http.HandlerFunc.ServeHTTP(0xc00d46ca40, 0x7f5686c12a48, 0xc01725a658, 0xc01c720d00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.WithMaxInFlightLimit.func1(0x7f5686c12a48, 0xc01725a658, 0xc01c720d00)
net/http.HandlerFunc.ServeHTTP(0xc00e356ea0, 0x7f5686c12a48, 0xc01725a658, 0xc01c720d00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/filters.WithImpersonation.func1(0x7f5686c12a48, 0xc01725a658, 0xc01c720d00)
net/http.HandlerFunc.ServeHTTP(0xc00d46ca80, 0x7f5686c12a48, 0xc01725a658, 0xc01c720d00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/filters.WithAuthentication.func1(0x7f5686c12a48, 0xc01725a658, 0xc01c720c00)
net/http.HandlerFunc.ServeHTTP(0xc014aa8820, 0x7f5686c12a48, 0xc01725a658, 0xc01c720c00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.(*timeoutHandler).ServeHTTP.func1(0xc01c5aa660, 0xc00d0a8f00, 0x60decc0, 0xc01725a658, 0xc01c720c00)
created by k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.(*timeoutHandler).ServeHTTP

logging error output: "[+]ping ok\n[+]log ok\n[+]etcd ok\n[+]poststarthook/generic-apiserver-start-informers ok\n[+]poststarthook/bootstrap-controller ok\n[-]poststarthook/rbac/bootstrap-roles failed: reason withheld\n[+]poststarthook/scheduling/bootstrap-system-priority-classes ok\n[+]poststarthook/ca-registration ok\nhealthz check failed\n"
 [Go-http-client/1.1 127.0.0.1:38436]
I0212 00:14:46.267873  123634 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/namespaces/kube-system/roles/system::leader-locking-kube-controller-manager: (1.948135ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38438]
I0212 00:14:46.271166  123634 wrap.go:47] GET /api/v1/namespaces/kube-system: (2.51182ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38438]
I0212 00:14:46.285704  123634 wrap.go:47] POST /apis/rbac.authorization.k8s.io/v1/namespaces/kube-system/roles: (2.206355ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38438]
I0212 00:14:46.286036  123634 storage_rbac.go:246] created role.rbac.authorization.k8s.io/system::leader-locking-kube-controller-manager in kube-system
I0212 00:14:46.305243  123634 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/namespaces/kube-system/roles/system::leader-locking-kube-scheduler: (1.824654ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38438]
I0212 00:14:46.307341  123634 wrap.go:47] GET /api/v1/namespaces/kube-system: (1.419749ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38438]
I0212 00:14:46.325971  123634 wrap.go:47] POST /apis/rbac.authorization.k8s.io/v1/namespaces/kube-system/roles: (2.540624ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38438]
I0212 00:14:46.326223  123634 storage_rbac.go:246] created role.rbac.authorization.k8s.io/system::leader-locking-kube-scheduler in kube-system
I0212 00:14:46.345161  123634 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/namespaces/kube-public/roles/system:controller:bootstrap-signer: (1.428543ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38438]
I0212 00:14:46.347478  123634 wrap.go:47] GET /api/v1/namespaces/kube-public: (1.716891ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38438]
I0212 00:14:46.364953  123634 healthz.go:170] healthz check poststarthook/rbac/bootstrap-roles failed: not finished
I0212 00:14:46.365139  123634 wrap.go:47] GET /healthz: (1.15723ms) 500
goroutine 29026 [running]:
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/httplog.(*respLogger).recordStatus(0xc01c709490, 0x1f4)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/httplog.(*respLogger).WriteHeader(0xc01c709490, 0x1f4)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.(*baseTimeoutWriter).WriteHeader(0xc019024760, 0x1f4)
net/http.Error(0x7f5686c12a48, 0xc01725a6f0, 0xc0001f7a40, 0x136, 0x1f4)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/healthz.handleRootHealthz.func1(0x7f5686c12a48, 0xc01725a6f0, 0xc01c721d00)
net/http.HandlerFunc.ServeHTTP(0xc00df07280, 0x7f5686c12a48, 0xc01725a6f0, 0xc01c721d00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/mux.(*pathHandler).ServeHTTP(0xc0181b8e00, 0x7f5686c12a48, 0xc01725a6f0, 0xc01c721d00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/mux.(*PathRecorderMux).ServeHTTP(0xc005560fc0, 0x7f5686c12a48, 0xc01725a6f0, 0xc01c721d00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server.director.ServeHTTP(0x41c1830, 0xe, 0xc015c88090, 0xc005560fc0, 0x7f5686c12a48, 0xc01725a6f0, 0xc01c721d00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/filters.WithAuthorization.func1(0x7f5686c12a48, 0xc01725a6f0, 0xc01c721d00)
net/http.HandlerFunc.ServeHTTP(0xc00d46ca40, 0x7f5686c12a48, 0xc01725a6f0, 0xc01c721d00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.WithMaxInFlightLimit.func1(0x7f5686c12a48, 0xc01725a6f0, 0xc01c721d00)
net/http.HandlerFunc.ServeHTTP(0xc00e356ea0, 0x7f5686c12a48, 0xc01725a6f0, 0xc01c721d00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/filters.WithImpersonation.func1(0x7f5686c12a48, 0xc01725a6f0, 0xc01c721d00)
net/http.HandlerFunc.ServeHTTP(0xc00d46ca80, 0x7f5686c12a48, 0xc01725a6f0, 0xc01c721d00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/filters.WithAuthentication.func1(0x7f5686c12a48, 0xc01725a6f0, 0xc01c721c00)
net/http.HandlerFunc.ServeHTTP(0xc014aa8820, 0x7f5686c12a48, 0xc01725a6f0, 0xc01c721c00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.(*timeoutHandler).ServeHTTP.func1(0xc01c5aade0, 0xc00d0a8f00, 0x60decc0, 0xc01725a6f0, 0xc01c721c00)
created by k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.(*timeoutHandler).ServeHTTP

logging error output: "[+]ping ok\n[+]log ok\n[+]etcd ok\n[+]poststarthook/generic-apiserver-start-informers ok\n[+]poststarthook/bootstrap-controller ok\n[-]poststarthook/rbac/bootstrap-roles failed: reason withheld\n[+]poststarthook/scheduling/bootstrap-system-priority-classes ok\n[+]poststarthook/ca-registration ok\nhealthz check failed\n"
 [Go-http-client/1.1 127.0.0.1:38436]
I0212 00:14:46.365254  123634 wrap.go:47] POST /apis/rbac.authorization.k8s.io/v1/namespaces/kube-public/roles: (1.747692ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38438]
I0212 00:14:46.365615  123634 storage_rbac.go:246] created role.rbac.authorization.k8s.io/system:controller:bootstrap-signer in kube-public
I0212 00:14:46.384982  123634 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/namespaces/kube-system/rolebindings/system::extension-apiserver-authentication-reader: (1.379535ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38438]
I0212 00:14:46.386699  123634 wrap.go:47] GET /api/v1/namespaces/kube-system: (1.272948ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38438]
I0212 00:14:46.416807  123634 wrap.go:47] POST /apis/rbac.authorization.k8s.io/v1/namespaces/kube-system/rolebindings: (2.92971ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38438]
I0212 00:14:46.417161  123634 storage_rbac.go:276] created rolebinding.rbac.authorization.k8s.io/system::extension-apiserver-authentication-reader in kube-system
I0212 00:14:46.424953  123634 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/namespaces/kube-system/rolebindings/system::leader-locking-kube-controller-manager: (1.433299ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38438]
I0212 00:14:46.426775  123634 wrap.go:47] GET /api/v1/namespaces/kube-system: (1.356776ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38438]
I0212 00:14:46.445967  123634 wrap.go:47] POST /apis/rbac.authorization.k8s.io/v1/namespaces/kube-system/rolebindings: (2.378055ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38438]
I0212 00:14:46.446288  123634 storage_rbac.go:276] created rolebinding.rbac.authorization.k8s.io/system::leader-locking-kube-controller-manager in kube-system
I0212 00:14:46.466064  123634 healthz.go:170] healthz check poststarthook/rbac/bootstrap-roles failed: not finished
I0212 00:14:46.466265  123634 wrap.go:47] GET /healthz: (1.834586ms) 500
goroutine 28893 [running]:
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/httplog.(*respLogger).recordStatus(0xc01c78cf50, 0x1f4)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/httplog.(*respLogger).WriteHeader(0xc01c78cf50, 0x1f4)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.(*baseTimeoutWriter).WriteHeader(0xc019082200, 0x1f4)
net/http.Error(0x7f5686c12a48, 0xc00d240c40, 0xc01c818140, 0x136, 0x1f4)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/healthz.handleRootHealthz.func1(0x7f5686c12a48, 0xc00d240c40, 0xc01c533d00)
net/http.HandlerFunc.ServeHTTP(0xc00df07280, 0x7f5686c12a48, 0xc00d240c40, 0xc01c533d00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/mux.(*pathHandler).ServeHTTP(0xc0181b8e00, 0x7f5686c12a48, 0xc00d240c40, 0xc01c533d00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/mux.(*PathRecorderMux).ServeHTTP(0xc005560fc0, 0x7f5686c12a48, 0xc00d240c40, 0xc01c533d00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server.director.ServeHTTP(0x41c1830, 0xe, 0xc015c88090, 0xc005560fc0, 0x7f5686c12a48, 0xc00d240c40, 0xc01c533d00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/filters.WithAuthorization.func1(0x7f5686c12a48, 0xc00d240c40, 0xc01c533d00)
net/http.HandlerFunc.ServeHTTP(0xc00d46ca40, 0x7f5686c12a48, 0xc00d240c40, 0xc01c533d00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.WithMaxInFlightLimit.func1(0x7f5686c12a48, 0xc00d240c40, 0xc01c533d00)
net/http.HandlerFunc.ServeHTTP(0xc00e356ea0, 0x7f5686c12a48, 0xc00d240c40, 0xc01c533d00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/filters.WithImpersonation.func1(0x7f5686c12a48, 0xc00d240c40, 0xc01c533d00)
net/http.HandlerFunc.ServeHTTP(0xc00d46ca80, 0x7f5686c12a48, 0xc00d240c40, 0xc01c533d00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/filters.WithAuthentication.func1(0x7f5686c12a48, 0xc00d240c40, 0xc01c533c00)
net/http.HandlerFunc.ServeHTTP(0xc014aa8820, 0x7f5686c12a48, 0xc00d240c40, 0xc01c533c00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.(*timeoutHandler).ServeHTTP.func1(0xc01c5392c0, 0xc00d0a8f00, 0x60decc0, 0xc00d240c40, 0xc01c533c00)
created by k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.(*timeoutHandler).ServeHTTP

logging error output: "[+]ping ok\n[+]log ok\n[+]etcd ok\n[+]poststarthook/generic-apiserver-start-informers ok\n[+]poststarthook/bootstrap-controller ok\n[-]poststarthook/rbac/bootstrap-roles failed: reason withheld\n[+]poststarthook/scheduling/bootstrap-system-priority-classes ok\n[+]poststarthook/ca-registration ok\nhealthz check failed\n"
 [Go-http-client/1.1 127.0.0.1:38438]
I0212 00:14:46.472109  123634 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/namespaces/kube-system/rolebindings/system::leader-locking-kube-scheduler: (8.085999ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38436]
I0212 00:14:46.478628  123634 wrap.go:47] GET /api/v1/namespaces/kube-system: (5.831343ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38436]
I0212 00:14:46.488780  123634 wrap.go:47] POST /apis/rbac.authorization.k8s.io/v1/namespaces/kube-system/rolebindings: (5.057133ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38436]
I0212 00:14:46.489203  123634 storage_rbac.go:276] created rolebinding.rbac.authorization.k8s.io/system::leader-locking-kube-scheduler in kube-system
I0212 00:14:46.505255  123634 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/namespaces/kube-system/rolebindings/system:controller:bootstrap-signer: (1.689431ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38436]
I0212 00:14:46.507244  123634 wrap.go:47] GET /api/v1/namespaces/kube-system: (1.501519ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38436]
I0212 00:14:46.526168  123634 wrap.go:47] POST /apis/rbac.authorization.k8s.io/v1/namespaces/kube-system/rolebindings: (2.61002ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38436]
I0212 00:14:46.526412  123634 storage_rbac.go:276] created rolebinding.rbac.authorization.k8s.io/system:controller:bootstrap-signer in kube-system
I0212 00:14:46.544923  123634 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/namespaces/kube-system/rolebindings/system:controller:cloud-provider: (1.439723ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38436]
I0212 00:14:46.547702  123634 wrap.go:47] GET /api/v1/namespaces/kube-system: (2.311752ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38436]
I0212 00:14:46.567234  123634 wrap.go:47] POST /apis/rbac.authorization.k8s.io/v1/namespaces/kube-system/rolebindings: (3.644946ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38436]
I0212 00:14:46.567278  123634 healthz.go:170] healthz check poststarthook/rbac/bootstrap-roles failed: not finished
I0212 00:14:46.567470  123634 wrap.go:47] GET /healthz: (3.486711ms) 500
goroutine 29007 [running]:
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/httplog.(*respLogger).recordStatus(0xc01c773110, 0x1f4)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/httplog.(*respLogger).WriteHeader(0xc01c773110, 0x1f4)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.(*baseTimeoutWriter).WriteHeader(0xc0190c61a0, 0x1f4)
net/http.Error(0x7f5686c12a48, 0xc00dca5490, 0xc0039bfe00, 0x136, 0x1f4)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/healthz.handleRootHealthz.func1(0x7f5686c12a48, 0xc00dca5490, 0xc01c87c100)
net/http.HandlerFunc.ServeHTTP(0xc00df07280, 0x7f5686c12a48, 0xc00dca5490, 0xc01c87c100)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/mux.(*pathHandler).ServeHTTP(0xc0181b8e00, 0x7f5686c12a48, 0xc00dca5490, 0xc01c87c100)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/mux.(*PathRecorderMux).ServeHTTP(0xc005560fc0, 0x7f5686c12a48, 0xc00dca5490, 0xc01c87c100)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server.director.ServeHTTP(0x41c1830, 0xe, 0xc015c88090, 0xc005560fc0, 0x7f5686c12a48, 0xc00dca5490, 0xc01c87c100)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/filters.WithAuthorization.func1(0x7f5686c12a48, 0xc00dca5490, 0xc01c87c100)
net/http.HandlerFunc.ServeHTTP(0xc00d46ca40, 0x7f5686c12a48, 0xc00dca5490, 0xc01c87c100)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.WithMaxInFlightLimit.func1(0x7f5686c12a48, 0xc00dca5490, 0xc01c87c100)
net/http.HandlerFunc.ServeHTTP(0xc00e356ea0, 0x7f5686c12a48, 0xc00dca5490, 0xc01c87c100)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/filters.WithImpersonation.func1(0x7f5686c12a48, 0xc00dca5490, 0xc01c87c100)
net/http.HandlerFunc.ServeHTTP(0xc00d46ca80, 0x7f5686c12a48, 0xc00dca5490, 0xc01c87c100)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/filters.WithAuthentication.func1(0x7f5686c12a48, 0xc00dca5490, 0xc01c87c000)
net/http.HandlerFunc.ServeHTTP(0xc014aa8820, 0x7f5686c12a48, 0xc00dca5490, 0xc01c87c000)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.(*timeoutHandler).ServeHTTP.func1(0xc01c85a900, 0xc00d0a8f00, 0x60decc0, 0xc00dca5490, 0xc01c87c000)
created by k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.(*timeoutHandler).ServeHTTP

logging error output: "[+]ping ok\n[+]log ok\n[+]etcd ok\n[+]poststarthook/generic-apiserver-start-informers ok\n[+]poststarthook/bootstrap-controller ok\n[-]poststarthook/rbac/bootstrap-roles failed: reason withheld\n[+]poststarthook/scheduling/bootstrap-system-priority-classes ok\n[+]poststarthook/ca-registration ok\nhealthz check failed\n"
 [Go-http-client/1.1 127.0.0.1:38438]
I0212 00:14:46.567504  123634 storage_rbac.go:276] created rolebinding.rbac.authorization.k8s.io/system:controller:cloud-provider in kube-system
I0212 00:14:46.584789  123634 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/namespaces/kube-system/rolebindings/system:controller:token-cleaner: (1.30481ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38438]
I0212 00:14:46.586771  123634 wrap.go:47] GET /api/v1/namespaces/kube-system: (1.347236ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38438]
I0212 00:14:46.607671  123634 wrap.go:47] POST /apis/rbac.authorization.k8s.io/v1/namespaces/kube-system/rolebindings: (4.143887ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38438]
I0212 00:14:46.607909  123634 storage_rbac.go:276] created rolebinding.rbac.authorization.k8s.io/system:controller:token-cleaner in kube-system
I0212 00:14:46.625253  123634 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/namespaces/kube-public/rolebindings/system:controller:bootstrap-signer: (1.57873ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38438]
I0212 00:14:46.627458  123634 wrap.go:47] GET /api/v1/namespaces/kube-public: (1.585679ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38438]
I0212 00:14:46.645898  123634 wrap.go:47] POST /apis/rbac.authorization.k8s.io/v1/namespaces/kube-public/rolebindings: (2.280743ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38438]
I0212 00:14:46.646226  123634 storage_rbac.go:276] created rolebinding.rbac.authorization.k8s.io/system:controller:bootstrap-signer in kube-public
I0212 00:14:46.665377  123634 wrap.go:47] GET /healthz: (1.237483ms) 200 [Go-http-client/1.1 127.0.0.1:38438]
W0212 00:14:46.666195  123634 mutation_detector.go:48] Mutation detector is enabled, this will result in memory leakage.
W0212 00:14:46.666249  123634 mutation_detector.go:48] Mutation detector is enabled, this will result in memory leakage.
W0212 00:14:46.666281  123634 mutation_detector.go:48] Mutation detector is enabled, this will result in memory leakage.
W0212 00:14:46.666299  123634 mutation_detector.go:48] Mutation detector is enabled, this will result in memory leakage.
W0212 00:14:46.666311  123634 mutation_detector.go:48] Mutation detector is enabled, this will result in memory leakage.
W0212 00:14:46.666322  123634 mutation_detector.go:48] Mutation detector is enabled, this will result in memory leakage.
W0212 00:14:46.666382  123634 mutation_detector.go:48] Mutation detector is enabled, this will result in memory leakage.
W0212 00:14:46.666409  123634 mutation_detector.go:48] Mutation detector is enabled, this will result in memory leakage.
W0212 00:14:46.666424  123634 mutation_detector.go:48] Mutation detector is enabled, this will result in memory leakage.
W0212 00:14:46.666443  123634 mutation_detector.go:48] Mutation detector is enabled, this will result in memory leakage.
I0212 00:14:46.666533  123634 factory.go:331] Creating scheduler from algorithm provider 'DefaultProvider'
I0212 00:14:46.666566  123634 factory.go:412] Creating scheduler with fit predicates 'map[MaxAzureDiskVolumeCount:{} CheckNodeMemoryPressure:{} CheckNodeDiskPressure:{} MatchInterPodAffinity:{} GeneralPredicates:{} CheckNodeCondition:{} CheckNodePIDPressure:{} CheckVolumeBinding:{} NoVolumeZoneConflict:{} MaxEBSVolumeCount:{} MaxGCEPDVolumeCount:{} NoDiskConflict:{} MaxCSIVolumeCountPred:{} PodToleratesNodeTaints:{}]' and priority functions 'map[LeastRequestedPriority:{} BalancedResourceAllocation:{} NodePreferAvoidPodsPriority:{} NodeAffinityPriority:{} TaintTolerationPriority:{} ImageLocalityPriority:{} SelectorSpreadPriority:{} InterPodAffinityPriority:{}]'
I0212 00:14:46.666776  123634 controller_utils.go:1021] Waiting for caches to sync for scheduler controller
I0212 00:14:46.667122  123634 reflector.go:132] Starting reflector *v1.Pod (12h0m0s) from k8s.io/kubernetes/test/integration/scheduler/util.go:210
I0212 00:14:46.667136  123634 reflector.go:170] Listing and watching *v1.Pod from k8s.io/kubernetes/test/integration/scheduler/util.go:210
I0212 00:14:46.668332  123634 wrap.go:47] GET /api/v1/pods?fieldSelector=status.phase%21%3DFailed%2Cstatus.phase%21%3DSucceeded&limit=500&resourceVersion=0: (864.991µs) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38438]
I0212 00:14:46.669449  123634 get.go:251] Starting watch for /api/v1/pods, rv=19244 labels= fields=status.phase!=Failed,status.phase!=Succeeded timeout=9m59s
I0212 00:14:46.766994  123634 shared_informer.go:123] caches populated
I0212 00:14:46.767036  123634 controller_utils.go:1028] Caches are synced for scheduler controller
I0212 00:14:46.767601  123634 reflector.go:132] Starting reflector *v1.ReplicaSet (1s) from k8s.io/client-go/informers/factory.go:132
I0212 00:14:46.767632  123634 reflector.go:170] Listing and watching *v1.ReplicaSet from k8s.io/client-go/informers/factory.go:132
I0212 00:14:46.768058  123634 reflector.go:132] Starting reflector *v1.PersistentVolume (1s) from k8s.io/client-go/informers/factory.go:132
I0212 00:14:46.768093  123634 reflector.go:170] Listing and watching *v1.PersistentVolume from k8s.io/client-go/informers/factory.go:132
I0212 00:14:46.768127  123634 reflector.go:132] Starting reflector *v1.StatefulSet (1s) from k8s.io/client-go/informers/factory.go:132
I0212 00:14:46.768142  123634 reflector.go:170] Listing and watching *v1.StatefulSet from k8s.io/client-go/informers/factory.go:132
I0212 00:14:46.768661  123634 reflector.go:132] Starting reflector *v1.StorageClass (1s) from k8s.io/client-go/informers/factory.go:132
I0212 00:14:46.768692  123634 reflector.go:170] Listing and watching *v1.StorageClass from k8s.io/client-go/informers/factory.go:132
I0212 00:14:46.768811  123634 wrap.go:47] GET /apis/apps/v1/replicasets?limit=500&resourceVersion=0: (681.988µs) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38436]
I0212 00:14:46.768839  123634 reflector.go:132] Starting reflector *v1.Node (1s) from k8s.io/client-go/informers/factory.go:132
I0212 00:14:46.768872  123634 reflector.go:170] Listing and watching *v1.Node from k8s.io/client-go/informers/factory.go:132
I0212 00:14:46.769011  123634 reflector.go:132] Starting reflector *v1.ReplicationController (1s) from k8s.io/client-go/informers/factory.go:132
I0212 00:14:46.769120  123634 reflector.go:170] Listing and watching *v1.ReplicationController from k8s.io/client-go/informers/factory.go:132
I0212 00:14:46.769221  123634 reflector.go:132] Starting reflector *v1.Service (1s) from k8s.io/client-go/informers/factory.go:132
I0212 00:14:46.769236  123634 reflector.go:170] Listing and watching *v1.Service from k8s.io/client-go/informers/factory.go:132
I0212 00:14:46.769418  123634 reflector.go:132] Starting reflector *v1beta1.PodDisruptionBudget (1s) from k8s.io/client-go/informers/factory.go:132
I0212 00:14:46.769437  123634 reflector.go:170] Listing and watching *v1beta1.PodDisruptionBudget from k8s.io/client-go/informers/factory.go:132
I0212 00:14:46.769658  123634 reflector.go:132] Starting reflector *v1.PersistentVolumeClaim (1s) from k8s.io/client-go/informers/factory.go:132
I0212 00:14:46.769672  123634 wrap.go:47] GET /api/v1/persistentvolumes?limit=500&resourceVersion=0: (643.441µs) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38634]
I0212 00:14:46.769666  123634 wrap.go:47] GET /api/v1/nodes?limit=500&resourceVersion=0: (535.49µs) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38436]
I0212 00:14:46.769679  123634 reflector.go:170] Listing and watching *v1.PersistentVolumeClaim from k8s.io/client-go/informers/factory.go:132
I0212 00:14:46.770332  123634 wrap.go:47] GET /apis/apps/v1/statefulsets?limit=500&resourceVersion=0: (384.65µs) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38636]
I0212 00:14:46.770367  123634 wrap.go:47] GET /api/v1/replicationcontrollers?limit=500&resourceVersion=0: (400.261µs) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38640]
I0212 00:14:46.770426  123634 wrap.go:47] GET /apis/storage.k8s.io/v1/storageclasses?limit=500&resourceVersion=0: (501.943µs) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38638]
I0212 00:14:46.770927  123634 get.go:251] Starting watch for /apis/apps/v1/replicasets, rv=19244 labels= fields= timeout=6m52s
I0212 00:14:46.770947  123634 get.go:251] Starting watch for /api/v1/nodes, rv=19244 labels= fields= timeout=8m17s
I0212 00:14:46.771058  123634 wrap.go:47] GET /api/v1/services?limit=500&resourceVersion=0: (435.498µs) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38436]
I0212 00:14:46.771159  123634 wrap.go:47] GET /api/v1/persistentvolumeclaims?limit=500&resourceVersion=0: (370.498µs) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38648]
I0212 00:14:46.771972  123634 get.go:251] Starting watch for /api/v1/persistentvolumes, rv=19244 labels= fields= timeout=7m18s
I0212 00:14:46.772015  123634 get.go:251] Starting watch for /api/v1/replicationcontrollers, rv=19244 labels= fields= timeout=7m59s
I0212 00:14:46.772204  123634 wrap.go:47] GET /apis/policy/v1beta1/poddisruptionbudgets?limit=500&resourceVersion=0: (1.656835ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38646]
I0212 00:14:46.772655  123634 get.go:251] Starting watch for /apis/apps/v1/statefulsets, rv=19244 labels= fields= timeout=6m7s
I0212 00:14:46.772813  123634 get.go:251] Starting watch for /apis/storage.k8s.io/v1/storageclasses, rv=19244 labels= fields= timeout=6m55s
I0212 00:14:46.772821  123634 get.go:251] Starting watch for /api/v1/services, rv=19249 labels= fields= timeout=9m59s
I0212 00:14:46.772862  123634 get.go:251] Starting watch for /apis/policy/v1beta1/poddisruptionbudgets, rv=19244 labels= fields= timeout=9m43s
I0212 00:14:46.772999  123634 get.go:251] Starting watch for /api/v1/persistentvolumeclaims, rv=19244 labels= fields= timeout=7m45s
I0212 00:14:46.867447  123634 shared_informer.go:123] caches populated
I0212 00:14:46.967689  123634 shared_informer.go:123] caches populated
I0212 00:14:47.067930  123634 shared_informer.go:123] caches populated
I0212 00:14:47.168155  123634 shared_informer.go:123] caches populated
I0212 00:14:47.268411  123634 shared_informer.go:123] caches populated
E0212 00:14:47.367925  123634 event.go:200] Unable to write event: 'Patch http://127.0.0.1:37285/api/v1/namespaces/prebind-plugin18934f9a-2e5b-11e9-8330-0242ac110002/events/test-pod.158275864c651794: dial tcp 127.0.0.1:37285: connect: connection refused' (may retry after sleeping)
I0212 00:14:47.368641  123634 shared_informer.go:123] caches populated
I0212 00:14:47.468949  123634 shared_informer.go:123] caches populated
I0212 00:14:47.569179  123634 shared_informer.go:123] caches populated
I0212 00:14:47.669362  123634 shared_informer.go:123] caches populated
I0212 00:14:47.769623  123634 shared_informer.go:123] caches populated
I0212 00:14:47.770691  123634 reflector.go:248] k8s.io/client-go/informers/factory.go:132: forcing resync
I0212 00:14:47.770760  123634 reflector.go:248] k8s.io/client-go/informers/factory.go:132: forcing resync
I0212 00:14:47.772472  123634 wrap.go:47] POST /api/v1/nodes: (2.386722ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38658]
I0212 00:14:47.774504  123634 reflector.go:248] k8s.io/client-go/informers/factory.go:132: forcing resync
I0212 00:14:47.774606  123634 reflector.go:248] k8s.io/client-go/informers/factory.go:132: forcing resync
I0212 00:14:47.775573  123634 reflector.go:248] k8s.io/client-go/informers/factory.go:132: forcing resync
I0212 00:14:47.776805  123634 wrap.go:47] POST /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods: (3.100713ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38658]
I0212 00:14:47.777324  123634 scheduling_queue.go:868] About to try and schedule pod preemption-race3458f067-2e5b-11e9-8330-0242ac110002/rpod-0
I0212 00:14:47.777823  123634 scheduler.go:453] Attempting to schedule pod: preemption-race3458f067-2e5b-11e9-8330-0242ac110002/rpod-0
I0212 00:14:47.781212  123634 wrap.go:47] POST /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods: (2.865664ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38658]
I0212 00:14:47.781951  123634 scheduler_binder.go:269] AssumePodVolumes for pod "preemption-race3458f067-2e5b-11e9-8330-0242ac110002/rpod-0", node "node1"
I0212 00:14:47.781994  123634 scheduler_binder.go:279] AssumePodVolumes for pod "preemption-race3458f067-2e5b-11e9-8330-0242ac110002/rpod-0", node "node1": all PVCs bound and nothing to do
I0212 00:14:47.782148  123634 factory.go:733] Attempting to bind rpod-0 to node1
I0212 00:14:47.782230  123634 scheduling_queue.go:868] About to try and schedule pod preemption-race3458f067-2e5b-11e9-8330-0242ac110002/rpod-1
I0212 00:14:47.782251  123634 scheduler.go:453] Attempting to schedule pod: preemption-race3458f067-2e5b-11e9-8330-0242ac110002/rpod-1
I0212 00:14:47.782389  123634 scheduler_binder.go:269] AssumePodVolumes for pod "preemption-race3458f067-2e5b-11e9-8330-0242ac110002/rpod-1", node "node1"
I0212 00:14:47.782419  123634 scheduler_binder.go:279] AssumePodVolumes for pod "preemption-race3458f067-2e5b-11e9-8330-0242ac110002/rpod-1", node "node1": all PVCs bound and nothing to do
I0212 00:14:47.782596  123634 factory.go:733] Attempting to bind rpod-1 to node1
I0212 00:14:47.784826  123634 wrap.go:47] POST /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/rpod-0/binding: (2.376877ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38658]
I0212 00:14:47.785247  123634 scheduler.go:571] pod preemption-race3458f067-2e5b-11e9-8330-0242ac110002/rpod-0 is bound successfully on node node1, 1 nodes evaluated, 1 nodes were found feasible
I0212 00:14:47.787666  123634 wrap.go:47] POST /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/rpod-1/binding: (2.287734ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38658]
I0212 00:14:47.788013  123634 wrap.go:47] POST /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/events: (2.007337ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38672]
I0212 00:14:47.788180  123634 scheduler.go:571] pod preemption-race3458f067-2e5b-11e9-8330-0242ac110002/rpod-1 is bound successfully on node node1, 1 nodes evaluated, 1 nodes were found feasible
I0212 00:14:47.790591  123634 wrap.go:47] POST /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/events: (1.379103ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38672]
I0212 00:14:47.884507  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/rpod-0: (2.255298ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38672]
I0212 00:14:47.987498  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/rpod-1: (1.99489ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38672]
I0212 00:14:47.987885  123634 preemption_test.go:561] Creating the preemptor pod...
I0212 00:14:47.990513  123634 wrap.go:47] POST /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods: (2.342024ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38672]
I0212 00:14:47.990741  123634 scheduling_queue.go:868] About to try and schedule pod preemption-race3458f067-2e5b-11e9-8330-0242ac110002/preemptor-pod
I0212 00:14:47.990756  123634 scheduler.go:453] Attempting to schedule pod: preemption-race3458f067-2e5b-11e9-8330-0242ac110002/preemptor-pod
I0212 00:14:47.990777  123634 preemption_test.go:567] Creating additional pods...
I0212 00:14:47.990916  123634 factory.go:647] Unable to schedule preemption-race3458f067-2e5b-11e9-8330-0242ac110002/preemptor-pod: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 00:14:47.990961  123634 factory.go:742] Updating pod condition for preemption-race3458f067-2e5b-11e9-8330-0242ac110002/preemptor-pod to (PodScheduled==False, Reason=Unschedulable)
I0212 00:14:47.992800  123634 wrap.go:47] POST /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods: (1.783154ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38672]
I0212 00:14:47.992923  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/preemptor-pod: (1.760234ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38658]
I0212 00:14:47.993511  123634 wrap.go:47] POST /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/events: (1.724428ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38676]
I0212 00:14:47.994688  123634 wrap.go:47] PUT /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/preemptor-pod/status: (2.89378ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38674]
I0212 00:14:47.995134  123634 wrap.go:47] POST /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods: (1.935923ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38658]
I0212 00:14:47.996346  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/preemptor-pod: (1.280718ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38676]
I0212 00:14:47.996626  123634 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 00:14:47.997649  123634 wrap.go:47] POST /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods: (1.828653ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38658]
I0212 00:14:47.998957  123634 wrap.go:47] PUT /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/preemptor-pod/status: (1.929814ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38676]
I0212 00:14:48.000700  123634 wrap.go:47] POST /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods: (2.263451ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38658]
I0212 00:14:48.002908  123634 wrap.go:47] POST /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods: (1.704738ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38658]
I0212 00:14:48.004012  123634 wrap.go:47] DELETE /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/rpod-1: (4.61135ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38676]
I0212 00:14:48.004275  123634 scheduling_queue.go:868] About to try and schedule pod preemption-race3458f067-2e5b-11e9-8330-0242ac110002/preemptor-pod
I0212 00:14:48.004300  123634 scheduler.go:453] Attempting to schedule pod: preemption-race3458f067-2e5b-11e9-8330-0242ac110002/preemptor-pod
I0212 00:14:48.004420  123634 factory.go:647] Unable to schedule preemption-race3458f067-2e5b-11e9-8330-0242ac110002/preemptor-pod: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 00:14:48.004460  123634 factory.go:742] Updating pod condition for preemption-race3458f067-2e5b-11e9-8330-0242ac110002/preemptor-pod to (PodScheduled==False, Reason=Unschedulable)
I0212 00:14:48.005002  123634 wrap.go:47] POST /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods: (1.696158ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38658]
I0212 00:14:48.007256  123634 wrap.go:47] PUT /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/preemptor-pod/status: (2.108284ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38672]
I0212 00:14:48.007371  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/preemptor-pod: (1.946427ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38658]
I0212 00:14:48.007643  123634 wrap.go:47] POST /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods: (1.785597ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38680]
I0212 00:14:48.009195  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/preemptor-pod: (1.343446ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38672]
I0212 00:14:48.009847  123634 wrap.go:47] POST /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods: (1.791836ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38680]
I0212 00:14:48.010120  123634 scheduling_queue.go:868] About to try and schedule pod preemption-race3458f067-2e5b-11e9-8330-0242ac110002/preemptor-pod
I0212 00:14:48.010134  123634 scheduler.go:453] Attempting to schedule pod: preemption-race3458f067-2e5b-11e9-8330-0242ac110002/preemptor-pod
I0212 00:14:48.010266  123634 wrap.go:47] POST /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/events: (5.789718ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38676]
I0212 00:14:48.010270  123634 scheduler_binder.go:269] AssumePodVolumes for pod "preemption-race3458f067-2e5b-11e9-8330-0242ac110002/preemptor-pod", node "node1"
I0212 00:14:48.010284  123634 scheduler_binder.go:279] AssumePodVolumes for pod "preemption-race3458f067-2e5b-11e9-8330-0242ac110002/preemptor-pod", node "node1": all PVCs bound and nothing to do
I0212 00:14:48.010326  123634 factory.go:733] Attempting to bind preemptor-pod to node1
I0212 00:14:48.010587  123634 scheduling_queue.go:868] About to try and schedule pod preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-0
I0212 00:14:48.010600  123634 scheduler.go:453] Attempting to schedule pod: preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-0
I0212 00:14:48.010715  123634 factory.go:647] Unable to schedule preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-0: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 00:14:48.010758  123634 factory.go:742] Updating pod condition for preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-0 to (PodScheduled==False, Reason=Unschedulable)
I0212 00:14:48.015302  123634 wrap.go:47] POST /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/preemptor-pod/binding: (3.853394ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38678]
I0212 00:14:48.015519  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-0: (4.083261ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38684]
I0212 00:14:48.015830  123634 wrap.go:47] PUT /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-0/status: (4.009018ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38682]
I0212 00:14:48.016077  123634 scheduler.go:571] pod preemption-race3458f067-2e5b-11e9-8330-0242ac110002/preemptor-pod is bound successfully on node node1, 1 nodes evaluated, 1 nodes were found feasible
I0212 00:14:48.016281  123634 wrap.go:47] POST /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods: (3.737506ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38672]
I0212 00:14:48.018971  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-0: (2.429826ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38678]
I0212 00:14:48.019369  123634 wrap.go:47] POST /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods: (2.350049ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38672]
I0212 00:14:48.019381  123634 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 00:14:48.019594  123634 scheduling_queue.go:868] About to try and schedule pod preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-2
I0212 00:14:48.019634  123634 scheduler.go:453] Attempting to schedule pod: preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-2
I0212 00:14:48.019827  123634 factory.go:647] Unable to schedule preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-2: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 00:14:48.019940  123634 factory.go:742] Updating pod condition for preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-2 to (PodScheduled==False, Reason=Unschedulable)
I0212 00:14:48.019950  123634 wrap.go:47] PATCH /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/events/preemptor-pod.15827591233f914f: (7.012985ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38676]
I0212 00:14:48.022573  123634 wrap.go:47] PUT /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-2/status: (2.303357ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38678]
I0212 00:14:48.022841  123634 wrap.go:47] POST /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/events: (2.108071ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38684]
I0212 00:14:48.022919  123634 wrap.go:47] POST /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods: (2.148839ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38686]
I0212 00:14:48.023120  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-2: (2.812978ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38676]
I0212 00:14:48.024646  123634 wrap.go:47] POST /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/events: (1.401874ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38684]
I0212 00:14:48.024869  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-2: (1.856951ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38678]
I0212 00:14:48.025141  123634 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 00:14:48.025282  123634 scheduling_queue.go:868] About to try and schedule pod preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-3
I0212 00:14:48.025308  123634 scheduler.go:453] Attempting to schedule pod: preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-3
I0212 00:14:48.025381  123634 wrap.go:47] POST /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods: (1.975064ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38676]
I0212 00:14:48.025419  123634 factory.go:647] Unable to schedule preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-3: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 00:14:48.025468  123634 factory.go:742] Updating pod condition for preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-3 to (PodScheduled==False, Reason=Unschedulable)
I0212 00:14:48.027897  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-3: (2.161248ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38686]
I0212 00:14:48.028244  123634 wrap.go:47] POST /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods: (2.095771ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38688]
I0212 00:14:48.029171  123634 wrap.go:47] PUT /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-3/status: (3.431393ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38676]
I0212 00:14:48.030405  123634 wrap.go:47] POST /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods: (1.703602ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38686]
I0212 00:14:48.030725  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-3: (1.146855ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38676]
I0212 00:14:48.030893  123634 wrap.go:47] POST /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/events: (5.377623ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38678]
I0212 00:14:48.031081  123634 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 00:14:48.031268  123634 scheduling_queue.go:868] About to try and schedule pod preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-1
I0212 00:14:48.031289  123634 scheduler.go:453] Attempting to schedule pod: preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-1
I0212 00:14:48.031391  123634 factory.go:647] Unable to schedule preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-1: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 00:14:48.031458  123634 factory.go:742] Updating pod condition for preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-1 to (PodScheduled==False, Reason=Unschedulable)
I0212 00:14:48.033227  123634 wrap.go:47] POST /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods: (2.269396ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38686]
I0212 00:14:48.034573  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-1: (2.336991ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38688]
I0212 00:14:48.035186  123634 wrap.go:47] PUT /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-1/status: (2.705636ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38690]
I0212 00:14:48.036280  123634 wrap.go:47] POST /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods: (2.325855ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38686]
I0212 00:14:48.036466  123634 wrap.go:47] POST /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/events: (5.100479ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38676]
I0212 00:14:48.037361  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-1: (1.147709ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38690]
I0212 00:14:48.037703  123634 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 00:14:48.037856  123634 scheduling_queue.go:868] About to try and schedule pod preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-1
I0212 00:14:48.037879  123634 scheduler.go:453] Attempting to schedule pod: preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-1
I0212 00:14:48.037980  123634 factory.go:647] Unable to schedule preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-1: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 00:14:48.038047  123634 factory.go:742] Updating pod condition for preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-1 to (PodScheduled==False, Reason=Unschedulable)
I0212 00:14:48.038652  123634 wrap.go:47] POST /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/events: (1.728883ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38688]
I0212 00:14:48.039176  123634 wrap.go:47] POST /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods: (2.357465ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38686]
I0212 00:14:48.041069  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-1: (1.637409ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38688]
I0212 00:14:48.041309  123634 wrap.go:47] POST /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods: (1.706794ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38694]
I0212 00:14:48.041527  123634 wrap.go:47] PUT /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-1/status: (2.887912ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38690]
I0212 00:14:48.043654  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-1: (1.717801ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38696]
I0212 00:14:48.043929  123634 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 00:14:48.044077  123634 scheduling_queue.go:868] About to try and schedule pod preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-15
I0212 00:14:48.044097  123634 scheduler.go:453] Attempting to schedule pod: preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-15
I0212 00:14:48.044098  123634 wrap.go:47] POST /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods: (2.296051ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38692]
I0212 00:14:48.044165  123634 factory.go:647] Unable to schedule preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-15: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 00:14:48.044215  123634 factory.go:742] Updating pod condition for preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-15 to (PodScheduled==False, Reason=Unschedulable)
I0212 00:14:48.045794  123634 wrap.go:47] PATCH /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/events/ppod-1.1582759125a93dff: (2.453394ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38686]
I0212 00:14:48.045866  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-15: (1.415373ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38692]
I0212 00:14:48.047894  123634 wrap.go:47] POST /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/events: (1.632767ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38692]
I0212 00:14:48.048244  123634 wrap.go:47] PUT /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-15/status: (2.820457ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38698]
I0212 00:14:48.048321  123634 wrap.go:47] POST /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods: (3.775819ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38696]
I0212 00:14:48.049948  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-15: (1.268644ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38692]
I0212 00:14:48.050219  123634 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 00:14:48.050379  123634 wrap.go:47] POST /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods: (1.658062ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38686]
I0212 00:14:48.050441  123634 scheduling_queue.go:868] About to try and schedule pod preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-18
I0212 00:14:48.050465  123634 scheduler.go:453] Attempting to schedule pod: preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-18
I0212 00:14:48.050606  123634 factory.go:647] Unable to schedule preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-18: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 00:14:48.050663  123634 factory.go:742] Updating pod condition for preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-18 to (PodScheduled==False, Reason=Unschedulable)
I0212 00:14:48.052799  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-18: (1.476465ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38700]
I0212 00:14:48.052891  123634 wrap.go:47] PUT /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-18/status: (1.967883ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38692]
I0212 00:14:48.053243  123634 wrap.go:47] POST /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods: (2.41846ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38686]
I0212 00:14:48.053290  123634 wrap.go:47] POST /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/events: (1.96991ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38702]
I0212 00:14:48.054566  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-18: (1.214607ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38692]
I0212 00:14:48.054799  123634 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 00:14:48.055114  123634 scheduling_queue.go:868] About to try and schedule pod preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-15
I0212 00:14:48.055157  123634 scheduler.go:453] Attempting to schedule pod: preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-15
I0212 00:14:48.055326  123634 factory.go:647] Unable to schedule preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-15: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 00:14:48.055413  123634 wrap.go:47] POST /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods: (1.673178ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38686]
I0212 00:14:48.056301  123634 factory.go:742] Updating pod condition for preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-15 to (PodScheduled==False, Reason=Unschedulable)
I0212 00:14:48.058462  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-15: (1.804727ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38692]
I0212 00:14:48.058772  123634 wrap.go:47] PUT /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-15/status: (1.78361ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38686]
I0212 00:14:48.059088  123634 wrap.go:47] POST /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods: (2.031608ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38704]
I0212 00:14:48.060371  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-15: (1.2488ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38692]
I0212 00:14:48.060667  123634 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 00:14:48.060808  123634 scheduling_queue.go:868] About to try and schedule pod preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-21
I0212 00:14:48.060817  123634 scheduler.go:453] Attempting to schedule pod: preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-21
I0212 00:14:48.060897  123634 factory.go:647] Unable to schedule preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-21: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 00:14:48.060940  123634 factory.go:742] Updating pod condition for preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-21 to (PodScheduled==False, Reason=Unschedulable)
I0212 00:14:48.060980  123634 wrap.go:47] PATCH /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/events/ppod-15.15827591266c25ab: (4.061454ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38700]
I0212 00:14:48.061465  123634 wrap.go:47] POST /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods: (1.691843ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38704]
I0212 00:14:48.062877  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-21: (1.583931ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38700]
I0212 00:14:48.063048  123634 wrap.go:47] POST /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/events: (1.635401ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38706]
I0212 00:14:48.063639  123634 wrap.go:47] POST /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods: (1.691471ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38704]
I0212 00:14:48.064119  123634 wrap.go:47] PUT /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-21/status: (2.731444ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38692]
I0212 00:14:48.065600  123634 wrap.go:47] POST /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods: (1.489285ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38706]
I0212 00:14:48.065928  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-21: (1.33629ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38692]
I0212 00:14:48.066293  123634 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 00:14:48.066427  123634 scheduling_queue.go:868] About to try and schedule pod preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-23
I0212 00:14:48.066448  123634 scheduler.go:453] Attempting to schedule pod: preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-23
I0212 00:14:48.066646  123634 factory.go:647] Unable to schedule preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-23: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 00:14:48.066693  123634 factory.go:742] Updating pod condition for preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-23 to (PodScheduled==False, Reason=Unschedulable)
I0212 00:14:48.067831  123634 wrap.go:47] POST /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods: (1.693537ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38706]
I0212 00:14:48.068687  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-23: (1.298651ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38692]
I0212 00:14:48.069756  123634 wrap.go:47] PUT /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-23/status: (2.366526ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38700]
I0212 00:14:48.070144  123634 wrap.go:47] POST /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/events: (2.526708ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38708]
I0212 00:14:48.071081  123634 wrap.go:47] POST /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods: (1.960738ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38692]
I0212 00:14:48.071904  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-23: (1.087283ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38708]
I0212 00:14:48.072190  123634 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 00:14:48.072365  123634 scheduling_queue.go:868] About to try and schedule pod preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-26
I0212 00:14:48.072387  123634 scheduler.go:453] Attempting to schedule pod: preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-26
I0212 00:14:48.072501  123634 factory.go:647] Unable to schedule preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-26: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 00:14:48.072591  123634 factory.go:742] Updating pod condition for preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-26 to (PodScheduled==False, Reason=Unschedulable)
I0212 00:14:48.073903  123634 wrap.go:47] POST /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods: (2.127463ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38692]
I0212 00:14:48.074442  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-26: (1.647402ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38708]
I0212 00:14:48.074820  123634 wrap.go:47] POST /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/events: (1.50486ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38710]
I0212 00:14:48.074930  123634 wrap.go:47] PUT /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-26/status: (2.083292ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38706]
I0212 00:14:48.076086  123634 wrap.go:47] POST /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods: (1.706285ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38692]
I0212 00:14:48.077032  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-26: (1.713685ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38706]
I0212 00:14:48.077396  123634 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 00:14:48.077668  123634 scheduling_queue.go:868] About to try and schedule pod preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-28
I0212 00:14:48.077695  123634 scheduler.go:453] Attempting to schedule pod: preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-28
I0212 00:14:48.077815  123634 factory.go:647] Unable to schedule preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-28: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 00:14:48.077866  123634 factory.go:742] Updating pod condition for preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-28 to (PodScheduled==False, Reason=Unschedulable)
I0212 00:14:48.077923  123634 wrap.go:47] POST /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods: (1.379575ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38692]
I0212 00:14:48.080053  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-28: (1.82068ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38692]
I0212 00:14:48.080642  123634 wrap.go:47] POST /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods: (2.30828ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38710]
I0212 00:14:48.080669  123634 wrap.go:47] PUT /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-28/status: (2.431413ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38706]
I0212 00:14:48.082613  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-28: (1.465822ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38692]
I0212 00:14:48.082613  123634 wrap.go:47] POST /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods: (1.585983ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38710]
I0212 00:14:48.083048  123634 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 00:14:48.083200  123634 scheduling_queue.go:868] About to try and schedule pod preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-30
I0212 00:14:48.083221  123634 scheduler.go:453] Attempting to schedule pod: preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-30
I0212 00:14:48.083224  123634 wrap.go:47] POST /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/events: (4.671785ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38712]
I0212 00:14:48.083316  123634 factory.go:647] Unable to schedule preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-30: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 00:14:48.083377  123634 factory.go:742] Updating pod condition for preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-30 to (PodScheduled==False, Reason=Unschedulable)
I0212 00:14:48.085451  123634 wrap.go:47] POST /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/events: (1.444302ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38714]
I0212 00:14:48.089217  123634 wrap.go:47] POST /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods: (4.200912ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38716]
I0212 00:14:48.089286  123634 wrap.go:47] PUT /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-30/status: (5.57152ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38692]
I0212 00:14:48.090753  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-30: (1.097214ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38714]
I0212 00:14:48.090771  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-30: (6.363068ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38710]
I0212 00:14:48.091080  123634 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 00:14:48.091734  123634 wrap.go:47] POST /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods: (2.037402ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38716]
I0212 00:14:48.092187  123634 scheduling_queue.go:868] About to try and schedule pod preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-32
I0212 00:14:48.092214  123634 scheduler.go:453] Attempting to schedule pod: preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-32
I0212 00:14:48.092391  123634 factory.go:647] Unable to schedule preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-32: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 00:14:48.092464  123634 factory.go:742] Updating pod condition for preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-32 to (PodScheduled==False, Reason=Unschedulable)
I0212 00:14:48.094503  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-32: (1.200547ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38718]
I0212 00:14:48.095195  123634 wrap.go:47] POST /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods: (3.074441ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38714]
I0212 00:14:48.095219  123634 wrap.go:47] PUT /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-32/status: (2.370662ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38710]
I0212 00:14:48.096215  123634 wrap.go:47] POST /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/events: (2.891197ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38720]
I0212 00:14:48.097337  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-32: (1.545523ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38710]
I0212 00:14:48.097619  123634 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 00:14:48.097728  123634 wrap.go:47] POST /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods: (1.898779ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38714]
I0212 00:14:48.097764  123634 scheduling_queue.go:868] About to try and schedule pod preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-35
I0212 00:14:48.097783  123634 scheduler.go:453] Attempting to schedule pod: preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-35
I0212 00:14:48.097882  123634 factory.go:647] Unable to schedule preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-35: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 00:14:48.097938  123634 factory.go:742] Updating pod condition for preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-35 to (PodScheduled==False, Reason=Unschedulable)
I0212 00:14:48.100354  123634 wrap.go:47] POST /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/events: (1.578129ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38724]
I0212 00:14:48.101082  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-35: (2.198506ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38722]
I0212 00:14:48.101493  123634 wrap.go:47] POST /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods: (3.282516ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38720]
I0212 00:14:48.101723  123634 wrap.go:47] PUT /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-35/status: (2.074028ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38718]
I0212 00:14:48.103175  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-35: (1.072036ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38718]
I0212 00:14:48.103496  123634 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 00:14:48.103744  123634 scheduling_queue.go:868] About to try and schedule pod preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-37
I0212 00:14:48.103785  123634 scheduler.go:453] Attempting to schedule pod: preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-37
I0212 00:14:48.103954  123634 factory.go:647] Unable to schedule preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-37: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 00:14:48.104017  123634 factory.go:742] Updating pod condition for preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-37 to (PodScheduled==False, Reason=Unschedulable)
I0212 00:14:48.104145  123634 wrap.go:47] POST /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods: (2.242858ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38720]
I0212 00:14:48.105244  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-37: (1.062357ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38718]
I0212 00:14:48.106864  123634 wrap.go:47] POST /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods: (2.333829ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38720]
I0212 00:14:48.107320  123634 wrap.go:47] PUT /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-37/status: (3.078748ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38722]
I0212 00:14:48.108348  123634 wrap.go:47] POST /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/events: (2.069302ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38724]
I0212 00:14:48.109387  123634 wrap.go:47] POST /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods: (1.742103ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38720]
I0212 00:14:48.110033  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-37: (2.198029ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38722]
I0212 00:14:48.110315  123634 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 00:14:48.110501  123634 scheduling_queue.go:868] About to try and schedule pod preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-35
I0212 00:14:48.110529  123634 scheduler.go:453] Attempting to schedule pod: preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-35
I0212 00:14:48.110632  123634 factory.go:647] Unable to schedule preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-35: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 00:14:48.110704  123634 factory.go:742] Updating pod condition for preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-35 to (PodScheduled==False, Reason=Unschedulable)
I0212 00:14:48.112752  123634 wrap.go:47] POST /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods: (2.762596ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38724]
I0212 00:14:48.113623  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-35: (1.640585ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38726]
I0212 00:14:48.114125  123634 wrap.go:47] PUT /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-35/status: (2.453548ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38722]
I0212 00:14:48.114535  123634 wrap.go:47] PATCH /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/events/ppod-35.15827591299fd228: (2.914162ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38718]
I0212 00:14:48.116505  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-35: (2.003063ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38722]
I0212 00:14:48.116823  123634 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 00:14:48.116835  123634 wrap.go:47] POST /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods: (1.973728ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38724]
I0212 00:14:48.116967  123634 scheduling_queue.go:868] About to try and schedule pod preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-40
I0212 00:14:48.116980  123634 scheduler.go:453] Attempting to schedule pod: preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-40
I0212 00:14:48.117065  123634 factory.go:647] Unable to schedule preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-40: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 00:14:48.117106  123634 factory.go:742] Updating pod condition for preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-40 to (PodScheduled==False, Reason=Unschedulable)
I0212 00:14:48.119241  123634 wrap.go:47] POST /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/events: (1.500058ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38730]
I0212 00:14:48.119286  123634 wrap.go:47] POST /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods: (1.997051ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38718]
I0212 00:14:48.120944  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-40: (3.170525ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38728]
I0212 00:14:48.121323  123634 wrap.go:47] PUT /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-40/status: (3.970821ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38726]
I0212 00:14:48.121692  123634 wrap.go:47] POST /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods: (1.931966ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38730]
I0212 00:14:48.123203  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-40: (1.361033ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38728]
I0212 00:14:48.123465  123634 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 00:14:48.123692  123634 wrap.go:47] POST /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods: (1.570524ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38730]
I0212 00:14:48.123697  123634 scheduling_queue.go:868] About to try and schedule pod preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-42
I0212 00:14:48.123723  123634 scheduler.go:453] Attempting to schedule pod: preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-42
I0212 00:14:48.123820  123634 factory.go:647] Unable to schedule preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-42: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 00:14:48.123897  123634 factory.go:742] Updating pod condition for preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-42 to (PodScheduled==False, Reason=Unschedulable)
I0212 00:14:48.125965  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-42: (1.798773ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38728]
I0212 00:14:48.126364  123634 wrap.go:47] POST /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods: (2.188931ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38718]
I0212 00:14:48.126770  123634 wrap.go:47] POST /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/events: (1.495031ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38734]
I0212 00:14:48.128346  123634 wrap.go:47] PUT /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-42/status: (2.006917ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38732]
I0212 00:14:48.128573  123634 wrap.go:47] POST /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods: (1.72301ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38718]
I0212 00:14:48.130049  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-42: (1.211929ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38734]
I0212 00:14:48.130382  123634 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 00:14:48.130691  123634 scheduling_queue.go:868] About to try and schedule pod preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-45
I0212 00:14:48.130732  123634 scheduler.go:453] Attempting to schedule pod: preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-45
I0212 00:14:48.130856  123634 wrap.go:47] POST /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods: (1.836133ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38728]
I0212 00:14:48.130864  123634 factory.go:647] Unable to schedule preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-45: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 00:14:48.130913  123634 factory.go:742] Updating pod condition for preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-45 to (PodScheduled==False, Reason=Unschedulable)
I0212 00:14:48.133640  123634 wrap.go:47] POST /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/events: (2.010874ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38736]
I0212 00:14:48.134121  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-45: (3.028674ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38728]
I0212 00:14:48.134310  123634 wrap.go:47] PUT /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-45/status: (3.189374ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38734]
I0212 00:14:48.136236  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-45: (1.401381ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38728]
I0212 00:14:48.136582  123634 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 00:14:48.136783  123634 scheduling_queue.go:868] About to try and schedule pod preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-48
I0212 00:14:48.136811  123634 scheduler.go:453] Attempting to schedule pod: preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-48
I0212 00:14:48.136938  123634 factory.go:647] Unable to schedule preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-48: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 00:14:48.137006  123634 factory.go:742] Updating pod condition for preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-48 to (PodScheduled==False, Reason=Unschedulable)
I0212 00:14:48.139092  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-48: (1.402728ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38736]
I0212 00:14:48.140088  123634 wrap.go:47] POST /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/events: (2.299159ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38738]
I0212 00:14:48.140457  123634 wrap.go:47] PUT /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-48/status: (2.755837ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38728]
I0212 00:14:48.142298  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-48: (1.367821ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38738]
I0212 00:14:48.142630  123634 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 00:14:48.142800  123634 scheduling_queue.go:868] About to try and schedule pod preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-45
I0212 00:14:48.142820  123634 scheduler.go:453] Attempting to schedule pod: preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-45
I0212 00:14:48.142941  123634 factory.go:647] Unable to schedule preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-45: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 00:14:48.143001  123634 factory.go:742] Updating pod condition for preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-45 to (PodScheduled==False, Reason=Unschedulable)
I0212 00:14:48.144608  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-45: (1.321144ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38738]
I0212 00:14:48.146070  123634 wrap.go:47] PUT /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-45/status: (2.727462ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38736]
I0212 00:14:48.147049  123634 wrap.go:47] PATCH /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/events/ppod-45.158275912b970183: (3.131414ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38740]
I0212 00:14:48.147862  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-45: (1.243508ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38736]
I0212 00:14:48.148191  123634 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 00:14:48.148406  123634 scheduling_queue.go:868] About to try and schedule pod preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-48
I0212 00:14:48.148427  123634 scheduler.go:453] Attempting to schedule pod: preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-48
I0212 00:14:48.148567  123634 factory.go:647] Unable to schedule preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-48: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 00:14:48.148646  123634 factory.go:742] Updating pod condition for preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-48 to (PodScheduled==False, Reason=Unschedulable)
I0212 00:14:48.155460  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-48: (6.379762ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38738]
I0212 00:14:48.156130  123634 wrap.go:47] PUT /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-48/status: (7.139536ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38740]
I0212 00:14:48.158394  123634 wrap.go:47] PATCH /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/events/ppod-48.158275912bf3f639: (8.681876ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38742]
I0212 00:14:48.160655  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-48: (1.317892ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38742]
I0212 00:14:48.160949  123634 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 00:14:48.161101  123634 scheduling_queue.go:868] About to try and schedule pod preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-49
I0212 00:14:48.161120  123634 scheduler.go:453] Attempting to schedule pod: preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-49
I0212 00:14:48.161213  123634 factory.go:647] Unable to schedule preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-49: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 00:14:48.161293  123634 factory.go:742] Updating pod condition for preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-49 to (PodScheduled==False, Reason=Unschedulable)
I0212 00:14:48.164181  123634 wrap.go:47] PUT /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-49/status: (2.577303ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38742]
I0212 00:14:48.164313  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-49: (2.489212ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38738]
I0212 00:14:48.165123  123634 wrap.go:47] POST /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/events: (3.030859ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38744]
I0212 00:14:48.166713  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-49: (1.486343ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38738]
I0212 00:14:48.167016  123634 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 00:14:48.167195  123634 scheduling_queue.go:868] About to try and schedule pod preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-42
I0212 00:14:48.167215  123634 scheduler.go:453] Attempting to schedule pod: preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-42
I0212 00:14:48.167293  123634 factory.go:647] Unable to schedule preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-42: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 00:14:48.167346  123634 factory.go:742] Updating pod condition for preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-42 to (PodScheduled==False, Reason=Unschedulable)
I0212 00:14:48.169530  123634 wrap.go:47] PUT /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-42/status: (1.908261ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38744]
I0212 00:14:48.170832  123634 wrap.go:47] PATCH /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/events/ppod-42.158275912b2b8eab: (2.872857ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38742]
I0212 00:14:48.172907  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-42: (1.244836ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38742]
I0212 00:14:48.172909  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-42: (1.969742ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38744]
I0212 00:14:48.173325  123634 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 00:14:48.173611  123634 scheduling_queue.go:868] About to try and schedule pod preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-49
I0212 00:14:48.173634  123634 scheduler.go:453] Attempting to schedule pod: preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-49
I0212 00:14:48.173830  123634 factory.go:647] Unable to schedule preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-49: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 00:14:48.173895  123634 factory.go:742] Updating pod condition for preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-49 to (PodScheduled==False, Reason=Unschedulable)
I0212 00:14:48.176221  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-49: (2.028064ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38742]
I0212 00:14:48.176840  123634 wrap.go:47] PATCH /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/events/ppod-49.158275912d664bdc: (2.357491ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38744]
I0212 00:14:48.177077  123634 wrap.go:47] PUT /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-49/status: (2.871969ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38746]
I0212 00:14:48.179156  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-49: (1.538681ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38742]
I0212 00:14:48.179583  123634 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 00:14:48.179764  123634 scheduling_queue.go:868] About to try and schedule pod preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-47
I0212 00:14:48.179783  123634 scheduler.go:453] Attempting to schedule pod: preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-47
I0212 00:14:48.179889  123634 factory.go:647] Unable to schedule preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-47: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 00:14:48.179950  123634 factory.go:742] Updating pod condition for preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-47 to (PodScheduled==False, Reason=Unschedulable)
I0212 00:14:48.182196  123634 wrap.go:47] POST /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/events: (1.79691ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38748]
I0212 00:14:48.182966  123634 wrap.go:47] PUT /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-47/status: (2.747255ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38742]
I0212 00:14:48.184065  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-47: (1.63479ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38748]
I0212 00:14:48.184625  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-47: (1.196053ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38750]
I0212 00:14:48.184899  123634 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 00:14:48.185081  123634 scheduling_queue.go:868] About to try and schedule pod preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-46
I0212 00:14:48.185103  123634 scheduler.go:453] Attempting to schedule pod: preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-46
I0212 00:14:48.185207  123634 factory.go:647] Unable to schedule preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-46: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 00:14:48.185264  123634 factory.go:742] Updating pod condition for preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-46 to (PodScheduled==False, Reason=Unschedulable)
I0212 00:14:48.186644  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-46: (1.190236ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38748]
I0212 00:14:48.187578  123634 wrap.go:47] POST /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/events: (1.583048ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38752]
I0212 00:14:48.188229  123634 wrap.go:47] PUT /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-46/status: (2.746427ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38742]
I0212 00:14:48.189882  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-46: (1.150476ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38752]
I0212 00:14:48.190199  123634 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 00:14:48.190371  123634 scheduling_queue.go:868] About to try and schedule pod preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-47
I0212 00:14:48.190390  123634 scheduler.go:453] Attempting to schedule pod: preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-47
I0212 00:14:48.190499  123634 factory.go:647] Unable to schedule preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-47: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 00:14:48.190583  123634 factory.go:742] Updating pod condition for preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-47 to (PodScheduled==False, Reason=Unschedulable)
I0212 00:14:48.191892  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-47: (1.030423ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38748]
I0212 00:14:48.193226  123634 wrap.go:47] PUT /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-47/status: (2.381133ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38752]
I0212 00:14:48.193921  123634 wrap.go:47] PATCH /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/events/ppod-47.158275912e834f5c: (2.247702ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38754]
I0212 00:14:48.195269  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-47: (1.078094ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38752]
I0212 00:14:48.195629  123634 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 00:14:48.195808  123634 scheduling_queue.go:868] About to try and schedule pod preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-46
I0212 00:14:48.195826  123634 scheduler.go:453] Attempting to schedule pod: preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-46
I0212 00:14:48.195944  123634 factory.go:647] Unable to schedule preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-46: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 00:14:48.195997  123634 factory.go:742] Updating pod condition for preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-46 to (PodScheduled==False, Reason=Unschedulable)
I0212 00:14:48.197419  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-46: (1.173732ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38748]
I0212 00:14:48.198668  123634 wrap.go:47] PUT /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-46/status: (2.445337ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38754]
I0212 00:14:48.199719  123634 wrap.go:47] PATCH /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/events/ppod-46.158275912ed46448: (2.800503ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38756]
I0212 00:14:48.200351  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-46: (1.226923ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38754]
I0212 00:14:48.200661  123634 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 00:14:48.200824  123634 scheduling_queue.go:868] About to try and schedule pod preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-40
I0212 00:14:48.200843  123634 scheduler.go:453] Attempting to schedule pod: preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-40
I0212 00:14:48.201205  123634 factory.go:647] Unable to schedule preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-40: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 00:14:48.201279  123634 factory.go:742] Updating pod condition for preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-40 to (PodScheduled==False, Reason=Unschedulable)
I0212 00:14:48.203048  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-40: (1.426909ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38748]
I0212 00:14:48.203346  123634 wrap.go:47] PUT /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-40/status: (1.816805ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38756]
I0212 00:14:48.204625  123634 wrap.go:47] PATCH /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/events/ppod-40.158275912ac45b09: (2.627709ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38758]
I0212 00:14:48.205696  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-40: (1.245326ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38756]
I0212 00:14:48.206010  123634 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 00:14:48.206189  123634 scheduling_queue.go:868] About to try and schedule pod preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-44
I0212 00:14:48.206208  123634 scheduler.go:453] Attempting to schedule pod: preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-44
I0212 00:14:48.206276  123634 factory.go:647] Unable to schedule preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-44: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 00:14:48.206320  123634 factory.go:742] Updating pod condition for preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-44 to (PodScheduled==False, Reason=Unschedulable)
I0212 00:14:48.208130  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-44: (1.152409ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38748]
I0212 00:14:48.209060  123634 wrap.go:47] PUT /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-44/status: (2.073744ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38758]
I0212 00:14:48.209966  123634 wrap.go:47] POST /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/events: (1.483281ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38760]
I0212 00:14:48.211599  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-44: (1.226923ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38758]
I0212 00:14:48.211915  123634 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 00:14:48.212121  123634 scheduling_queue.go:868] About to try and schedule pod preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-43
I0212 00:14:48.212144  123634 scheduler.go:453] Attempting to schedule pod: preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-43
I0212 00:14:48.212220  123634 factory.go:647] Unable to schedule preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-43: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 00:14:48.212272  123634 factory.go:742] Updating pod condition for preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-43 to (PodScheduled==False, Reason=Unschedulable)
I0212 00:14:48.215615  123634 wrap.go:47] PUT /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-43/status: (2.551913ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38758]
I0212 00:14:48.215803  123634 wrap.go:47] POST /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/events: (2.422198ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38762]
I0212 00:14:48.216013  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-43: (3.112314ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38748]
I0212 00:14:48.218591  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-43: (2.301036ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38758]
I0212 00:14:48.218918  123634 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 00:14:48.219333  123634 scheduling_queue.go:868] About to try and schedule pod preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-44
I0212 00:14:48.219351  123634 scheduler.go:453] Attempting to schedule pod: preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-44
I0212 00:14:48.219456  123634 factory.go:647] Unable to schedule preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-44: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 00:14:48.219511  123634 factory.go:742] Updating pod condition for preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-44 to (PodScheduled==False, Reason=Unschedulable)
I0212 00:14:48.221294  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-44: (1.363917ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38762]
I0212 00:14:48.221686  123634 wrap.go:47] PUT /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-44/status: (1.7438ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38748]
I0212 00:14:48.223164  123634 wrap.go:47] PATCH /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/events/ppod-44.158275913015b371: (2.382618ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38764]
I0212 00:14:48.223823  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-44: (1.269295ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38748]
I0212 00:14:48.224098  123634 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 00:14:48.224252  123634 scheduling_queue.go:868] About to try and schedule pod preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-41
I0212 00:14:48.224275  123634 scheduler.go:453] Attempting to schedule pod: preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-41
I0212 00:14:48.224394  123634 factory.go:647] Unable to schedule preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-41: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 00:14:48.224447  123634 factory.go:742] Updating pod condition for preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-41 to (PodScheduled==False, Reason=Unschedulable)
I0212 00:14:48.226422  123634 wrap.go:47] PUT /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-41/status: (1.663264ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38764]
I0212 00:14:48.227511  123634 wrap.go:47] POST /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/events: (1.741275ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38762]
I0212 00:14:48.228660  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-41: (988.514µs) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38766]
I0212 00:14:48.228665  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-41: (1.686797ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38764]
I0212 00:14:48.228966  123634 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 00:14:48.229130  123634 scheduling_queue.go:868] About to try and schedule pod preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-37
I0212 00:14:48.229150  123634 scheduler.go:453] Attempting to schedule pod: preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-37
I0212 00:14:48.229224  123634 factory.go:647] Unable to schedule preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-37: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 00:14:48.229276  123634 factory.go:742] Updating pod condition for preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-37 to (PodScheduled==False, Reason=Unschedulable)
I0212 00:14:48.230952  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-37: (1.259723ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38762]
I0212 00:14:48.231464  123634 wrap.go:47] PUT /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-37/status: (1.929511ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38764]
I0212 00:14:48.232479  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/preemptor-pod: (1.107112ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38762]
I0212 00:14:48.232500  123634 wrap.go:47] PATCH /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/events/ppod-37.1582759129fc7d6c: (2.369059ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38768]
I0212 00:14:48.233026  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-37: (1.11457ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38764]
I0212 00:14:48.233392  123634 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 00:14:48.233616  123634 scheduling_queue.go:868] About to try and schedule pod preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-41
I0212 00:14:48.233638  123634 scheduler.go:453] Attempting to schedule pod: preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-41
I0212 00:14:48.233773  123634 factory.go:647] Unable to schedule preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-41: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 00:14:48.233839  123634 factory.go:742] Updating pod condition for preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-41 to (PodScheduled==False, Reason=Unschedulable)
I0212 00:14:48.235871  123634 wrap.go:47] PUT /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-41/status: (1.770688ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38768]
I0212 00:14:48.238161  123634 wrap.go:47] PATCH /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/events/ppod-41.15827591312a49f5: (2.744661ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38770]
I0212 00:14:48.238440  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-41: (2.016257ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38768]
I0212 00:14:48.238734  123634 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 00:14:48.238894  123634 scheduling_queue.go:868] About to try and schedule pod preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-39
I0212 00:14:48.238911  123634 scheduler.go:453] Attempting to schedule pod: preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-39
I0212 00:14:48.238919  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-41: (4.580805ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38762]
I0212 00:14:48.239005  123634 factory.go:647] Unable to schedule preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-39: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 00:14:48.239086  123634 factory.go:742] Updating pod condition for preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-39 to (PodScheduled==False, Reason=Unschedulable)
I0212 00:14:48.241013  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-39: (1.656401ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38768]
I0212 00:14:48.241357  123634 wrap.go:47] POST /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/events: (1.442857ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38772]
I0212 00:14:48.242522  123634 wrap.go:47] PUT /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-39/status: (3.159951ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38770]
I0212 00:14:48.244356  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-39: (1.305662ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38768]
I0212 00:14:48.244761  123634 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 00:14:48.244951  123634 scheduling_queue.go:868] About to try and schedule pod preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-38
I0212 00:14:48.244973  123634 scheduler.go:453] Attempting to schedule pod: preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-38
I0212 00:14:48.245091  123634 factory.go:647] Unable to schedule preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-38: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 00:14:48.245192  123634 factory.go:742] Updating pod condition for preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-38 to (PodScheduled==False, Reason=Unschedulable)
I0212 00:14:48.246646  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-38: (1.20438ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38772]
I0212 00:14:48.247778  123634 wrap.go:47] POST /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/events: (1.450918ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38774]
I0212 00:14:48.248103  123634 wrap.go:47] PUT /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-38/status: (2.421663ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38768]
I0212 00:14:48.249744  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-38: (1.151616ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38774]
I0212 00:14:48.250002  123634 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 00:14:48.250213  123634 scheduling_queue.go:868] About to try and schedule pod preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-39
I0212 00:14:48.250237  123634 scheduler.go:453] Attempting to schedule pod: preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-39
I0212 00:14:48.250343  123634 factory.go:647] Unable to schedule preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-39: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 00:14:48.250404  123634 factory.go:742] Updating pod condition for preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-39 to (PodScheduled==False, Reason=Unschedulable)
I0212 00:14:48.254479  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-39: (3.773151ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38772]
I0212 00:14:48.255524  123634 wrap.go:47] PATCH /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/events/ppod-39.15827591320988a5: (4.05208ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38776]
I0212 00:14:48.255890  123634 wrap.go:47] PUT /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-39/status: (5.019615ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38774]
I0212 00:14:48.257686  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-39: (1.336314ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38776]
I0212 00:14:48.257975  123634 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 00:14:48.258171  123634 scheduling_queue.go:868] About to try and schedule pod preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-38
I0212 00:14:48.258191  123634 scheduler.go:453] Attempting to schedule pod: preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-38
I0212 00:14:48.258306  123634 factory.go:647] Unable to schedule preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-38: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 00:14:48.258405  123634 factory.go:742] Updating pod condition for preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-38 to (PodScheduled==False, Reason=Unschedulable)
I0212 00:14:48.260370  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-38: (1.651047ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38776]
I0212 00:14:48.260822  123634 wrap.go:47] PUT /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-38/status: (2.132723ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38772]
I0212 00:14:48.262576  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-38: (1.270139ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38772]
I0212 00:14:48.263065  123634 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 00:14:48.263230  123634 scheduling_queue.go:868] About to try and schedule pod preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-36
I0212 00:14:48.263250  123634 scheduler.go:453] Attempting to schedule pod: preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-36
I0212 00:14:48.263289  123634 wrap.go:47] PATCH /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/events/ppod-38.158275913266c937: (2.639539ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38776]
I0212 00:14:48.263451  123634 factory.go:647] Unable to schedule preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-36: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 00:14:48.263531  123634 factory.go:742] Updating pod condition for preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-36 to (PodScheduled==False, Reason=Unschedulable)
I0212 00:14:48.265089  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-36: (1.289712ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38772]
I0212 00:14:48.266430  123634 wrap.go:47] PUT /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-36/status: (2.631885ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38778]
I0212 00:14:48.267274  123634 wrap.go:47] POST /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/events: (3.028348ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38780]
I0212 00:14:48.268193  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-36: (1.230632ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38778]
I0212 00:14:48.268598  123634 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 00:14:48.268926  123634 scheduling_queue.go:868] About to try and schedule pod preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-32
I0212 00:14:48.268949  123634 scheduler.go:453] Attempting to schedule pod: preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-32
I0212 00:14:48.269242  123634 factory.go:647] Unable to schedule preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-32: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 00:14:48.269334  123634 factory.go:742] Updating pod condition for preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-32 to (PodScheduled==False, Reason=Unschedulable)
I0212 00:14:48.271244  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-32: (1.544558ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38780]
I0212 00:14:48.271379  123634 wrap.go:47] PUT /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-32/status: (1.625077ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38772]
I0212 00:14:48.273407  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-32: (1.573621ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38772]
I0212 00:14:48.273677  123634 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 00:14:48.273866  123634 scheduling_queue.go:868] About to try and schedule pod preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-36
I0212 00:14:48.273886  123634 scheduler.go:453] Attempting to schedule pod: preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-36
I0212 00:14:48.273955  123634 factory.go:647] Unable to schedule preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-36: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 00:14:48.273993  123634 factory.go:742] Updating pod condition for preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-36 to (PodScheduled==False, Reason=Unschedulable)
I0212 00:14:48.274447  123634 wrap.go:47] PATCH /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/events/ppod-32.15827591294c5e13: (3.774405ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38782]
I0212 00:14:48.281328  123634 wrap.go:47] PUT /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-36/status: (2.571371ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38772]
I0212 00:14:48.281766  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-36: (3.103686ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38780]
I0212 00:14:48.282680  123634 wrap.go:47] PATCH /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/events/ppod-36.15827591337ea149: (3.102364ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38782]
I0212 00:14:48.283865  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-36: (1.153625ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38772]
I0212 00:14:48.284152  123634 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 00:14:48.284294  123634 scheduling_queue.go:868] About to try and schedule pod preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-30
I0212 00:14:48.284307  123634 scheduler.go:453] Attempting to schedule pod: preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-30
I0212 00:14:48.284438  123634 factory.go:647] Unable to schedule preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-30: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 00:14:48.284476  123634 factory.go:742] Updating pod condition for preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-30 to (PodScheduled==False, Reason=Unschedulable)
I0212 00:14:48.286260  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-30: (1.54406ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38780]
I0212 00:14:48.287155  123634 wrap.go:47] PUT /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-30/status: (2.402345ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38782]
I0212 00:14:48.288906  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-30: (1.078236ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38782]
I0212 00:14:48.289143  123634 wrap.go:47] PATCH /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/events/ppod-30.1582759128c1b001: (3.801206ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38786]
I0212 00:14:48.289277  123634 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 00:14:48.289471  123634 scheduling_queue.go:868] About to try and schedule pod preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-34
I0212 00:14:48.289535  123634 scheduler.go:453] Attempting to schedule pod: preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-34
I0212 00:14:48.289723  123634 factory.go:647] Unable to schedule preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-34: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 00:14:48.289789  123634 factory.go:742] Updating pod condition for preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-34 to (PodScheduled==False, Reason=Unschedulable)
I0212 00:14:48.291223  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-34: (1.178702ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38780]
I0212 00:14:48.292217  123634 wrap.go:47] POST /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/events: (1.695401ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38788]
I0212 00:14:48.292906  123634 wrap.go:47] PUT /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-34/status: (2.812052ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38782]
I0212 00:14:48.294630  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-34: (1.276439ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38788]
I0212 00:14:48.294915  123634 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 00:14:48.295079  123634 scheduling_queue.go:868] About to try and schedule pod preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-33
I0212 00:14:48.295097  123634 scheduler.go:453] Attempting to schedule pod: preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-33
I0212 00:14:48.295220  123634 factory.go:647] Unable to schedule preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-33: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 00:14:48.295279  123634 factory.go:742] Updating pod condition for preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-33 to (PodScheduled==False, Reason=Unschedulable)
I0212 00:14:48.297392  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-33: (1.660176ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38780]
I0212 00:14:48.297594  123634 wrap.go:47] PUT /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-33/status: (2.039289ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38788]
I0212 00:14:48.297925  123634 wrap.go:47] POST /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/events: (1.494099ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38790]
I0212 00:14:48.299475  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-33: (1.357022ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38788]
I0212 00:14:48.299760  123634 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 00:14:48.299953  123634 scheduling_queue.go:868] About to try and schedule pod preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-34
I0212 00:14:48.299973  123634 scheduler.go:453] Attempting to schedule pod: preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-34
I0212 00:14:48.300097  123634 factory.go:647] Unable to schedule preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-34: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 00:14:48.300157  123634 factory.go:742] Updating pod condition for preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-34 to (PodScheduled==False, Reason=Unschedulable)
I0212 00:14:48.302103  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-34: (1.558182ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38780]
I0212 00:14:48.302900  123634 wrap.go:47] PUT /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-34/status: (2.464601ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38790]
I0212 00:14:48.304877  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-34: (1.449061ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38790]
I0212 00:14:48.305165  123634 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 00:14:48.305182  123634 wrap.go:47] PATCH /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/events/ppod-34.15827591350f3bbd: (4.174315ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38792]
I0212 00:14:48.305330  123634 scheduling_queue.go:868] About to try and schedule pod preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-33
I0212 00:14:48.305370  123634 scheduler.go:453] Attempting to schedule pod: preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-33
I0212 00:14:48.305517  123634 factory.go:647] Unable to schedule preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-33: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 00:14:48.305618  123634 factory.go:742] Updating pod condition for preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-33 to (PodScheduled==False, Reason=Unschedulable)
I0212 00:14:48.307649  123634 wrap.go:47] PUT /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-33/status: (1.774596ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38790]
I0212 00:14:48.308118  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-33: (1.61016ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38780]
I0212 00:14:48.309244  123634 wrap.go:47] PATCH /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/events/ppod-33.15827591356316e5: (2.709902ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38794]
I0212 00:14:48.309574  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-33: (1.417029ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38790]
I0212 00:14:48.309870  123634 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 00:14:48.310082  123634 scheduling_queue.go:868] About to try and schedule pod preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-28
I0212 00:14:48.310103  123634 scheduler.go:453] Attempting to schedule pod: preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-28
I0212 00:14:48.310215  123634 factory.go:647] Unable to schedule preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-28: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 00:14:48.310273  123634 factory.go:742] Updating pod condition for preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-28 to (PodScheduled==False, Reason=Unschedulable)
I0212 00:14:48.311927  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-28: (1.349112ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38780]
I0212 00:14:48.312179  123634 wrap.go:47] PUT /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-28/status: (1.667386ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38794]
I0212 00:14:48.313843  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-28: (1.08885ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38794]
I0212 00:14:48.314182  123634 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 00:14:48.314348  123634 scheduling_queue.go:868] About to try and schedule pod preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-31
I0212 00:14:48.314365  123634 scheduler.go:453] Attempting to schedule pod: preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-31
I0212 00:14:48.314434  123634 factory.go:647] Unable to schedule preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-31: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 00:14:48.314477  123634 factory.go:742] Updating pod condition for preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-31 to (PodScheduled==False, Reason=Unschedulable)
I0212 00:14:48.315158  123634 wrap.go:47] PATCH /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/events/ppod-28.15827591286da35f: (3.368533ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38796]
I0212 00:14:48.315981  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-31: (1.13449ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38780]
I0212 00:14:48.316818  123634 wrap.go:47] PUT /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-31/status: (2.071192ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38794]
I0212 00:14:48.317841  123634 wrap.go:47] POST /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/events: (1.780408ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38796]
I0212 00:14:48.318792  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-31: (1.557364ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38794]
I0212 00:14:48.319126  123634 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 00:14:48.319285  123634 scheduling_queue.go:868] About to try and schedule pod preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-26
I0212 00:14:48.319307  123634 scheduler.go:453] Attempting to schedule pod: preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-26
I0212 00:14:48.319409  123634 factory.go:647] Unable to schedule preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-26: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 00:14:48.319461  123634 factory.go:742] Updating pod condition for preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-26 to (PodScheduled==False, Reason=Unschedulable)
I0212 00:14:48.321058  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-26: (1.310273ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38780]
I0212 00:14:48.322474  123634 wrap.go:47] PUT /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-26/status: (2.759986ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38796]
I0212 00:14:48.322833  123634 wrap.go:47] PATCH /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/events/ppod-26.15827591281d1a8e: (2.556563ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38798]
I0212 00:14:48.324416  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-26: (1.416125ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38796]
I0212 00:14:48.324820  123634 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 00:14:48.325006  123634 scheduling_queue.go:868] About to try and schedule pod preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-31
I0212 00:14:48.325029  123634 scheduler.go:453] Attempting to schedule pod: preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-31
I0212 00:14:48.325127  123634 factory.go:647] Unable to schedule preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-31: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 00:14:48.325187  123634 factory.go:742] Updating pod condition for preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-31 to (PodScheduled==False, Reason=Unschedulable)
I0212 00:14:48.326928  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-31: (1.343864ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38780]
I0212 00:14:48.327338  123634 wrap.go:47] PUT /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-31/status: (1.897623ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38798]
I0212 00:14:48.329534  123634 wrap.go:47] PATCH /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/events/ppod-31.1582759136881129: (2.466293ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38800]
I0212 00:14:48.330106  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-31: (2.118996ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38798]
I0212 00:14:48.330420  123634 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 00:14:48.330603  123634 scheduling_queue.go:868] About to try and schedule pod preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-29
I0212 00:14:48.330618  123634 scheduler.go:453] Attempting to schedule pod: preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-29
I0212 00:14:48.330696  123634 factory.go:647] Unable to schedule preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-29: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 00:14:48.330763  123634 factory.go:742] Updating pod condition for preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-29 to (PodScheduled==False, Reason=Unschedulable)
I0212 00:14:48.333078  123634 wrap.go:47] POST /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/events: (1.528923ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38802]
I0212 00:14:48.333729  123634 wrap.go:47] PUT /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-29/status: (2.733011ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38800]
I0212 00:14:48.334506  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/preemptor-pod: (956.08µs) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38802]
I0212 00:14:48.334754  123634 preemption_test.go:583] Check unschedulable pods still exists and were never scheduled...
I0212 00:14:48.335001  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-29: (3.877368ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38780]
I0212 00:14:48.335739  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-29: (1.062454ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38800]
I0212 00:14:48.336119  123634 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 00:14:48.336256  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-0: (1.351363ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38802]
I0212 00:14:48.336282  123634 scheduling_queue.go:868] About to try and schedule pod preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-23
I0212 00:14:48.336302  123634 scheduler.go:453] Attempting to schedule pod: preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-23
I0212 00:14:48.336398  123634 factory.go:647] Unable to schedule preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-23: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 00:14:48.336437  123634 factory.go:742] Updating pod condition for preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-23 to (PodScheduled==False, Reason=Unschedulable)
I0212 00:14:48.338918  123634 wrap.go:47] PUT /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-23/status: (1.920466ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38806]
I0212 00:14:48.339364  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-1: (2.611944ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38804]
I0212 00:14:48.340356  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-23: (3.591392ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38780]
I0212 00:14:48.341156  123634 wrap.go:47] PATCH /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/events/ppod-23.1582759127c319fe: (2.636332ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38808]
I0212 00:14:48.341405  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-23: (1.874559ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38806]
I0212 00:14:48.341734  123634 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 00:14:48.341920  123634 scheduling_queue.go:868] About to try and schedule pod preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-29
I0212 00:14:48.341946  123634 scheduler.go:453] Attempting to schedule pod: preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-29
I0212 00:14:48.342054  123634 factory.go:647] Unable to schedule preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-29: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 00:14:48.342139  123634 factory.go:742] Updating pod condition for preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-29 to (PodScheduled==False, Reason=Unschedulable)
I0212 00:14:48.343111  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-2: (1.559715ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38804]
I0212 00:14:48.343727  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-29: (1.398652ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38808]
I0212 00:14:48.344844  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-3: (1.309118ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38804]
I0212 00:14:48.346271  123634 wrap.go:47] PATCH /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/events/ppod-29.15827591378053b1: (2.906609ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38780]
I0212 00:14:48.346705  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-4: (1.364648ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38804]
I0212 00:14:48.346747  123634 wrap.go:47] PUT /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-29/status: (1.94018ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38808]
I0212 00:14:48.348649  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-5: (1.486827ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38804]
I0212 00:14:48.348905  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-29: (1.524205ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38780]
I0212 00:14:48.349168  123634 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 00:14:48.349312  123634 scheduling_queue.go:868] About to try and schedule pod preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-27
I0212 00:14:48.349324  123634 scheduler.go:453] Attempting to schedule pod: preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-27
I0212 00:14:48.349438  123634 factory.go:647] Unable to schedule preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-27: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 00:14:48.349573  123634 factory.go:742] Updating pod condition for preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-27 to (PodScheduled==False, Reason=Unschedulable)
I0212 00:14:48.350234  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-6: (1.205998ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38804]
I0212 00:14:48.352108  123634 wrap.go:47] PUT /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-27/status: (2.227665ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38780]
I0212 00:14:48.353052  123634 wrap.go:47] POST /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/events: (2.422393ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38812]
I0212 00:14:48.353571  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-7: (2.774121ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38814]
I0212 00:14:48.356318  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-27: (3.609162ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38780]
I0212 00:14:48.356752  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-27: (6.317475ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38804]
I0212 00:14:48.357085  123634 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 00:14:48.357302  123634 scheduling_queue.go:868] About to try and schedule pod preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-25
I0212 00:14:48.357316  123634 scheduler.go:453] Attempting to schedule pod: preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-25
I0212 00:14:48.357406  123634 factory.go:647] Unable to schedule preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-25: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 00:14:48.357443  123634 factory.go:742] Updating pod condition for preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-25 to (PodScheduled==False, Reason=Unschedulable)
I0212 00:14:48.359896  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-8: (2.771984ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38810]
I0212 00:14:48.360057  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-25: (2.267714ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38812]
I0212 00:14:48.360316  123634 wrap.go:47] PUT /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-25/status: (2.537836ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38804]
I0212 00:14:48.360764  123634 wrap.go:47] POST /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/events: (2.661075ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38816]
I0212 00:14:48.361618  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-9: (970.228µs) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38810]
I0212 00:14:48.362046  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-25: (942.177µs) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38804]
I0212 00:14:48.362271  123634 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 00:14:48.362404  123634 scheduling_queue.go:868] About to try and schedule pod preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-21
I0212 00:14:48.362415  123634 scheduler.go:453] Attempting to schedule pod: preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-21
I0212 00:14:48.362513  123634 factory.go:647] Unable to schedule preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-21: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 00:14:48.362585  123634 factory.go:742] Updating pod condition for preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-21 to (PodScheduled==False, Reason=Unschedulable)
I0212 00:14:48.368713  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-10: (6.781317ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38810]
I0212 00:14:48.369578  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-21: (6.168894ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38812]
I0212 00:14:48.370147  123634 wrap.go:47] PUT /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-21/status: (7.335553ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38804]
I0212 00:14:48.374244  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-11: (5.065551ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38810]
I0212 00:14:48.374847  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-21: (4.092747ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38804]
I0212 00:14:48.375122  123634 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 00:14:48.375185  123634 wrap.go:47] PATCH /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/events/ppod-21.15827591276b6180: (11.267753ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38818]
I0212 00:14:48.375271  123634 scheduling_queue.go:868] About to try and schedule pod preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-24
I0212 00:14:48.375281  123634 scheduler.go:453] Attempting to schedule pod: preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-24
I0212 00:14:48.375383  123634 factory.go:647] Unable to schedule preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-24: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 00:14:48.375426  123634 factory.go:742] Updating pod condition for preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-24 to (PodScheduled==False, Reason=Unschedulable)
I0212 00:14:48.378511  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-24: (2.77738ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38812]
I0212 00:14:48.378987  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-12: (4.116097ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38810]
I0212 00:14:48.379530  123634 wrap.go:47] POST /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/events: (3.377226ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38820]
I0212 00:14:48.380439  123634 wrap.go:47] PUT /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-24/status: (4.4909ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38804]
I0212 00:14:48.382103  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-13: (1.950407ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38810]
I0212 00:14:48.383940  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-14: (1.03342ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38810]
I0212 00:14:48.385035  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-24: (1.769463ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38820]
I0212 00:14:48.385470  123634 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 00:14:48.385622  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-15: (1.246484ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38810]
I0212 00:14:48.385663  123634 scheduling_queue.go:868] About to try and schedule pod preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-22
I0212 00:14:48.385677  123634 scheduler.go:453] Attempting to schedule pod: preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-22
I0212 00:14:48.385807  123634 factory.go:647] Unable to schedule preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-22: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 00:14:48.385858  123634 factory.go:742] Updating pod condition for preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-22 to (PodScheduled==False, Reason=Unschedulable)
I0212 00:14:48.387836  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-16: (1.783869ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38820]
I0212 00:14:48.388055  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-22: (1.677213ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38822]
I0212 00:14:48.388429  123634 wrap.go:47] POST /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/events: (1.998455ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38824]
I0212 00:14:48.389190  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-17: (956.67µs) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38820]
I0212 00:14:48.390533  123634 wrap.go:47] PUT /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-22/status: (4.435487ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38812]
I0212 00:14:48.391964  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-18: (2.355203ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38824]
I0212 00:14:48.391964  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-22: (1.052739ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38812]
I0212 00:14:48.392523  123634 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 00:14:48.392692  123634 scheduling_queue.go:868] About to try and schedule pod preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-24
I0212 00:14:48.392712  123634 scheduler.go:453] Attempting to schedule pod: preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-24
I0212 00:14:48.392791  123634 factory.go:647] Unable to schedule preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-24: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 00:14:48.392822  123634 factory.go:742] Updating pod condition for preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-24 to (PodScheduled==False, Reason=Unschedulable)
I0212 00:14:48.393611  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-19: (1.18865ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38824]
I0212 00:14:48.394364  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-24: (1.082682ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38826]
I0212 00:14:48.395064  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-20: (1.077358ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38824]
I0212 00:14:48.396879  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-21: (1.444122ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38824]
I0212 00:14:48.397704  123634 wrap.go:47] PUT /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-24/status: (1.883309ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38822]
I0212 00:14:48.398610  123634 wrap.go:47] PATCH /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/events/ppod-24.158275913a2a1325: (3.626918ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38826]
I0212 00:14:48.399658  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-24: (1.606734ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38822]
I0212 00:14:48.399677  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-22: (2.158042ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38824]
I0212 00:14:48.399952  123634 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 00:14:48.400195  123634 scheduling_queue.go:868] About to try and schedule pod preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-22
I0212 00:14:48.400214  123634 scheduler.go:453] Attempting to schedule pod: preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-22
I0212 00:14:48.400302  123634 factory.go:647] Unable to schedule preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-22: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 00:14:48.400926  123634 factory.go:742] Updating pod condition for preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-22 to (PodScheduled==False, Reason=Unschedulable)
I0212 00:14:48.402156  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-22: (1.376976ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38828]
I0212 00:14:48.402217  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-23: (2.186827ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38826]
I0212 00:14:48.403849  123634 wrap.go:47] PUT /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-22/status: (1.995117ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38832]
I0212 00:14:48.403933  123634 wrap.go:47] PATCH /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/events/ppod-22.158275913ac9396d: (2.615894ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38830]
I0212 00:14:48.404031  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-24: (1.286675ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38828]
I0212 00:14:48.405614  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-22: (1.250215ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38832]
I0212 00:14:48.405866  123634 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 00:14:48.405915  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-25: (1.429583ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38826]
I0212 00:14:48.406055  123634 scheduling_queue.go:868] About to try and schedule pod preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-18
I0212 00:14:48.406074  123634 scheduler.go:453] Attempting to schedule pod: preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-18
I0212 00:14:48.406206  123634 factory.go:647] Unable to schedule preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-18: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 00:14:48.406273  123634 factory.go:742] Updating pod condition for preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-18 to (PodScheduled==False, Reason=Unschedulable)
I0212 00:14:48.408901  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-18: (2.433211ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38832]
I0212 00:14:48.409001  123634 wrap.go:47] PUT /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-18/status: (2.211305ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38834]
I0212 00:14:48.409001  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-26: (2.684599ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38826]
I0212 00:14:48.409475  123634 wrap.go:47] PATCH /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/events/ppod-18.1582759126ce897b: (2.173134ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38836]
I0212 00:14:48.410467  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-18: (1.117308ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38834]
I0212 00:14:48.410811  123634 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 00:14:48.411007  123634 scheduling_queue.go:868] About to try and schedule pod preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-20
I0212 00:14:48.411028  123634 scheduler.go:453] Attempting to schedule pod: preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-20
I0212 00:14:48.411150  123634 factory.go:647] Unable to schedule preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-20: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 00:14:48.411192  123634 factory.go:742] Updating pod condition for preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-20 to (PodScheduled==False, Reason=Unschedulable)
I0212 00:14:48.411298  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-27: (1.092386ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38836]
I0212 00:14:48.413330  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-20: (1.626289ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38832]
I0212 00:14:48.413504  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-28: (1.545146ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38838]
I0212 00:14:48.414088  123634 wrap.go:47] PUT /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-20/status: (2.619231ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38834]
I0212 00:14:48.414959  123634 wrap.go:47] POST /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/events: (3.261995ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38836]
I0212 00:14:48.415309  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-29: (1.301887ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38838]
I0212 00:14:48.415853  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-20: (1.193833ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38834]
I0212 00:14:48.416122  123634 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 00:14:48.416274  123634 scheduling_queue.go:868] About to try and schedule pod preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-19
I0212 00:14:48.416295  123634 scheduler.go:453] Attempting to schedule pod: preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-19
I0212 00:14:48.416391  123634 factory.go:647] Unable to schedule preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-19: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 00:14:48.416471  123634 factory.go:742] Updating pod condition for preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-19 to (PodScheduled==False, Reason=Unschedulable)
I0212 00:14:48.416954  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-30: (1.150249ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38836]
I0212 00:14:48.418169  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-19: (1.061183ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38832]
I0212 00:14:48.418712  123634 wrap.go:47] PUT /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-19/status: (1.992589ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38834]
I0212 00:14:48.419394  123634 wrap.go:47] POST /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/events: (2.109374ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38840]
I0212 00:14:48.419560  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-31: (2.159939ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38836]
I0212 00:14:48.420969  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-32: (1.034956ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38840]
I0212 00:14:48.421030  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-19: (1.631328ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38834]
I0212 00:14:48.421372  123634 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 00:14:48.421521  123634 scheduling_queue.go:868] About to try and schedule pod preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-20
I0212 00:14:48.421541  123634 scheduler.go:453] Attempting to schedule pod: preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-20
I0212 00:14:48.421666  123634 factory.go:647] Unable to schedule preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-20: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 00:14:48.421757  123634 factory.go:742] Updating pod condition for preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-20 to (PodScheduled==False, Reason=Unschedulable)
I0212 00:14:48.422984  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-33: (1.474525ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38840]
I0212 00:14:48.424975  123634 wrap.go:47] PUT /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-20/status: (2.389406ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38832]
I0212 00:14:48.425088  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-20: (2.670615ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38842]
I0212 00:14:48.425297  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-34: (1.191829ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38840]
I0212 00:14:48.425537  123634 wrap.go:47] PATCH /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/events/ppod-20.158275913c4bd5a3: (2.53359ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38844]
I0212 00:14:48.426981  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-20: (1.047006ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38842]
I0212 00:14:48.427016  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-35: (1.133049ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38832]
I0212 00:14:48.427266  123634 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 00:14:48.427460  123634 scheduling_queue.go:868] About to try and schedule pod preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-19
I0212 00:14:48.427479  123634 scheduler.go:453] Attempting to schedule pod: preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-19
I0212 00:14:48.427626  123634 factory.go:647] Unable to schedule preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-19: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 00:14:48.427675  123634 factory.go:742] Updating pod condition for preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-19 to (PodScheduled==False, Reason=Unschedulable)
I0212 00:14:48.428503  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-36: (1.141736ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38842]
I0212 00:14:48.430059  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-19: (1.899624ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38846]
I0212 00:14:48.430963  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-37: (2.063652ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38842]
I0212 00:14:48.430972  123634 wrap.go:47] PUT /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-19/status: (3.02556ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38832]
I0212 00:14:48.430963  123634 wrap.go:47] PATCH /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/events/ppod-19.158275913c9be44f: (2.336405ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38848]
I0212 00:14:48.432789  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-19: (1.308963ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38832]
I0212 00:14:48.432797  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-38: (1.270556ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38846]
I0212 00:14:48.433084  123634 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 00:14:48.433235  123634 scheduling_queue.go:868] About to try and schedule pod preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-17
I0212 00:14:48.433261  123634 scheduler.go:453] Attempting to schedule pod: preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-17
I0212 00:14:48.433385  123634 factory.go:647] Unable to schedule preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-17: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 00:14:48.433454  123634 factory.go:742] Updating pod condition for preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-17 to (PodScheduled==False, Reason=Unschedulable)
I0212 00:14:48.434443  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-39: (1.24091ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38832]
I0212 00:14:48.435416  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-17: (1.630363ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38846]
I0212 00:14:48.436037  123634 wrap.go:47] POST /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/events: (1.566587ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38850]
I0212 00:14:48.436848  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-40: (1.614489ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38832]
I0212 00:14:48.438282  123634 wrap.go:47] PUT /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-17/status: (2.376807ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38846]
I0212 00:14:48.438293  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-41: (973.497µs) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38852]
I0212 00:14:48.440024  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-17: (1.218631ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38852]
I0212 00:14:48.440051  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-42: (1.220922ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38850]
I0212 00:14:48.440605  123634 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 00:14:48.440753  123634 scheduling_queue.go:868] About to try and schedule pod preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-16
I0212 00:14:48.440771  123634 scheduler.go:453] Attempting to schedule pod: preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-16
I0212 00:14:48.440883  123634 factory.go:647] Unable to schedule preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-16: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 00:14:48.440957  123634 factory.go:742] Updating pod condition for preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-16 to (PodScheduled==False, Reason=Unschedulable)
I0212 00:14:48.442236  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-43: (1.38664ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38850]
I0212 00:14:48.443889  123634 wrap.go:47] POST /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/events: (2.009267ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38854]
I0212 00:14:48.443971  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-44: (1.139136ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38850]
I0212 00:14:48.445941  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-45: (1.528942ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38854]
I0212 00:14:48.446663  123634 wrap.go:47] PUT /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-16/status: (4.831051ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38852]
I0212 00:14:48.447743  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-46: (1.351478ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38854]
I0212 00:14:48.448037  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-16: (1.037046ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38852]
I0212 00:14:48.448279  123634 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 00:14:48.448881  123634 scheduling_queue.go:868] About to try and schedule pod preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-17
I0212 00:14:48.448933  123634 scheduler.go:453] Attempting to schedule pod: preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-17
I0212 00:14:48.449006  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-16: (7.13732ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38856]
I0212 00:14:48.449096  123634 factory.go:647] Unable to schedule preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-17: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 00:14:48.449237  123634 factory.go:742] Updating pod condition for preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-17 to (PodScheduled==False, Reason=Unschedulable)
I0212 00:14:48.450666  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-47: (2.460468ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38854]
I0212 00:14:48.453726  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-17: (3.737416ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38852]
I0212 00:14:48.454458  123634 wrap.go:47] PUT /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-17/status: (4.449278ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38850]
I0212 00:14:48.454613  123634 wrap.go:47] PATCH /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/events/ppod-17.158275913d9f6e36: (4.14983ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38858]
I0212 00:14:48.454705  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-48: (3.593541ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38854]
I0212 00:14:48.456306  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-17: (1.049009ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38850]
I0212 00:14:48.456753  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-49: (1.63771ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38858]
I0212 00:14:48.457048  123634 preemption_test.go:598] Cleaning up all pods...
I0212 00:14:48.457107  123634 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 00:14:48.457322  123634 scheduling_queue.go:868] About to try and schedule pod preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-16
I0212 00:14:48.457383  123634 scheduler.go:453] Attempting to schedule pod: preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-16
I0212 00:14:48.457521  123634 factory.go:647] Unable to schedule preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-16: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 00:14:48.458712  123634 factory.go:742] Updating pod condition for preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-16 to (PodScheduled==False, Reason=Unschedulable)
I0212 00:14:48.464792  123634 wrap.go:47] PUT /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-16/status: (5.832411ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38852]
I0212 00:14:48.466188  123634 wrap.go:47] PATCH /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/events/ppod-16.158275913e11b1a1: (5.578166ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38860]
I0212 00:14:48.466626  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-16: (1.385185ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38862]
I0212 00:14:48.466848  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-16: (1.364544ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38852]
I0212 00:14:48.466954  123634 wrap.go:47] DELETE /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-0: (9.103361ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38858]
I0212 00:14:48.467136  123634 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 00:14:48.467329  123634 scheduling_queue.go:868] About to try and schedule pod preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-9
I0212 00:14:48.467353  123634 scheduler.go:453] Attempting to schedule pod: preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-9
I0212 00:14:48.467522  123634 factory.go:647] Unable to schedule preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-9: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 00:14:48.467603  123634 factory.go:742] Updating pod condition for preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-9 to (PodScheduled==False, Reason=Unschedulable)
I0212 00:14:48.469909  123634 wrap.go:47] POST /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/events: (1.555056ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38866]
I0212 00:14:48.470420  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-9: (2.647595ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38860]
I0212 00:14:48.471187  123634 wrap.go:47] PUT /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-9/status: (2.891792ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38864]
I0212 00:14:48.472800  123634 wrap.go:47] DELETE /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-1: (5.184738ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38862]
I0212 00:14:48.473149  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-9: (1.327854ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38866]
I0212 00:14:48.473435  123634 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 00:14:48.473648  123634 scheduling_queue.go:868] About to try and schedule pod preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-12
I0212 00:14:48.473671  123634 scheduler.go:453] Attempting to schedule pod: preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-12
I0212 00:14:48.473775  123634 factory.go:647] Unable to schedule preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-12: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 00:14:48.473826  123634 factory.go:742] Updating pod condition for preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-12 to (PodScheduled==False, Reason=Unschedulable)
I0212 00:14:48.476708  123634 wrap.go:47] POST /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/events: (1.88877ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38868]
I0212 00:14:48.476871  123634 wrap.go:47] PUT /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-12/status: (2.616776ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38866]
I0212 00:14:48.476882  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-12: (2.676361ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38860]
I0212 00:14:48.478686  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-12: (1.35635ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38866]
I0212 00:14:48.478940  123634 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 00:14:48.479179  123634 scheduling_queue.go:868] About to try and schedule pod preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-9
I0212 00:14:48.479199  123634 scheduler.go:453] Attempting to schedule pod: preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-9
I0212 00:14:48.479287  123634 factory.go:647] Unable to schedule preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-9: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 00:14:48.479326  123634 factory.go:742] Updating pod condition for preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-9 to (PodScheduled==False, Reason=Unschedulable)
I0212 00:14:48.480194  123634 wrap.go:47] DELETE /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-2: (7.037013ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38862]
I0212 00:14:48.481165  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-9: (1.630345ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38866]
I0212 00:14:48.483711  123634 wrap.go:47] PATCH /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/events/ppod-9.158275913fa88506: (2.996454ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38862]
I0212 00:14:48.483970  123634 wrap.go:47] PUT /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-9/status: (2.967145ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38870]
I0212 00:14:48.485329  123634 wrap.go:47] DELETE /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-3: (4.609703ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38868]
I0212 00:14:48.485779  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-9: (1.299108ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38862]
I0212 00:14:48.486029  123634 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 00:14:48.486380  123634 scheduling_queue.go:868] About to try and schedule pod preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-5
I0212 00:14:48.486402  123634 scheduler.go:453] Attempting to schedule pod: preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-5
I0212 00:14:48.486578  123634 factory.go:647] Unable to schedule preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-5: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 00:14:48.486664  123634 factory.go:742] Updating pod condition for preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-5 to (PodScheduled==False, Reason=Unschedulable)
I0212 00:14:48.488738  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-5: (1.624725ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38866]
I0212 00:14:48.489087  123634 wrap.go:47] POST /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/events: (1.506675ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38872]
I0212 00:14:48.489534  123634 wrap.go:47] PUT /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-5/status: (2.482098ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38862]
I0212 00:14:48.490629  123634 wrap.go:47] DELETE /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-4: (4.921595ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38868]
I0212 00:14:48.491531  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-5: (1.269234ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38872]
I0212 00:14:48.491843  123634 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 00:14:48.492056  123634 scheduling_queue.go:868] About to try and schedule pod preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-8
I0212 00:14:48.492116  123634 scheduler.go:453] Attempting to schedule pod: preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-8
I0212 00:14:48.492321  123634 factory.go:647] Unable to schedule preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-8: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 00:14:48.492379  123634 factory.go:742] Updating pod condition for preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-8 to (PodScheduled==False, Reason=Unschedulable)
I0212 00:14:48.495081  123634 wrap.go:47] POST /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/events: (1.984404ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38866]
I0212 00:14:48.495138  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-8: (1.663717ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38874]
I0212 00:14:48.495604  123634 wrap.go:47] PUT /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-8/status: (2.927555ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38872]
I0212 00:14:48.497001  123634 wrap.go:47] DELETE /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-5: (5.862357ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38868]
I0212 00:14:48.499340  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-8: (2.238526ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38866]
I0212 00:14:48.499705  123634 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 00:14:48.499860  123634 scheduling_queue.go:868] About to try and schedule pod preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-7
I0212 00:14:48.499875  123634 scheduler.go:453] Attempting to schedule pod: preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-7
I0212 00:14:48.500179  123634 factory.go:647] Unable to schedule preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-7: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 00:14:48.500318  123634 factory.go:742] Updating pod condition for preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-7 to (PodScheduled==False, Reason=Unschedulable)
I0212 00:14:48.503136  123634 wrap.go:47] POST /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/events: (2.12631ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38876]
I0212 00:14:48.504238  123634 wrap.go:47] PUT /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-7/status: (3.417589ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38866]
I0212 00:14:48.505717  123634 wrap.go:47] DELETE /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-6: (8.473604ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38868]
I0212 00:14:48.505822  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-7: (5.10531ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38874]
I0212 00:14:48.506445  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-7: (1.706796ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38866]
I0212 00:14:48.506772  123634 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 00:14:48.507518  123634 scheduling_queue.go:868] About to try and schedule pod preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-11
I0212 00:14:48.507573  123634 scheduler.go:453] Attempting to schedule pod: preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-11
I0212 00:14:48.507708  123634 factory.go:647] Unable to schedule preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-11: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 00:14:48.507776  123634 factory.go:742] Updating pod condition for preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-11 to (PodScheduled==False, Reason=Unschedulable)
I0212 00:14:48.510718  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-11: (2.030469ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38876]
I0212 00:14:48.510769  123634 wrap.go:47] PUT /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-11/status: (2.700011ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38866]
I0212 00:14:48.512046  123634 wrap.go:47] DELETE /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-7: (5.756904ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38868]
I0212 00:14:48.512054  123634 wrap.go:47] POST /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/events: (1.842165ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38878]
I0212 00:14:48.514044  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-11: (1.953293ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38866]
I0212 00:14:48.515210  123634 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 00:14:48.516330  123634 scheduling_queue.go:868] About to try and schedule pod preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-11
I0212 00:14:48.516345  123634 scheduler.go:453] Attempting to schedule pod: preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-11
I0212 00:14:48.517794  123634 wrap.go:47] DELETE /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-8: (5.359585ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38868]
I0212 00:14:48.518022  123634 factory.go:647] Unable to schedule preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-11: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 00:14:48.518087  123634 factory.go:742] Updating pod condition for preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-11 to (PodScheduled==False, Reason=Unschedulable)
I0212 00:14:48.520595  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-11: (1.16573ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38880]
I0212 00:14:48.522395  123634 wrap.go:47] PATCH /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/events/ppod-11.15827591420d822c: (3.012656ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38882]
I0212 00:14:48.525509  123634 wrap.go:47] DELETE /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-9: (6.992008ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38876]
I0212 00:14:48.530400  123634 wrap.go:47] PUT /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-11/status: (11.397795ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38866]
I0212 00:14:48.531373  123634 wrap.go:47] DELETE /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-10: (5.368663ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38882]
I0212 00:14:48.538362  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-11: (7.018755ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38866]
I0212 00:14:48.539022  123634 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 00:14:48.539275  123634 scheduling_queue.go:868] About to try and schedule pod preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-13
I0212 00:14:48.539320  123634 scheduler.go:453] Attempting to schedule pod: preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-13
I0212 00:14:48.539442  123634 factory.go:647] Unable to schedule preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-13: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 00:14:48.539528  123634 factory.go:742] Updating pod condition for preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-13 to (PodScheduled==False, Reason=Unschedulable)
I0212 00:14:48.542667  123634 wrap.go:47] DELETE /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-11: (7.13697ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38882]
I0212 00:14:48.542767  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-13: (2.914668ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38880]
I0212 00:14:48.542777  123634 wrap.go:47] PUT /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-13/status: (2.42074ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38866]
I0212 00:14:48.543518  123634 wrap.go:47] POST /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/events: (3.212975ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38884]
I0212 00:14:48.544992  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-13: (1.77517ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38866]
I0212 00:14:48.546092  123634 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 00:14:48.546258  123634 scheduling_queue.go:868] About to try and schedule pod preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-13
I0212 00:14:48.546283  123634 scheduler.go:453] Attempting to schedule pod: preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-13
I0212 00:14:48.546376  123634 factory.go:647] Unable to schedule preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-13: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 00:14:48.546527  123634 factory.go:742] Updating pod condition for preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-13 to (PodScheduled==False, Reason=Unschedulable)
I0212 00:14:48.548814  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-13: (1.984775ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38882]
I0212 00:14:48.549347  123634 wrap.go:47] DELETE /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-12: (6.331843ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38880]
I0212 00:14:48.551839  123634 wrap.go:47] PATCH /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/events/ppod-13.1582759143f1f95f: (4.087314ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38886]
I0212 00:14:48.559328  123634 wrap.go:47] PUT /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-13/status: (9.391279ms) 409 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38884]
I0212 00:14:48.566893  123634 wrap.go:47] DELETE /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-13: (17.062784ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38880]
I0212 00:14:48.571671  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-13: (11.511086ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38886]
E0212 00:14:48.572321  123634 scheduler.go:294] Error getting the updated preemptor pod object: pods "ppod-13" not found
I0212 00:14:48.574190  123634 wrap.go:47] DELETE /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-14: (6.851081ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38880]
I0212 00:14:48.579163  123634 scheduling_queue.go:868] About to try and schedule pod preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-15
I0212 00:14:48.579210  123634 scheduler.go:449] Skip schedule deleting pod: preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-15
I0212 00:14:48.580746  123634 wrap.go:47] DELETE /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-15: (5.666294ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38886]
I0212 00:14:48.583325  123634 wrap.go:47] POST /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/events: (1.9075ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38882]
I0212 00:14:48.585976  123634 scheduling_queue.go:868] About to try and schedule pod preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-16
I0212 00:14:48.586127  123634 scheduler.go:449] Skip schedule deleting pod: preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-16
I0212 00:14:48.588150  123634 wrap.go:47] POST /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/events: (1.672801ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38882]
I0212 00:14:48.588973  123634 wrap.go:47] DELETE /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-16: (6.537549ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38886]
I0212 00:14:48.592811  123634 scheduling_queue.go:868] About to try and schedule pod preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-17
I0212 00:14:48.592847  123634 scheduler.go:449] Skip schedule deleting pod: preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-17
I0212 00:14:48.594655  123634 wrap.go:47] POST /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/events: (1.520063ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38882]
I0212 00:14:48.595727  123634 wrap.go:47] DELETE /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-17: (6.335891ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38886]
I0212 00:14:48.599137  123634 scheduling_queue.go:868] About to try and schedule pod preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-18
I0212 00:14:48.599182  123634 scheduler.go:449] Skip schedule deleting pod: preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-18
I0212 00:14:48.601119  123634 wrap.go:47] POST /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/events: (1.689703ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38882]
I0212 00:14:48.601988  123634 wrap.go:47] DELETE /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-18: (5.861364ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38886]
I0212 00:14:48.605713  123634 scheduling_queue.go:868] About to try and schedule pod preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-19
I0212 00:14:48.605910  123634 scheduler.go:449] Skip schedule deleting pod: preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-19
I0212 00:14:48.607451  123634 wrap.go:47] DELETE /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-19: (5.104524ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38886]
I0212 00:14:48.607781  123634 wrap.go:47] POST /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/events: (1.549697ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38882]
I0212 00:14:48.611592  123634 scheduling_queue.go:868] About to try and schedule pod preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-20
I0212 00:14:48.611688  123634 scheduler.go:449] Skip schedule deleting pod: preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-20
I0212 00:14:48.613811  123634 wrap.go:47] POST /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/events: (1.547972ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38882]
I0212 00:14:48.615737  123634 wrap.go:47] DELETE /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-20: (7.834458ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38886]
I0212 00:14:48.621121  123634 scheduling_queue.go:868] About to try and schedule pod preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-21
I0212 00:14:48.621247  123634 scheduler.go:449] Skip schedule deleting pod: preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-21
I0212 00:14:48.623098  123634 wrap.go:47] DELETE /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-21: (6.389776ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38886]
I0212 00:14:48.623838  123634 wrap.go:47] POST /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/events: (2.200711ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38882]
I0212 00:14:48.626740  123634 scheduling_queue.go:868] About to try and schedule pod preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-22
I0212 00:14:48.626775  123634 scheduler.go:449] Skip schedule deleting pod: preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-22
I0212 00:14:48.633642  123634 wrap.go:47] POST /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/events: (6.598914ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38882]
I0212 00:14:48.633765  123634 wrap.go:47] DELETE /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-22: (10.083367ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38886]
I0212 00:14:48.637760  123634 scheduling_queue.go:868] About to try and schedule pod preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-23
I0212 00:14:48.637804  123634 scheduler.go:449] Skip schedule deleting pod: preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-23
I0212 00:14:48.640662  123634 wrap.go:47] POST /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/events: (2.503164ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38886]
I0212 00:14:48.640891  123634 wrap.go:47] DELETE /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-23: (6.581758ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38882]
I0212 00:14:48.645794  123634 scheduling_queue.go:868] About to try and schedule pod preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-24
I0212 00:14:48.645835  123634 scheduler.go:449] Skip schedule deleting pod: preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-24
I0212 00:14:48.646982  123634 wrap.go:47] DELETE /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-24: (5.659485ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38882]
I0212 00:14:48.649088  123634 wrap.go:47] POST /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/events: (2.72834ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38886]
I0212 00:14:48.652795  123634 scheduling_queue.go:868] About to try and schedule pod preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-25
I0212 00:14:48.652884  123634 scheduler.go:449] Skip schedule deleting pod: preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-25
I0212 00:14:48.655159  123634 wrap.go:47] POST /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/events: (1.918007ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38886]
I0212 00:14:48.655935  123634 wrap.go:47] DELETE /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-25: (8.48624ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38882]
I0212 00:14:48.661049  123634 scheduling_queue.go:868] About to try and schedule pod preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-26
I0212 00:14:48.661141  123634 scheduler.go:449] Skip schedule deleting pod: preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-26
I0212 00:14:48.661865  123634 wrap.go:47] DELETE /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-26: (5.508716ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38882]
I0212 00:14:48.664860  123634 wrap.go:47] POST /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/events: (3.37227ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38886]
I0212 00:14:48.666859  123634 scheduling_queue.go:868] About to try and schedule pod preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-27
I0212 00:14:48.666895  123634 scheduler.go:449] Skip schedule deleting pod: preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-27
I0212 00:14:48.668743  123634 wrap.go:47] DELETE /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-27: (6.567083ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38882]
I0212 00:14:48.670167  123634 wrap.go:47] POST /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/events: (2.356156ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38886]
I0212 00:14:48.676081  123634 wrap.go:47] DELETE /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-28: (6.24835ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38882]
I0212 00:14:48.678717  123634 scheduling_queue.go:868] About to try and schedule pod preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-28
I0212 00:14:48.678760  123634 scheduler.go:449] Skip schedule deleting pod: preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-28
I0212 00:14:48.679948  123634 scheduling_queue.go:868] About to try and schedule pod preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-29
I0212 00:14:48.679992  123634 scheduler.go:449] Skip schedule deleting pod: preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-29
I0212 00:14:48.680912  123634 wrap.go:47] POST /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/events: (1.889413ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38886]
I0212 00:14:48.682060  123634 wrap.go:47] DELETE /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-29: (5.398206ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38882]
I0212 00:14:48.684397  123634 wrap.go:47] POST /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/events: (1.969454ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38886]
I0212 00:14:48.685585  123634 scheduling_queue.go:868] About to try and schedule pod preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-30
I0212 00:14:48.685614  123634 scheduler.go:449] Skip schedule deleting pod: preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-30
I0212 00:14:48.686972  123634 wrap.go:47] DELETE /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-30: (4.444703ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38882]
I0212 00:14:48.687382  123634 wrap.go:47] POST /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/events: (1.5343ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38886]
I0212 00:14:48.690443  123634 scheduling_queue.go:868] About to try and schedule pod preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-31
I0212 00:14:48.690510  123634 scheduler.go:449] Skip schedule deleting pod: preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-31
I0212 00:14:48.692223  123634 wrap.go:47] DELETE /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-31: (4.493039ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38882]
I0212 00:14:48.692436  123634 wrap.go:47] POST /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/events: (1.643674ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38886]
I0212 00:14:48.696782  123634 scheduling_queue.go:868] About to try and schedule pod preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-32
I0212 00:14:48.696874  123634 scheduler.go:449] Skip schedule deleting pod: preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-32
I0212 00:14:48.698087  123634 wrap.go:47] DELETE /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-32: (5.411299ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38886]
I0212 00:14:48.700751  123634 wrap.go:47] POST /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/events: (2.413448ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38882]
I0212 00:14:48.702043  123634 scheduling_queue.go:868] About to try and schedule pod preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-33
I0212 00:14:48.702208  123634 scheduler.go:449] Skip schedule deleting pod: preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-33
I0212 00:14:48.704036  123634 wrap.go:47] DELETE /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-33: (5.573821ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38886]
I0212 00:14:48.706645  123634 wrap.go:47] POST /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/events: (3.431674ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38882]
I0212 00:14:48.708650  123634 scheduling_queue.go:868] About to try and schedule pod preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-34
I0212 00:14:48.708691  123634 scheduler.go:449] Skip schedule deleting pod: preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-34
I0212 00:14:48.710469  123634 wrap.go:47] POST /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/events: (1.458371ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38882]
I0212 00:14:48.710574  123634 wrap.go:47] DELETE /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-34: (5.734136ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38886]
I0212 00:14:48.770862  123634 reflector.go:248] k8s.io/client-go/informers/factory.go:132: forcing resync
I0212 00:14:48.771107  123634 scheduling_queue.go:868] About to try and schedule pod preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-35
I0212 00:14:48.771148  123634 scheduler.go:449] Skip schedule deleting pod: preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-35
I0212 00:14:48.771935  123634 reflector.go:248] k8s.io/client-go/informers/factory.go:132: forcing resync
I0212 00:14:48.774656  123634 reflector.go:248] k8s.io/client-go/informers/factory.go:132: forcing resync
I0212 00:14:48.774852  123634 reflector.go:248] k8s.io/client-go/informers/factory.go:132: forcing resync
I0212 00:14:48.775758  123634 reflector.go:248] k8s.io/client-go/informers/factory.go:132: forcing resync
I0212 00:14:48.789597  123634 wrap.go:47] POST /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/events: (15.840902ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38882]
I0212 00:14:48.790783  123634 wrap.go:47] DELETE /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-35: (79.747548ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38886]
I0212 00:14:48.794400  123634 scheduling_queue.go:868] About to try and schedule pod preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-36
I0212 00:14:48.794431  123634 scheduler.go:449] Skip schedule deleting pod: preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-36
I0212 00:14:48.799783  123634 wrap.go:47] POST /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/events: (5.013548ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38882]
I0212 00:14:48.804889  123634 wrap.go:47] DELETE /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-36: (13.640909ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38886]
I0212 00:14:48.808247  123634 scheduling_queue.go:868] About to try and schedule pod preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-37
I0212 00:14:48.808292  123634 scheduler.go:449] Skip schedule deleting pod: preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-37
I0212 00:14:48.810196  123634 wrap.go:47] POST /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/events: (1.598066ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38882]
I0212 00:14:48.811887  123634 wrap.go:47] DELETE /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-37: (6.674353ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38886]
I0212 00:14:48.815224  123634 scheduling_queue.go:868] About to try and schedule pod preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-38
I0212 00:14:48.815274  123634 scheduler.go:449] Skip schedule deleting pod: preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-38
I0212 00:14:48.817346  123634 wrap.go:47] POST /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/events: (1.832167ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38882]
I0212 00:14:48.820669  123634 wrap.go:47] DELETE /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-38: (8.394804ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38886]
I0212 00:14:48.824443  123634 scheduling_queue.go:868] About to try and schedule pod preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-39
I0212 00:14:48.824520  123634 scheduler.go:449] Skip schedule deleting pod: preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-39
I0212 00:14:48.826989  123634 wrap.go:47] POST /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/events: (1.740251ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38882]
I0212 00:14:48.829503  123634 wrap.go:47] DELETE /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-39: (8.318765ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38886]
I0212 00:14:48.833834  123634 scheduling_queue.go:868] About to try and schedule pod preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-40
I0212 00:14:48.833889  123634 scheduler.go:449] Skip schedule deleting pod: preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-40
I0212 00:14:48.835657  123634 wrap.go:47] DELETE /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-40: (5.068977ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38886]
I0212 00:14:48.836619  123634 wrap.go:47] POST /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/events: (2.223194ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38882]
I0212 00:14:48.840921  123634 scheduling_queue.go:868] About to try and schedule pod preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-41
I0212 00:14:48.840957  123634 scheduler.go:449] Skip schedule deleting pod: preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-41
I0212 00:14:48.842843  123634 wrap.go:47] DELETE /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-41: (6.047938ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38886]
I0212 00:14:48.843120  123634 wrap.go:47] POST /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/events: (1.874577ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38882]
I0212 00:14:48.850040  123634 wrap.go:47] DELETE /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-42: (5.90805ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38882]
I0212 00:14:48.851435  123634 scheduling_queue.go:868] About to try and schedule pod preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-42
I0212 00:14:48.851471  123634 scheduler.go:449] Skip schedule deleting pod: preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-42
I0212 00:14:48.853699  123634 wrap.go:47] POST /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/events: (1.854714ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38886]
I0212 00:14:48.854005  123634 scheduling_queue.go:868] About to try and schedule pod preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-43
I0212 00:14:48.854047  123634 scheduler.go:449] Skip schedule deleting pod: preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-43
I0212 00:14:48.856166  123634 wrap.go:47] POST /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/events: (1.904979ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38886]
I0212 00:14:48.856403  123634 wrap.go:47] DELETE /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-43: (5.492209ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38882]
I0212 00:14:48.860738  123634 scheduling_queue.go:868] About to try and schedule pod preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-44
I0212 00:14:48.860769  123634 scheduler.go:449] Skip schedule deleting pod: preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-44
I0212 00:14:48.861775  123634 wrap.go:47] DELETE /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-44: (4.441363ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38882]
I0212 00:14:48.862469  123634 wrap.go:47] POST /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/events: (1.402486ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38886]
I0212 00:14:48.866041  123634 scheduling_queue.go:868] About to try and schedule pod preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-45
I0212 00:14:48.866087  123634 scheduler.go:449] Skip schedule deleting pod: preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-45
I0212 00:14:48.867890  123634 wrap.go:47] POST /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/events: (1.571109ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38886]
I0212 00:14:48.868835  123634 wrap.go:47] DELETE /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-45: (6.550363ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38882]
I0212 00:14:48.875503  123634 scheduling_queue.go:868] About to try and schedule pod preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-46
I0212 00:14:48.875588  123634 scheduler.go:449] Skip schedule deleting pod: preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-46
I0212 00:14:48.878010  123634 wrap.go:47] DELETE /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-46: (8.882705ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38882]
I0212 00:14:48.883515  123634 wrap.go:47] POST /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/events: (7.616302ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38886]
I0212 00:14:48.883965  123634 wrap.go:47] DELETE /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-47: (5.263961ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38882]
I0212 00:14:48.891865  123634 scheduling_queue.go:868] About to try and schedule pod preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-47
I0212 00:14:48.891905  123634 scheduler.go:449] Skip schedule deleting pod: preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-47
I0212 00:14:48.892216  123634 scheduling_queue.go:868] About to try and schedule pod preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-48
I0212 00:14:48.892245  123634 scheduler.go:449] Skip schedule deleting pod: preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-48
I0212 00:14:48.894919  123634 wrap.go:47] POST /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/events: (2.549294ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38886]
I0212 00:14:48.896535  123634 wrap.go:47] DELETE /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-48: (12.231253ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38882]
I0212 00:14:48.898827  123634 wrap.go:47] POST /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/events: (2.851643ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38886]
I0212 00:14:48.901360  123634 scheduling_queue.go:868] About to try and schedule pod preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-49
I0212 00:14:48.901411  123634 scheduler.go:449] Skip schedule deleting pod: preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-49
I0212 00:14:48.902223  123634 wrap.go:47] DELETE /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-49: (5.240597ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38882]
I0212 00:14:48.903434  123634 wrap.go:47] POST /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/events: (1.592568ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38886]
I0212 00:14:48.908742  123634 wrap.go:47] DELETE /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/rpod-0: (6.017261ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38882]
I0212 00:14:48.910964  123634 wrap.go:47] DELETE /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/rpod-1: (1.747092ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38882]
I0212 00:14:48.917044  123634 wrap.go:47] DELETE /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/preemptor-pod: (5.291575ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38882]
I0212 00:14:48.919723  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-0: (1.058697ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38882]
I0212 00:14:48.922702  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-1: (1.381975ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38882]
I0212 00:14:48.925432  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-2: (1.134531ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38882]
I0212 00:14:48.928290  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-3: (1.217515ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38882]
I0212 00:14:48.938077  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-4: (1.418695ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38882]
I0212 00:14:48.941053  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-5: (1.354373ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38882]
I0212 00:14:48.943698  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-6: (1.072185ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38882]
I0212 00:14:48.946533  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-7: (1.211256ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38882]
I0212 00:14:48.950140  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-8: (2.035029ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38882]
I0212 00:14:48.955117  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-9: (3.326576ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38882]
I0212 00:14:48.958288  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-10: (1.484455ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38882]
I0212 00:14:48.961304  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-11: (1.106667ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38882]
I0212 00:14:48.964163  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-12: (1.210213ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38882]
I0212 00:14:48.966860  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-13: (1.163928ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38882]
I0212 00:14:48.969930  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-14: (1.298696ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38882]
I0212 00:14:48.972468  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-15: (1.045764ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38882]
I0212 00:14:48.975158  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-16: (995.209µs) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38882]
I0212 00:14:48.978135  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-17: (1.3295ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38882]
I0212 00:14:48.980688  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-18: (931.719µs) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38882]
I0212 00:14:48.983242  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-19: (1.019801ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38882]
I0212 00:14:48.985647  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-20: (881.097µs) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38882]
I0212 00:14:48.988199  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-21: (1.023021ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38882]
I0212 00:14:48.990786  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-22: (992.748µs) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38882]
I0212 00:14:48.997023  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-23: (1.1432ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38882]
I0212 00:14:48.999988  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-24: (1.052448ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38882]
I0212 00:14:49.002896  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-25: (1.110328ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38882]
I0212 00:14:49.005651  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-26: (1.208672ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38882]
I0212 00:14:49.008232  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-27: (1.015013ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38882]
I0212 00:14:49.010752  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-28: (950.821µs) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38882]
I0212 00:14:49.013251  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-29: (1.01673ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38882]
I0212 00:14:49.016029  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-30: (1.143996ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38882]
I0212 00:14:49.018575  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-31: (1.022981ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38882]
I0212 00:14:49.021347  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-32: (1.235462ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38882]
I0212 00:14:49.025097  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-33: (1.135051ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38882]
I0212 00:14:49.027679  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-34: (922.243µs) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38882]
I0212 00:14:49.030019  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-35: (855.833µs) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38882]
I0212 00:14:49.032991  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-36: (1.243193ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38882]
I0212 00:14:49.035676  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-37: (1.033766ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38882]
I0212 00:14:49.046790  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-38: (4.420951ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38882]
I0212 00:14:49.049594  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-39: (1.002845ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38882]
I0212 00:14:49.053425  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-40: (1.615866ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38882]
I0212 00:14:49.057498  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-41: (2.310823ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38882]
I0212 00:14:49.069986  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-42: (2.751439ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38882]
I0212 00:14:49.073616  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-43: (1.220792ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38882]
I0212 00:14:49.076351  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-44: (1.173106ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38882]
I0212 00:14:49.079432  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-45: (1.361362ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38882]
I0212 00:14:49.083703  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-46: (2.163029ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38882]
I0212 00:14:49.086612  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-47: (1.207428ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38882]
I0212 00:14:49.089911  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-48: (1.427602ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38882]
I0212 00:14:49.094334  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-49: (2.87612ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38882]
I0212 00:14:49.098925  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/rpod-0: (2.643403ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38882]
I0212 00:14:49.101927  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/rpod-1: (1.005439ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38882]
I0212 00:14:49.104900  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/preemptor-pod: (1.432362ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38882]
I0212 00:14:49.107510  123634 wrap.go:47] POST /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods: (2.053882ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38882]
I0212 00:14:49.108763  123634 scheduling_queue.go:868] About to try and schedule pod preemption-race3458f067-2e5b-11e9-8330-0242ac110002/rpod-0
I0212 00:14:49.108783  123634 scheduler.go:453] Attempting to schedule pod: preemption-race3458f067-2e5b-11e9-8330-0242ac110002/rpod-0
I0212 00:14:49.108923  123634 scheduler_binder.go:269] AssumePodVolumes for pod "preemption-race3458f067-2e5b-11e9-8330-0242ac110002/rpod-0", node "node1"
I0212 00:14:49.108946  123634 scheduler_binder.go:279] AssumePodVolumes for pod "preemption-race3458f067-2e5b-11e9-8330-0242ac110002/rpod-0", node "node1": all PVCs bound and nothing to do
I0212 00:14:49.109016  123634 factory.go:733] Attempting to bind rpod-0 to node1
I0212 00:14:49.109903  123634 wrap.go:47] POST /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods: (1.91635ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38882]
I0212 00:14:49.110665  123634 scheduling_queue.go:868] About to try and schedule pod preemption-race3458f067-2e5b-11e9-8330-0242ac110002/rpod-1
I0212 00:14:49.110716  123634 scheduler.go:453] Attempting to schedule pod: preemption-race3458f067-2e5b-11e9-8330-0242ac110002/rpod-1
I0212 00:14:49.110869  123634 scheduler_binder.go:269] AssumePodVolumes for pod "preemption-race3458f067-2e5b-11e9-8330-0242ac110002/rpod-1", node "node1"
I0212 00:14:49.110910  123634 scheduler_binder.go:279] AssumePodVolumes for pod "preemption-race3458f067-2e5b-11e9-8330-0242ac110002/rpod-1", node "node1": all PVCs bound and nothing to do
I0212 00:14:49.111017  123634 factory.go:733] Attempting to bind rpod-1 to node1
I0212 00:14:49.111372  123634 wrap.go:47] POST /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/rpod-0/binding: (2.065823ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38886]
I0212 00:14:49.111636  123634 scheduler.go:571] pod preemption-race3458f067-2e5b-11e9-8330-0242ac110002/rpod-0 is bound successfully on node node1, 1 nodes evaluated, 1 nodes were found feasible
I0212 00:14:49.113738  123634 wrap.go:47] POST /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/rpod-1/binding: (2.417042ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38882]
I0212 00:14:49.114007  123634 scheduler.go:571] pod preemption-race3458f067-2e5b-11e9-8330-0242ac110002/rpod-1 is bound successfully on node node1, 1 nodes evaluated, 1 nodes were found feasible
I0212 00:14:49.114162  123634 wrap.go:47] POST /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/events: (1.95385ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38886]
I0212 00:14:49.116198  123634 wrap.go:47] POST /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/events: (1.527494ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38886]
I0212 00:14:49.213039  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/rpod-0: (2.163239ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38886]
I0212 00:14:49.316090  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/rpod-1: (2.190724ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38886]
I0212 00:14:49.316417  123634 preemption_test.go:561] Creating the preemptor pod...
I0212 00:14:49.322457  123634 wrap.go:47] POST /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods: (5.717048ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38886]
I0212 00:14:49.322998  123634 preemption_test.go:567] Creating additional pods...
I0212 00:14:49.323401  123634 scheduling_queue.go:868] About to try and schedule pod preemption-race3458f067-2e5b-11e9-8330-0242ac110002/preemptor-pod
I0212 00:14:49.323414  123634 scheduler.go:453] Attempting to schedule pod: preemption-race3458f067-2e5b-11e9-8330-0242ac110002/preemptor-pod
I0212 00:14:49.323569  123634 factory.go:647] Unable to schedule preemption-race3458f067-2e5b-11e9-8330-0242ac110002/preemptor-pod: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 00:14:49.323621  123634 factory.go:742] Updating pod condition for preemption-race3458f067-2e5b-11e9-8330-0242ac110002/preemptor-pod to (PodScheduled==False, Reason=Unschedulable)
I0212 00:14:49.327516  123634 wrap.go:47] PUT /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/preemptor-pod/status: (2.75624ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38882]
I0212 00:14:49.328004  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/preemptor-pod: (3.293478ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39042]
I0212 00:14:49.328402  123634 wrap.go:47] POST /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods: (4.526658ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38886]
I0212 00:14:49.332175  123634 wrap.go:47] POST /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods: (2.650911ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38886]
I0212 00:14:49.332742  123634 wrap.go:47] POST /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/events: (4.306254ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38882]
I0212 00:14:49.332923  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/preemptor-pod: (4.090667ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39046]
I0212 00:14:49.333228  123634 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 00:14:49.335238  123634 wrap.go:47] POST /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods: (2.072618ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39044]
I0212 00:14:49.336671  123634 wrap.go:47] PUT /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/preemptor-pod/status: (2.608819ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39042]
I0212 00:14:49.338566  123634 wrap.go:47] POST /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods: (2.867017ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39044]
I0212 00:14:49.342733  123634 wrap.go:47] POST /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods: (1.983027ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39044]
I0212 00:14:49.343141  123634 wrap.go:47] DELETE /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/rpod-1: (6.088465ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39042]
I0212 00:14:49.344526  123634 scheduling_queue.go:868] About to try and schedule pod preemption-race3458f067-2e5b-11e9-8330-0242ac110002/preemptor-pod
I0212 00:14:49.344568  123634 scheduler.go:453] Attempting to schedule pod: preemption-race3458f067-2e5b-11e9-8330-0242ac110002/preemptor-pod
I0212 00:14:49.344683  123634 scheduler_binder.go:269] AssumePodVolumes for pod "preemption-race3458f067-2e5b-11e9-8330-0242ac110002/preemptor-pod", node "node1"
I0212 00:14:49.344704  123634 scheduler_binder.go:279] AssumePodVolumes for pod "preemption-race3458f067-2e5b-11e9-8330-0242ac110002/preemptor-pod", node "node1": all PVCs bound and nothing to do
I0212 00:14:49.344969  123634 scheduling_queue.go:868] About to try and schedule pod preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-4
I0212 00:14:49.345662  123634 scheduler.go:453] Attempting to schedule pod: preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-4
I0212 00:14:49.345806  123634 wrap.go:47] POST /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods: (2.07453ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39044]
I0212 00:14:49.345624  123634 factory.go:733] Attempting to bind preemptor-pod to node1
I0212 00:14:49.348185  123634 factory.go:647] Unable to schedule preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-4: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 00:14:49.349759  123634 wrap.go:47] POST /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/preemptor-pod/binding: (3.337713ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39044]
I0212 00:14:49.350091  123634 wrap.go:47] POST /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods: (3.019581ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39048]
I0212 00:14:49.350869  123634 scheduler.go:571] pod preemption-race3458f067-2e5b-11e9-8330-0242ac110002/preemptor-pod is bound successfully on node node1, 1 nodes evaluated, 1 nodes were found feasible
I0212 00:14:49.353997  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-4: (1.468953ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39048]
I0212 00:14:49.354791  123634 wrap.go:47] POST /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods: (3.072836ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39044]
I0212 00:14:49.355377  123634 wrap.go:47] POST /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/events: (9.63552ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39042]
I0212 00:14:49.357129  123634 wrap.go:47] POST /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods: (1.757285ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39052]
I0212 00:14:49.362778  123634 wrap.go:47] POST /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods: (5.171041ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39052]
I0212 00:14:49.363060  123634 wrap.go:47] POST /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/events: (7.02758ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39042]
I0212 00:14:49.363347  123634 factory.go:742] Updating pod condition for preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-4 to (PodScheduled==False, Reason=Unschedulable)
I0212 00:14:49.365131  123634 wrap.go:47] POST /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/events: (1.279497ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39050]
I0212 00:14:49.366081  123634 wrap.go:47] POST /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods: (2.835923ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39052]
I0212 00:14:49.367201  123634 wrap.go:47] PUT /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-4/status: (3.306433ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39042]
I0212 00:14:49.368802  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-4: (1.144772ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39042]
I0212 00:14:49.369003  123634 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 00:14:49.369233  123634 scheduling_queue.go:868] About to try and schedule pod preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-3
I0212 00:14:49.369245  123634 scheduler.go:453] Attempting to schedule pod: preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-3
I0212 00:14:49.369354  123634 factory.go:647] Unable to schedule preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-3: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 00:14:49.369397  123634 factory.go:742] Updating pod condition for preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-3 to (PodScheduled==False, Reason=Unschedulable)
I0212 00:14:49.371975  123634 wrap.go:47] POST /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/events: (1.758501ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39050]
I0212 00:14:49.372979  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-3: (3.090108ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39042]
I0212 00:14:49.375817  123634 wrap.go:47] PUT /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-3/status: (2.203783ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39042]
I0212 00:14:49.388777  123634 wrap.go:47] POST /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods: (22.131583ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39052]
I0212 00:14:49.391461  123634 wrap.go:47] POST /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods: (2.205146ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39052]
I0212 00:14:49.393150  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-3: (3.795905ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39050]
I0212 00:14:49.394171  123634 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 00:14:49.394375  123634 scheduling_queue.go:868] About to try and schedule pod preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-10
I0212 00:14:49.394455  123634 scheduler.go:453] Attempting to schedule pod: preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-10
I0212 00:14:49.394621  123634 factory.go:647] Unable to schedule preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-10: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 00:14:49.394664  123634 factory.go:742] Updating pod condition for preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-10 to (PodScheduled==False, Reason=Unschedulable)
I0212 00:14:49.396773  123634 wrap.go:47] POST /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods: (4.870313ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39052]
I0212 00:14:49.396901  123634 wrap.go:47] PUT /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-10/status: (2.002823ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39050]
I0212 00:14:49.397398  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-10: (1.313096ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39062]
I0212 00:14:49.397589  123634 wrap.go:47] POST /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/events: (1.759131ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39064]
I0212 00:14:49.398933  123634 wrap.go:47] POST /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods: (1.729755ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39050]
I0212 00:14:49.398942  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-10: (1.630851ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39052]
I0212 00:14:49.399351  123634 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 00:14:49.399498  123634 scheduling_queue.go:868] About to try and schedule pod preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-12
I0212 00:14:49.399520  123634 scheduler.go:453] Attempting to schedule pod: preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-12
I0212 00:14:49.399641  123634 factory.go:647] Unable to schedule preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-12: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 00:14:49.399709  123634 factory.go:742] Updating pod condition for preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-12 to (PodScheduled==False, Reason=Unschedulable)
I0212 00:14:49.401421  123634 wrap.go:47] POST /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods: (2.078136ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39064]
I0212 00:14:49.404284  123634 wrap.go:47] POST /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/events: (1.506215ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39068]
I0212 00:14:49.405218  123634 wrap.go:47] PUT /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-12/status: (5.292264ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39062]
I0212 00:14:49.406678  123634 wrap.go:47] POST /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods: (4.854684ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39064]
I0212 00:14:49.406791  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-12: (6.332137ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39066]
I0212 00:14:49.407135  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-12: (1.463918ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39062]
I0212 00:14:49.407339  123634 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 00:14:49.407838  123634 scheduling_queue.go:868] About to try and schedule pod preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-14
I0212 00:14:49.407854  123634 scheduler.go:453] Attempting to schedule pod: preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-14
I0212 00:14:49.407970  123634 factory.go:647] Unable to schedule preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-14: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 00:14:49.408010  123634 factory.go:742] Updating pod condition for preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-14 to (PodScheduled==False, Reason=Unschedulable)
I0212 00:14:49.411150  123634 wrap.go:47] POST /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods: (3.949048ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39064]
I0212 00:14:49.411183  123634 wrap.go:47] PUT /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-14/status: (2.961149ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39062]
I0212 00:14:49.411689  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-14: (2.662733ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39068]
I0212 00:14:49.412434  123634 wrap.go:47] POST /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/events: (3.695719ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39070]
I0212 00:14:49.413069  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-14: (1.436497ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39062]
I0212 00:14:49.413637  123634 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 00:14:49.413712  123634 wrap.go:47] POST /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods: (2.005488ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39064]
I0212 00:14:49.413896  123634 scheduling_queue.go:868] About to try and schedule pod preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-16
I0212 00:14:49.413920  123634 scheduler.go:453] Attempting to schedule pod: preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-16
I0212 00:14:49.414004  123634 factory.go:647] Unable to schedule preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-16: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 00:14:49.414066  123634 factory.go:742] Updating pod condition for preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-16 to (PodScheduled==False, Reason=Unschedulable)
I0212 00:14:49.417359  123634 wrap.go:47] PUT /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-16/status: (2.578345ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39072]
I0212 00:14:49.417413  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-16: (3.139679ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39068]
I0212 00:14:49.417964  123634 wrap.go:47] POST /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/events: (3.20269ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39074]
I0212 00:14:49.417793  123634 wrap.go:47] POST /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods: (3.575881ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39070]
I0212 00:14:49.420760  123634 wrap.go:47] POST /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods: (2.134544ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39072]
I0212 00:14:49.421571  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-16: (3.000065ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39068]
I0212 00:14:49.421851  123634 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 00:14:49.422163  123634 scheduling_queue.go:868] About to try and schedule pod preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-17
I0212 00:14:49.422186  123634 scheduler.go:453] Attempting to schedule pod: preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-17
I0212 00:14:49.422312  123634 factory.go:647] Unable to schedule preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-17: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 00:14:49.422468  123634 factory.go:742] Updating pod condition for preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-17 to (PodScheduled==False, Reason=Unschedulable)
I0212 00:14:49.424238  123634 wrap.go:47] POST /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods: (2.340866ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39068]
I0212 00:14:49.426369  123634 wrap.go:47] POST /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/events: (1.894332ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39078]
I0212 00:14:49.426947  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-17: (2.925057ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39076]
I0212 00:14:49.427935  123634 wrap.go:47] PUT /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-17/status: (4.827433ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39072]
I0212 00:14:49.430430  123634 wrap.go:47] POST /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods: (5.656066ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39068]
I0212 00:14:49.433207  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-17: (4.766328ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39076]
I0212 00:14:49.433657  123634 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 00:14:49.433974  123634 wrap.go:47] POST /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods: (2.883253ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39068]
I0212 00:14:49.434075  123634 scheduling_queue.go:868] About to try and schedule pod preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-20
I0212 00:14:49.434095  123634 scheduler.go:453] Attempting to schedule pod: preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-20
I0212 00:14:49.434196  123634 factory.go:647] Unable to schedule preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-20: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 00:14:49.434509  123634 factory.go:742] Updating pod condition for preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-20 to (PodScheduled==False, Reason=Unschedulable)
I0212 00:14:49.442587  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-20: (6.491876ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39080]
I0212 00:14:49.442880  123634 wrap.go:47] PUT /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-20/status: (8.026929ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39076]
I0212 00:14:49.444199  123634 wrap.go:47] POST /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods: (2.972516ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39082]
I0212 00:14:49.444302  123634 wrap.go:47] POST /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/events: (9.312223ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39078]
I0212 00:14:49.476366  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-20: (32.07511ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39076]
I0212 00:14:49.476988  123634 wrap.go:47] POST /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods: (31.841552ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39078]
I0212 00:14:49.477676  123634 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 00:14:49.477888  123634 scheduling_queue.go:868] About to try and schedule pod preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-23
I0212 00:14:49.477904  123634 scheduler.go:453] Attempting to schedule pod: preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-23
I0212 00:14:49.478013  123634 factory.go:647] Unable to schedule preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-23: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 00:14:49.478060  123634 factory.go:742] Updating pod condition for preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-23 to (PodScheduled==False, Reason=Unschedulable)
I0212 00:14:49.480192  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-23: (1.408966ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39084]
I0212 00:14:49.480769  123634 wrap.go:47] POST /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/events: (1.791474ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39086]
I0212 00:14:49.480990  123634 wrap.go:47] PUT /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-23/status: (2.231684ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39080]
I0212 00:14:49.481707  123634 wrap.go:47] POST /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods: (3.925466ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39078]
I0212 00:14:49.483041  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-23: (1.398311ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39086]
I0212 00:14:49.483397  123634 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 00:14:49.483619  123634 scheduling_queue.go:868] About to try and schedule pod preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-20
I0212 00:14:49.483700  123634 scheduler.go:453] Attempting to schedule pod: preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-20
I0212 00:14:49.483857  123634 factory.go:647] Unable to schedule preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-20: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 00:14:49.483961  123634 factory.go:742] Updating pod condition for preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-20 to (PodScheduled==False, Reason=Unschedulable)
I0212 00:14:49.485355  123634 wrap.go:47] POST /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods: (3.026381ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39078]
I0212 00:14:49.486466  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-20: (1.677973ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39086]
I0212 00:14:49.486796  123634 wrap.go:47] PUT /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-20/status: (2.056887ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39084]
I0212 00:14:49.487596  123634 wrap.go:47] POST /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods: (1.716708ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39078]
I0212 00:14:49.488308  123634 wrap.go:47] PATCH /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/events/ppod-20.15827591794a0742: (3.454423ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39088]
I0212 00:14:49.488375  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-20: (1.07449ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39084]
I0212 00:14:49.488700  123634 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 00:14:49.488886  123634 scheduling_queue.go:868] About to try and schedule pod preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-26
I0212 00:14:49.488911  123634 scheduler.go:453] Attempting to schedule pod: preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-26
I0212 00:14:49.488998  123634 factory.go:647] Unable to schedule preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-26: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 00:14:49.489052  123634 factory.go:742] Updating pod condition for preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-26 to (PodScheduled==False, Reason=Unschedulable)
I0212 00:14:49.489616  123634 wrap.go:47] POST /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods: (1.598894ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39078]
I0212 00:14:49.491050  123634 wrap.go:47] POST /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/events: (1.586364ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39086]
I0212 00:14:49.491724  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-26: (1.237661ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39090]
I0212 00:14:49.491942  123634 wrap.go:47] PUT /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-26/status: (2.604013ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39088]
I0212 00:14:49.492844  123634 wrap.go:47] POST /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods: (2.690695ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39078]
I0212 00:14:49.493717  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-26: (1.291454ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39090]
I0212 00:14:49.493937  123634 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 00:14:49.494125  123634 scheduling_queue.go:868] About to try and schedule pod preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-28
I0212 00:14:49.494149  123634 scheduler.go:453] Attempting to schedule pod: preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-28
I0212 00:14:49.494288  123634 factory.go:647] Unable to schedule preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-28: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 00:14:49.494350  123634 factory.go:742] Updating pod condition for preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-28 to (PodScheduled==False, Reason=Unschedulable)
I0212 00:14:49.495376  123634 wrap.go:47] POST /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods: (1.827788ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39078]
I0212 00:14:49.496007  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-28: (1.374066ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39086]
I0212 00:14:49.496696  123634 wrap.go:47] POST /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/events: (1.635822ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39092]
I0212 00:14:49.497920  123634 wrap.go:47] POST /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods: (2.014241ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39078]
I0212 00:14:49.497942  123634 wrap.go:47] PUT /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-28/status: (3.219165ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39090]
I0212 00:14:49.499659  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-28: (1.111036ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39086]
I0212 00:14:49.499883  123634 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 00:14:49.500060  123634 scheduling_queue.go:868] About to try and schedule pod preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-30
I0212 00:14:49.500105  123634 scheduler.go:453] Attempting to schedule pod: preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-30
I0212 00:14:49.500250  123634 factory.go:647] Unable to schedule preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-30: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 00:14:49.500314  123634 factory.go:742] Updating pod condition for preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-30 to (PodScheduled==False, Reason=Unschedulable)
I0212 00:14:49.500403  123634 wrap.go:47] POST /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods: (1.977973ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39092]
I0212 00:14:49.502183  123634 wrap.go:47] POST /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/events: (1.439552ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39086]
I0212 00:14:49.502856  123634 wrap.go:47] POST /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods: (1.871662ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39094]
I0212 00:14:49.502980  123634 wrap.go:47] PUT /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-30/status: (2.215759ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39092]
I0212 00:14:49.504264  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-30: (2.044443ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39096]
I0212 00:14:49.504599  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-30: (1.194502ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39086]
I0212 00:14:49.504903  123634 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 00:14:49.505071  123634 wrap.go:47] POST /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods: (1.689046ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39094]
I0212 00:14:49.505075  123634 scheduling_queue.go:868] About to try and schedule pod preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-32
I0212 00:14:49.505151  123634 scheduler.go:453] Attempting to schedule pod: preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-32
I0212 00:14:49.505261  123634 factory.go:647] Unable to schedule preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-32: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 00:14:49.505313  123634 factory.go:742] Updating pod condition for preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-32 to (PodScheduled==False, Reason=Unschedulable)
I0212 00:14:49.506475  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-32: (972.12µs) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39086]
I0212 00:14:49.507095  123634 wrap.go:47] POST /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods: (1.577553ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39096]
I0212 00:14:49.507575  123634 wrap.go:47] POST /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/events: (1.466631ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39100]
I0212 00:14:49.508025  123634 wrap.go:47] PUT /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-32/status: (2.054589ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39098]
I0212 00:14:49.509091  123634 wrap.go:47] POST /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods: (1.562574ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39096]
I0212 00:14:49.509531  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-32: (1.049369ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39100]
I0212 00:14:49.509811  123634 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 00:14:49.509941  123634 scheduling_queue.go:868] About to try and schedule pod preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-30
I0212 00:14:49.509960  123634 scheduler.go:453] Attempting to schedule pod: preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-30
I0212 00:14:49.510061  123634 factory.go:647] Unable to schedule preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-30: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 00:14:49.510111  123634 factory.go:742] Updating pod condition for preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-30 to (PodScheduled==False, Reason=Unschedulable)
I0212 00:14:49.511357  123634 wrap.go:47] POST /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods: (1.847095ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39096]
I0212 00:14:49.512098  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-30: (1.702591ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39086]
I0212 00:14:49.513093  123634 wrap.go:47] PATCH /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/events/ppod-30.158275917d367953: (2.144437ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39102]
I0212 00:14:49.513671  123634 wrap.go:47] POST /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods: (1.582827ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39096]
I0212 00:14:49.513984  123634 wrap.go:47] PUT /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-30/status: (3.59722ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39100]
I0212 00:14:49.515624  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-30: (1.198117ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39086]
I0212 00:14:49.516042  123634 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 00:14:49.516095  123634 wrap.go:47] POST /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods: (1.8917ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39102]
I0212 00:14:49.516192  123634 scheduling_queue.go:868] About to try and schedule pod preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-37
I0212 00:14:49.516212  123634 scheduler.go:453] Attempting to schedule pod: preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-37
I0212 00:14:49.516318  123634 factory.go:647] Unable to schedule preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-37: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 00:14:49.516390  123634 factory.go:742] Updating pod condition for preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-37 to (PodScheduled==False, Reason=Unschedulable)
I0212 00:14:49.518802  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-37: (1.817066ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39104]
I0212 00:14:49.518952  123634 wrap.go:47] PUT /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-37/status: (2.311444ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39086]
I0212 00:14:49.519066  123634 wrap.go:47] POST /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods: (2.538972ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39102]
I0212 00:14:49.519227  123634 wrap.go:47] POST /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/events: (2.1139ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39106]
I0212 00:14:49.521044  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-37: (1.397982ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39086]
I0212 00:14:49.521162  123634 wrap.go:47] POST /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods: (1.446272ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39104]
I0212 00:14:49.521336  123634 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 00:14:49.521528  123634 scheduling_queue.go:868] About to try and schedule pod preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-40
I0212 00:14:49.521564  123634 scheduler.go:453] Attempting to schedule pod: preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-40
I0212 00:14:49.521684  123634 factory.go:647] Unable to schedule preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-40: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 00:14:49.521742  123634 factory.go:742] Updating pod condition for preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-40 to (PodScheduled==False, Reason=Unschedulable)
I0212 00:14:49.523439  123634 wrap.go:47] POST /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods: (1.74522ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39104]
I0212 00:14:49.523947  123634 wrap.go:47] POST /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/events: (1.49049ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39110]
I0212 00:14:49.524131  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-40: (1.627514ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39086]
I0212 00:14:49.525096  123634 wrap.go:47] PUT /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-40/status: (2.835158ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39108]
I0212 00:14:49.525649  123634 wrap.go:47] POST /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods: (1.703814ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39104]
I0212 00:14:49.526845  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-40: (1.256032ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39086]
I0212 00:14:49.534977  123634 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 00:14:49.535232  123634 scheduling_queue.go:868] About to try and schedule pod preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-42
I0212 00:14:49.535246  123634 scheduler.go:453] Attempting to schedule pod: preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-42
I0212 00:14:49.535390  123634 wrap.go:47] POST /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods: (9.319436ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39104]
I0212 00:14:49.535383  123634 factory.go:647] Unable to schedule preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-42: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 00:14:49.535692  123634 factory.go:742] Updating pod condition for preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-42 to (PodScheduled==False, Reason=Unschedulable)
I0212 00:14:49.538308  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-42: (2.292662ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39110]
I0212 00:14:49.539123  123634 wrap.go:47] POST /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods: (2.698657ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39114]
I0212 00:14:49.539326  123634 wrap.go:47] PUT /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-42/status: (3.346759ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39086]
I0212 00:14:49.540215  123634 wrap.go:47] POST /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/events: (3.699295ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39112]
I0212 00:14:49.541120  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-42: (1.304562ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39114]
I0212 00:14:49.541754  123634 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 00:14:49.542131  123634 wrap.go:47] POST /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods: (2.244045ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39110]
I0212 00:14:49.542111  123634 scheduling_queue.go:868] About to try and schedule pod preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-44
I0212 00:14:49.542193  123634 scheduler.go:453] Attempting to schedule pod: preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-44
I0212 00:14:49.542365  123634 factory.go:647] Unable to schedule preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-44: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 00:14:49.542424  123634 factory.go:742] Updating pod condition for preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-44 to (PodScheduled==False, Reason=Unschedulable)
I0212 00:14:49.545338  123634 wrap.go:47] PUT /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-44/status: (2.648442ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39112]
I0212 00:14:49.545715  123634 wrap.go:47] POST /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods: (2.488866ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39118]
I0212 00:14:49.545718  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-44: (3.04905ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39114]
I0212 00:14:49.547474  123634 wrap.go:47] POST /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/events: (4.091097ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39116]
I0212 00:14:49.547922  123634 wrap.go:47] POST /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods: (1.777079ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39114]
I0212 00:14:49.548027  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-44: (2.257806ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39112]
I0212 00:14:49.548637  123634 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 00:14:49.548807  123634 scheduling_queue.go:868] About to try and schedule pod preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-46
I0212 00:14:49.548842  123634 scheduler.go:453] Attempting to schedule pod: preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-46
I0212 00:14:49.548951  123634 factory.go:647] Unable to schedule preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-46: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 00:14:49.549002  123634 factory.go:742] Updating pod condition for preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-46 to (PodScheduled==False, Reason=Unschedulable)
I0212 00:14:49.550431  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-46: (1.091713ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39114]
I0212 00:14:49.551612  123634 wrap.go:47] POST /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/events: (1.921739ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39118]
I0212 00:14:49.553148  123634 wrap.go:47] PUT /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-46/status: (2.734364ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39120]
I0212 00:14:49.555016  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-46: (1.39764ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39118]
I0212 00:14:49.555318  123634 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 00:14:49.555568  123634 scheduling_queue.go:868] About to try and schedule pod preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-49
I0212 00:14:49.555614  123634 scheduler.go:453] Attempting to schedule pod: preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-49
I0212 00:14:49.555868  123634 factory.go:647] Unable to schedule preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-49: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 00:14:49.555933  123634 factory.go:742] Updating pod condition for preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-49 to (PodScheduled==False, Reason=Unschedulable)
I0212 00:14:49.558939  123634 wrap.go:47] PUT /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-49/status: (2.777815ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39118]
I0212 00:14:49.559033  123634 wrap.go:47] POST /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/events: (1.756453ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39122]
I0212 00:14:49.558979  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-49: (2.33842ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39114]
I0212 00:14:49.561186  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-49: (1.558527ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39122]
I0212 00:14:49.561630  123634 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 00:14:49.561864  123634 scheduling_queue.go:868] About to try and schedule pod preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-46
I0212 00:14:49.561878  123634 scheduler.go:453] Attempting to schedule pod: preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-46
I0212 00:14:49.561978  123634 factory.go:647] Unable to schedule preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-46: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 00:14:49.562026  123634 factory.go:742] Updating pod condition for preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-46 to (PodScheduled==False, Reason=Unschedulable)
I0212 00:14:49.564181  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-46: (1.36635ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39118]
I0212 00:14:49.566030  123634 wrap.go:47] PUT /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-46/status: (3.191499ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39122]
I0212 00:14:49.567158  123634 wrap.go:47] PATCH /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/events/ppod-46.15827591801d5f0d: (3.26236ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39124]
I0212 00:14:49.567607  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-46: (1.15736ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39122]
I0212 00:14:49.567920  123634 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 00:14:49.568097  123634 scheduling_queue.go:868] About to try and schedule pod preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-48
I0212 00:14:49.568118  123634 scheduler.go:453] Attempting to schedule pod: preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-48
I0212 00:14:49.568238  123634 factory.go:647] Unable to schedule preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-48: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 00:14:49.568300  123634 factory.go:742] Updating pod condition for preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-48 to (PodScheduled==False, Reason=Unschedulable)
I0212 00:14:49.569936  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-48: (1.318213ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39118]
I0212 00:14:49.570627  123634 wrap.go:47] POST /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/events: (1.710557ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39126]
I0212 00:14:49.570929  123634 wrap.go:47] PUT /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-48/status: (2.318338ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39124]
I0212 00:14:49.572816  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-48: (1.312878ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39126]
I0212 00:14:49.573123  123634 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 00:14:49.573389  123634 scheduling_queue.go:868] About to try and schedule pod preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-47
I0212 00:14:49.573471  123634 scheduler.go:453] Attempting to schedule pod: preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-47
I0212 00:14:49.573670  123634 factory.go:647] Unable to schedule preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-47: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 00:14:49.573733  123634 factory.go:742] Updating pod condition for preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-47 to (PodScheduled==False, Reason=Unschedulable)
I0212 00:14:49.576213  123634 wrap.go:47] PUT /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-47/status: (2.119603ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39126]
I0212 00:14:49.576271  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-47: (2.195367ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39118]
I0212 00:14:49.577477  123634 wrap.go:47] POST /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/events: (1.520611ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39128]
I0212 00:14:49.578605  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-47: (1.779295ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39118]
I0212 00:14:49.578891  123634 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 00:14:49.579051  123634 scheduling_queue.go:868] About to try and schedule pod preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-48
I0212 00:14:49.579069  123634 scheduler.go:453] Attempting to schedule pod: preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-48
I0212 00:14:49.579155  123634 factory.go:647] Unable to schedule preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-48: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 00:14:49.579209  123634 factory.go:742] Updating pod condition for preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-48 to (PodScheduled==False, Reason=Unschedulable)
I0212 00:14:49.580665  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-48: (1.219391ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39126]
I0212 00:14:49.581215  123634 wrap.go:47] PUT /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-48/status: (1.774202ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39128]
I0212 00:14:49.583214  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-48: (1.316613ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39128]
I0212 00:14:49.583341  123634 wrap.go:47] PATCH /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/events/ppod-48.158275918143c428: (2.82254ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39130]
I0212 00:14:49.583618  123634 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 00:14:49.583765  123634 scheduling_queue.go:868] About to try and schedule pod preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-47
I0212 00:14:49.583786  123634 scheduler.go:453] Attempting to schedule pod: preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-47
I0212 00:14:49.583954  123634 factory.go:647] Unable to schedule preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-47: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 00:14:49.584009  123634 factory.go:742] Updating pod condition for preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-47 to (PodScheduled==False, Reason=Unschedulable)
I0212 00:14:49.585301  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-47: (1.033814ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39126]
I0212 00:14:49.586090  123634 wrap.go:47] PUT /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-47/status: (1.838957ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39128]
I0212 00:14:49.587477  123634 wrap.go:47] PATCH /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/events/ppod-47.158275918196bea3: (2.217782ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39132]
I0212 00:14:49.587871  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-47: (1.318875ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39128]
I0212 00:14:49.588205  123634 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 00:14:49.588378  123634 scheduling_queue.go:868] About to try and schedule pod preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-42
I0212 00:14:49.588401  123634 scheduler.go:453] Attempting to schedule pod: preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-42
I0212 00:14:49.588501  123634 factory.go:647] Unable to schedule preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-42: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 00:14:49.588588  123634 factory.go:742] Updating pod condition for preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-42 to (PodScheduled==False, Reason=Unschedulable)
I0212 00:14:49.590439  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-42: (1.266843ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39126]
I0212 00:14:49.590662  123634 wrap.go:47] PUT /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-42/status: (1.861818ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39132]
I0212 00:14:49.592076  123634 wrap.go:47] PATCH /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/events/ppod-42.158275917f522c30: (2.448108ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39134]
I0212 00:14:49.592088  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-42: (1.071527ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39132]
I0212 00:14:49.592350  123634 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 00:14:49.592564  123634 scheduling_queue.go:868] About to try and schedule pod preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-45
I0212 00:14:49.592616  123634 scheduler.go:453] Attempting to schedule pod: preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-45
I0212 00:14:49.592719  123634 factory.go:647] Unable to schedule preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-45: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 00:14:49.592780  123634 factory.go:742] Updating pod condition for preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-45 to (PodScheduled==False, Reason=Unschedulable)
I0212 00:14:49.594664  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-45: (1.148849ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39126]
I0212 00:14:49.595149  123634 wrap.go:47] POST /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/events: (1.5351ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39136]
I0212 00:14:49.595387  123634 wrap.go:47] PUT /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-45/status: (2.384896ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39132]
I0212 00:14:49.596930  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-45: (1.101304ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39136]
I0212 00:14:49.597236  123634 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 00:14:49.597420  123634 scheduling_queue.go:868] About to try and schedule pod preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-40
I0212 00:14:49.597439  123634 scheduler.go:453] Attempting to schedule pod: preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-40
I0212 00:14:49.597609  123634 factory.go:647] Unable to schedule preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-40: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 00:14:49.597665  123634 factory.go:742] Updating pod condition for preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-40 to (PodScheduled==False, Reason=Unschedulable)
I0212 00:14:49.600097  123634 wrap.go:47] PUT /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-40/status: (2.223916ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39136]
I0212 00:14:49.600105  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-40: (1.627729ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39126]
I0212 00:14:49.601638  123634 wrap.go:47] PATCH /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/events/ppod-40.158275917e7d683e: (3.160055ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39138]
I0212 00:14:49.601718  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-40: (1.278142ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39126]
I0212 00:14:49.602059  123634 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 00:14:49.602374  123634 scheduling_queue.go:868] About to try and schedule pod preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-45
I0212 00:14:49.602396  123634 scheduler.go:453] Attempting to schedule pod: preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-45
I0212 00:14:49.602506  123634 factory.go:647] Unable to schedule preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-45: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 00:14:49.602608  123634 factory.go:742] Updating pod condition for preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-45 to (PodScheduled==False, Reason=Unschedulable)
I0212 00:14:49.604205  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-45: (1.403573ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39126]
I0212 00:14:49.604869  123634 wrap.go:47] PUT /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-45/status: (2.05207ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39136]
I0212 00:14:49.606273  123634 wrap.go:47] PATCH /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/events/ppod-45.1582759182b93632: (2.938156ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39140]
I0212 00:14:49.607195  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-45: (1.257008ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39136]
I0212 00:14:49.607499  123634 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 00:14:49.607713  123634 scheduling_queue.go:868] About to try and schedule pod preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-43
I0212 00:14:49.607731  123634 scheduler.go:453] Attempting to schedule pod: preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-43
I0212 00:14:49.607821  123634 factory.go:647] Unable to schedule preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-43: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 00:14:49.607887  123634 factory.go:742] Updating pod condition for preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-43 to (PodScheduled==False, Reason=Unschedulable)
I0212 00:14:49.609747  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-43: (1.586ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39126]
I0212 00:14:49.610235  123634 wrap.go:47] POST /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/events: (1.759112ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39142]
I0212 00:14:49.610284  123634 wrap.go:47] PUT /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-43/status: (2.06175ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39140]
I0212 00:14:49.613671  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-43: (1.780605ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39126]
I0212 00:14:49.613871  123634 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 00:14:49.614041  123634 scheduling_queue.go:868] About to try and schedule pod preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-37
I0212 00:14:49.614059  123634 scheduler.go:453] Attempting to schedule pod: preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-37
I0212 00:14:49.614139  123634 factory.go:647] Unable to schedule preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-37: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 00:14:49.614183  123634 factory.go:742] Updating pod condition for preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-37 to (PodScheduled==False, Reason=Unschedulable)
I0212 00:14:49.616241  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-37: (1.168779ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39142]
I0212 00:14:49.616313  123634 wrap.go:47] PUT /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-37/status: (1.916505ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39126]
I0212 00:14:49.618171  123634 wrap.go:47] PATCH /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/events/ppod-37.158275917e2b8939: (2.523401ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39144]
I0212 00:14:49.618832  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-37: (1.55861ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39126]
I0212 00:14:49.619214  123634 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 00:14:49.619388  123634 scheduling_queue.go:868] About to try and schedule pod preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-43
I0212 00:14:49.619412  123634 scheduler.go:453] Attempting to schedule pod: preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-43
I0212 00:14:49.619582  123634 factory.go:647] Unable to schedule preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-43: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 00:14:49.619636  123634 factory.go:742] Updating pod condition for preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-43 to (PodScheduled==False, Reason=Unschedulable)
I0212 00:14:49.621941  123634 wrap.go:47] PUT /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-43/status: (2.01802ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39144]
I0212 00:14:49.622025  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-43: (1.490971ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39142]
I0212 00:14:49.623772  123634 wrap.go:47] PATCH /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/events/ppod-43.15827591839f8d6e: (3.176591ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39146]
I0212 00:14:49.623816  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-43: (1.057968ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39142]
I0212 00:14:49.624098  123634 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 00:14:49.624274  123634 scheduling_queue.go:868] About to try and schedule pod preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-41
I0212 00:14:49.624297  123634 scheduler.go:453] Attempting to schedule pod: preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-41
I0212 00:14:49.624393  123634 factory.go:647] Unable to schedule preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-41: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 00:14:49.624442  123634 factory.go:742] Updating pod condition for preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-41 to (PodScheduled==False, Reason=Unschedulable)
I0212 00:14:49.626831  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-41: (1.62524ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39144]
I0212 00:14:49.626975  123634 wrap.go:47] POST /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/events: (1.684971ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39148]
I0212 00:14:49.627070  123634 wrap.go:47] PUT /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-41/status: (2.319727ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39146]
I0212 00:14:49.628837  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-41: (1.343969ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39146]
I0212 00:14:49.629187  123634 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 00:14:49.629395  123634 scheduling_queue.go:868] About to try and schedule pod preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-39
I0212 00:14:49.629418  123634 scheduler.go:453] Attempting to schedule pod: preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-39
I0212 00:14:49.632132  123634 factory.go:647] Unable to schedule preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-39: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 00:14:49.632236  123634 factory.go:742] Updating pod condition for preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-39 to (PodScheduled==False, Reason=Unschedulable)
I0212 00:14:49.634810  123634 wrap.go:47] POST /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/events: (1.858895ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39150]
I0212 00:14:49.634901  123634 wrap.go:47] PUT /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-39/status: (2.070499ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39148]
I0212 00:14:49.635250  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-39: (2.431254ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39144]
I0212 00:14:49.636736  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-39: (1.262988ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39148]
I0212 00:14:49.637043  123634 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 00:14:49.637236  123634 scheduling_queue.go:868] About to try and schedule pod preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-38
I0212 00:14:49.637257  123634 scheduler.go:453] Attempting to schedule pod: preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-38
I0212 00:14:49.637401  123634 factory.go:647] Unable to schedule preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-38: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 00:14:49.637460  123634 factory.go:742] Updating pod condition for preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-38 to (PodScheduled==False, Reason=Unschedulable)
I0212 00:14:49.638975  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-38: (1.245048ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39144]
I0212 00:14:49.639786  123634 wrap.go:47] PUT /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-38/status: (2.047519ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39150]
I0212 00:14:49.640027  123634 wrap.go:47] POST /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/events: (1.991448ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39152]
I0212 00:14:49.641865  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-38: (1.495012ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39150]
I0212 00:14:49.642206  123634 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 00:14:49.642390  123634 scheduling_queue.go:868] About to try and schedule pod preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-39
I0212 00:14:49.642413  123634 scheduler.go:453] Attempting to schedule pod: preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-39
I0212 00:14:49.642577  123634 factory.go:647] Unable to schedule preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-39: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 00:14:49.642641  123634 factory.go:742] Updating pod condition for preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-39 to (PodScheduled==False, Reason=Unschedulable)
I0212 00:14:49.644659  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-39: (1.607396ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39150]
I0212 00:14:49.644860  123634 wrap.go:47] PUT /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-39/status: (1.941411ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39144]
I0212 00:14:49.645979  123634 wrap.go:47] PATCH /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/events/ppod-39.1582759185136b53: (2.580609ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39154]
I0212 00:14:49.646473  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-39: (1.097903ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39144]
I0212 00:14:49.647343  123634 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 00:14:49.647623  123634 scheduling_queue.go:868] About to try and schedule pod preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-38
I0212 00:14:49.647695  123634 scheduler.go:453] Attempting to schedule pod: preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-38
I0212 00:14:49.647857  123634 factory.go:647] Unable to schedule preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-38: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 00:14:49.647946  123634 factory.go:742] Updating pod condition for preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-38 to (PodScheduled==False, Reason=Unschedulable)
I0212 00:14:49.650904  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/preemptor-pod: (1.130513ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39158]
I0212 00:14:49.651019  123634 wrap.go:47] PUT /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-38/status: (2.811674ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39154]
I0212 00:14:49.652059  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-38: (3.853896ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39150]
I0212 00:14:49.652529  123634 wrap.go:47] PATCH /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/events/ppod-38.1582759185632477: (3.619919ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39156]
I0212 00:14:49.653682  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-38: (1.918873ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39158]
I0212 00:14:49.653985  123634 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 00:14:49.654225  123634 scheduling_queue.go:868] About to try and schedule pod preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-36
I0212 00:14:49.654248  123634 scheduler.go:453] Attempting to schedule pod: preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-36
I0212 00:14:49.654374  123634 factory.go:647] Unable to schedule preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-36: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 00:14:49.654425  123634 factory.go:742] Updating pod condition for preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-36 to (PodScheduled==False, Reason=Unschedulable)
I0212 00:14:49.656300  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-36: (1.234863ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39160]
I0212 00:14:49.656979  123634 wrap.go:47] PUT /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-36/status: (2.268073ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39150]
I0212 00:14:49.657818  123634 wrap.go:47] POST /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/events: (2.979146ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39154]
I0212 00:14:49.658584  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-36: (1.27092ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39150]
I0212 00:14:49.658987  123634 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 00:14:49.659231  123634 scheduling_queue.go:868] About to try and schedule pod preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-32
I0212 00:14:49.659252  123634 scheduler.go:453] Attempting to schedule pod: preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-32
I0212 00:14:49.659374  123634 factory.go:647] Unable to schedule preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-32: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 00:14:49.659460  123634 factory.go:742] Updating pod condition for preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-32 to (PodScheduled==False, Reason=Unschedulable)
I0212 00:14:49.661257  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-32: (1.486247ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39160]
I0212 00:14:49.661796  123634 wrap.go:47] PUT /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-32/status: (1.993248ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39154]
I0212 00:14:49.663376  123634 wrap.go:47] PATCH /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/events/ppod-32.158275917d82ad17: (2.871021ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39162]
I0212 00:14:49.663382  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-32: (1.141492ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39154]
I0212 00:14:49.663765  123634 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 00:14:49.663950  123634 scheduling_queue.go:868] About to try and schedule pod preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-36
I0212 00:14:49.663971  123634 scheduler.go:453] Attempting to schedule pod: preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-36
I0212 00:14:49.664092  123634 factory.go:647] Unable to schedule preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-36: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 00:14:49.664166  123634 factory.go:742] Updating pod condition for preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-36 to (PodScheduled==False, Reason=Unschedulable)
I0212 00:14:49.665865  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-36: (1.43579ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39160]
I0212 00:14:49.667684  123634 wrap.go:47] PUT /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-36/status: (3.281535ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39162]
I0212 00:14:49.668095  123634 wrap.go:47] PATCH /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/events/ppod-36.15827591866601a4: (2.582156ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39164]
I0212 00:14:49.669207  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-36: (1.118196ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39162]
I0212 00:14:49.669632  123634 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 00:14:49.669816  123634 scheduling_queue.go:868] About to try and schedule pod preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-35
I0212 00:14:49.669833  123634 scheduler.go:453] Attempting to schedule pod: preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-35
I0212 00:14:49.669941  123634 factory.go:647] Unable to schedule preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-35: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 00:14:49.670016  123634 factory.go:742] Updating pod condition for preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-35 to (PodScheduled==False, Reason=Unschedulable)
I0212 00:14:49.672175  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-35: (1.65056ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39160]
I0212 00:14:49.672895  123634 wrap.go:47] POST /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/events: (2.031225ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39166]
I0212 00:14:49.673651  123634 wrap.go:47] PUT /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-35/status: (3.32088ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39164]
I0212 00:14:49.675323  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-35: (1.071777ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39166]
I0212 00:14:49.675660  123634 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 00:14:49.675849  123634 scheduling_queue.go:868] About to try and schedule pod preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-34
I0212 00:14:49.675874  123634 scheduler.go:453] Attempting to schedule pod: preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-34
I0212 00:14:49.676101  123634 factory.go:647] Unable to schedule preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-34: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 00:14:49.676171  123634 factory.go:742] Updating pod condition for preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-34 to (PodScheduled==False, Reason=Unschedulable)
I0212 00:14:49.677868  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-34: (1.405091ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39160]
I0212 00:14:49.678289  123634 wrap.go:47] POST /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/events: (1.428281ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39168]
I0212 00:14:49.678862  123634 wrap.go:47] PUT /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-34/status: (2.398095ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39166]
I0212 00:14:49.681441  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-34: (1.249988ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39166]
I0212 00:14:49.682095  123634 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 00:14:49.682257  123634 scheduling_queue.go:868] About to try and schedule pod preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-35
I0212 00:14:49.682280  123634 scheduler.go:453] Attempting to schedule pod: preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-35
I0212 00:14:49.682373  123634 factory.go:647] Unable to schedule preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-35: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 00:14:49.682427  123634 factory.go:742] Updating pod condition for preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-35 to (PodScheduled==False, Reason=Unschedulable)
I0212 00:14:49.684669  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-35: (1.291475ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39168]
I0212 00:14:49.685201  123634 wrap.go:47] PUT /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-35/status: (2.458574ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39166]
I0212 00:14:49.686916  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-35: (1.244528ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39168]
I0212 00:14:49.687398  123634 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 00:14:49.687591  123634 scheduling_queue.go:868] About to try and schedule pod preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-34
I0212 00:14:49.687613  123634 scheduler.go:453] Attempting to schedule pod: preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-34
I0212 00:14:49.687744  123634 factory.go:647] Unable to schedule preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-34: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 00:14:49.687807  123634 factory.go:742] Updating pod condition for preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-34 to (PodScheduled==False, Reason=Unschedulable)
I0212 00:14:49.689273  123634 wrap.go:47] PATCH /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/events/ppod-35.1582759187539db5: (5.724281ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39170]
I0212 00:14:49.690183  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-34: (1.856014ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39168]
I0212 00:14:49.690202  123634 wrap.go:47] PUT /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-34/status: (2.052825ms) 409 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39166]
I0212 00:14:49.692761  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-34: (1.722994ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39166]
I0212 00:14:49.693137  123634 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 00:14:49.693398  123634 wrap.go:47] PATCH /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/events/ppod-34.1582759187b1af48: (3.092547ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39170]
I0212 00:14:49.693436  123634 scheduling_queue.go:868] About to try and schedule pod preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-33
I0212 00:14:49.693504  123634 scheduler.go:453] Attempting to schedule pod: preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-33
I0212 00:14:49.693706  123634 factory.go:647] Unable to schedule preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-33: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 00:14:49.693787  123634 factory.go:742] Updating pod condition for preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-33 to (PodScheduled==False, Reason=Unschedulable)
I0212 00:14:49.695590  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-33: (1.32323ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39166]
I0212 00:14:49.696957  123634 wrap.go:47] PUT /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-33/status: (2.824855ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39168]
I0212 00:14:49.699655  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-33: (1.38978ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39168]
I0212 00:14:49.700077  123634 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 00:14:49.700342  123634 scheduling_queue.go:868] About to try and schedule pod preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-28
I0212 00:14:49.700367  123634 scheduler.go:453] Attempting to schedule pod: preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-28
I0212 00:14:49.700495  123634 wrap.go:47] POST /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/events: (4.82645ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39172]
I0212 00:14:49.700474  123634 factory.go:647] Unable to schedule preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-28: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 00:14:49.700632  123634 factory.go:742] Updating pod condition for preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-28 to (PodScheduled==False, Reason=Unschedulable)
I0212 00:14:49.702524  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-28: (1.637968ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39166]
I0212 00:14:49.703497  123634 wrap.go:47] PUT /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-28/status: (2.616331ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39168]
I0212 00:14:49.704146  123634 wrap.go:47] PATCH /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/events/ppod-28.158275917cdb59d2: (2.784215ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39172]
I0212 00:14:49.705455  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-28: (1.298252ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39168]
I0212 00:14:49.705765  123634 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 00:14:49.705891  123634 scheduling_queue.go:868] About to try and schedule pod preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-33
I0212 00:14:49.705923  123634 scheduler.go:453] Attempting to schedule pod: preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-33
I0212 00:14:49.706008  123634 factory.go:647] Unable to schedule preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-33: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 00:14:49.706079  123634 factory.go:742] Updating pod condition for preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-33 to (PodScheduled==False, Reason=Unschedulable)
I0212 00:14:49.708625  123634 wrap.go:47] PUT /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-33/status: (2.315305ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39172]
I0212 00:14:49.709467  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-33: (2.477274ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39166]
I0212 00:14:49.710919  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-33: (1.276768ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39172]
I0212 00:14:49.711211  123634 wrap.go:47] PATCH /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/events/ppod-33.1582759188be2df7: (4.113956ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39174]
I0212 00:14:49.711299  123634 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 00:14:49.711499  123634 scheduling_queue.go:868] About to try and schedule pod preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-31
I0212 00:14:49.711538  123634 scheduler.go:453] Attempting to schedule pod: preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-31
I0212 00:14:49.711703  123634 factory.go:647] Unable to schedule preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-31: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 00:14:49.711776  123634 factory.go:742] Updating pod condition for preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-31 to (PodScheduled==False, Reason=Unschedulable)
I0212 00:14:49.714009  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-31: (1.056613ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39166]
I0212 00:14:49.714103  123634 wrap.go:47] PUT /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-31/status: (2.041806ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39172]
I0212 00:14:49.714691  123634 wrap.go:47] POST /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/events: (2.049431ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39176]
I0212 00:14:49.715890  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-31: (1.196635ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39172]
I0212 00:14:49.716284  123634 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 00:14:49.716493  123634 scheduling_queue.go:868] About to try and schedule pod preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-26
I0212 00:14:49.716513  123634 scheduler.go:453] Attempting to schedule pod: preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-26
I0212 00:14:49.716626  123634 factory.go:647] Unable to schedule preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-26: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 00:14:49.716679  123634 factory.go:742] Updating pod condition for preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-26 to (PodScheduled==False, Reason=Unschedulable)
I0212 00:14:49.719019  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-26: (1.054025ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39166]
I0212 00:14:49.719647  123634 wrap.go:47] PUT /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-26/status: (2.729663ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39176]
I0212 00:14:49.720431  123634 wrap.go:47] PATCH /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/events/ppod-26.158275917c8a9c2e: (2.761301ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39178]
I0212 00:14:49.721195  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-26: (1.102767ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39176]
I0212 00:14:49.721578  123634 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 00:14:49.721726  123634 scheduling_queue.go:868] About to try and schedule pod preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-29
I0212 00:14:49.721744  123634 scheduler.go:453] Attempting to schedule pod: preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-29
I0212 00:14:49.721818  123634 factory.go:647] Unable to schedule preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-29: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 00:14:49.721881  123634 factory.go:742] Updating pod condition for preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-29 to (PodScheduled==False, Reason=Unschedulable)
I0212 00:14:49.723714  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-29: (1.113845ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39166]
I0212 00:14:49.724274  123634 wrap.go:47] POST /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/events: (1.401813ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39180]
I0212 00:14:49.725065  123634 wrap.go:47] PUT /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-29/status: (2.457994ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39178]
I0212 00:14:49.726721  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-29: (1.211767ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39180]
I0212 00:14:49.727190  123634 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 00:14:49.727440  123634 scheduling_queue.go:868] About to try and schedule pod preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-27
I0212 00:14:49.727461  123634 scheduler.go:453] Attempting to schedule pod: preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-27
I0212 00:14:49.727626  123634 factory.go:647] Unable to schedule preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-27: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 00:14:49.727687  123634 factory.go:742] Updating pod condition for preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-27 to (PodScheduled==False, Reason=Unschedulable)
I0212 00:14:49.729304  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-27: (1.417516ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39180]
I0212 00:14:49.730338  123634 wrap.go:47] POST /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/events: (2.041844ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39182]
I0212 00:14:49.730477  123634 wrap.go:47] PUT /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-27/status: (2.58263ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39166]
I0212 00:14:49.732124  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-27: (1.106924ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39182]
I0212 00:14:49.732388  123634 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 00:14:49.732613  123634 scheduling_queue.go:868] About to try and schedule pod preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-29
I0212 00:14:49.732633  123634 scheduler.go:453] Attempting to schedule pod: preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-29
I0212 00:14:49.732756  123634 factory.go:647] Unable to schedule preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-29: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 00:14:49.732808  123634 factory.go:742] Updating pod condition for preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-29 to (PodScheduled==False, Reason=Unschedulable)
I0212 00:14:49.734393  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-29: (1.27851ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39180]
I0212 00:14:49.735248  123634 wrap.go:47] PUT /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-29/status: (2.101325ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39182]
I0212 00:14:49.736408  123634 wrap.go:47] PATCH /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/events/ppod-29.158275918a6b14fd: (2.730894ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39184]
I0212 00:14:49.737060  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-29: (1.329682ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39182]
I0212 00:14:49.737349  123634 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 00:14:49.737577  123634 scheduling_queue.go:868] About to try and schedule pod preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-27
I0212 00:14:49.737595  123634 scheduler.go:453] Attempting to schedule pod: preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-27
I0212 00:14:49.737709  123634 factory.go:647] Unable to schedule preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-27: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 00:14:49.737766  123634 factory.go:742] Updating pod condition for preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-27 to (PodScheduled==False, Reason=Unschedulable)
I0212 00:14:49.739227  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-27: (1.201879ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39180]
I0212 00:14:49.739641  123634 wrap.go:47] PUT /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-27/status: (1.62473ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39184]
I0212 00:14:49.741029  123634 wrap.go:47] PATCH /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/events/ppod-27.158275918ac3e518: (2.377986ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39186]
I0212 00:14:49.741325  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-27: (1.193732ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39184]
I0212 00:14:49.741687  123634 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 00:14:49.741860  123634 scheduling_queue.go:868] About to try and schedule pod preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-23
I0212 00:14:49.741882  123634 scheduler.go:453] Attempting to schedule pod: preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-23
I0212 00:14:49.742033  123634 factory.go:647] Unable to schedule preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-23: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 00:14:49.742095  123634 factory.go:742] Updating pod condition for preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-23 to (PodScheduled==False, Reason=Unschedulable)
I0212 00:14:49.743870  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-23: (1.529867ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39186]
I0212 00:14:49.744315  123634 wrap.go:47] PUT /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-23/status: (1.986131ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39180]
I0212 00:14:49.744961  123634 wrap.go:47] PATCH /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/events/ppod-23.158275917be2eaf5: (2.130027ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39188]
I0212 00:14:49.746169  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-23: (1.309622ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39180]
I0212 00:14:49.746471  123634 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 00:14:49.746682  123634 scheduling_queue.go:868] About to try and schedule pod preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-25
I0212 00:14:49.746702  123634 scheduler.go:453] Attempting to schedule pod: preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-25
I0212 00:14:49.746827  123634 factory.go:647] Unable to schedule preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-25: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 00:14:49.746873  123634 factory.go:742] Updating pod condition for preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-25 to (PodScheduled==False, Reason=Unschedulable)
I0212 00:14:49.748497  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-25: (1.238961ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39186]
I0212 00:14:49.748958  123634 wrap.go:47] POST /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/events: (1.523151ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39190]
I0212 00:14:49.749201  123634 wrap.go:47] PUT /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-25/status: (2.110661ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39188]
I0212 00:14:49.751178  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-25: (1.445544ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39190]
I0212 00:14:49.751589  123634 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 00:14:49.751803  123634 scheduling_queue.go:868] About to try and schedule pod preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-24
I0212 00:14:49.751856  123634 scheduler.go:453] Attempting to schedule pod: preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-24
I0212 00:14:49.752122  123634 factory.go:647] Unable to schedule preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-24: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 00:14:49.752208  123634 factory.go:742] Updating pod condition for preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-24 to (PodScheduled==False, Reason=Unschedulable)
I0212 00:14:49.753189  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/preemptor-pod: (1.289282ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39190]
I0212 00:14:49.753452  123634 preemption_test.go:583] Check unschedulable pods still exists and were never scheduled...
I0212 00:14:49.755838  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-24: (2.935401ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39186]
I0212 00:14:49.755998  123634 wrap.go:47] PUT /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-24/status: (2.371438ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39190]
I0212 00:14:49.756940  123634 wrap.go:47] POST /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/events: (2.672559ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39192]
I0212 00:14:49.757095  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-0: (2.966476ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39194]
I0212 00:14:49.757921  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-24: (1.455453ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39190]
I0212 00:14:49.758185  123634 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 00:14:49.758371  123634 scheduling_queue.go:868] About to try and schedule pod preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-25
I0212 00:14:49.758417  123634 scheduler.go:453] Attempting to schedule pod: preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-25
I0212 00:14:49.758623  123634 factory.go:647] Unable to schedule preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-25: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 00:14:49.758690  123634 factory.go:742] Updating pod condition for preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-25 to (PodScheduled==False, Reason=Unschedulable)
I0212 00:14:49.758846  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-1: (1.30999ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39192]
I0212 00:14:49.760246  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-25: (1.253123ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39190]
I0212 00:14:49.761095  123634 wrap.go:47] PUT /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-25/status: (2.116306ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39186]
I0212 00:14:49.761178  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-2: (1.909683ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39192]
I0212 00:14:49.762279  123634 wrap.go:47] PATCH /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/events/ppod-25.158275918be8a899: (2.483886ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39196]
I0212 00:14:49.762889  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-3: (1.246698ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39186]
I0212 00:14:49.762921  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-25: (1.248205ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39190]
I0212 00:14:49.763280  123634 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 00:14:49.763454  123634 scheduling_queue.go:868] About to try and schedule pod preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-24
I0212 00:14:49.763476  123634 scheduler.go:453] Attempting to schedule pod: preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-24
I0212 00:14:49.763697  123634 factory.go:647] Unable to schedule preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-24: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 00:14:49.763784  123634 factory.go:742] Updating pod condition for preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-24 to (PodScheduled==False, Reason=Unschedulable)
I0212 00:14:49.765221  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-4: (1.837717ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39186]
I0212 00:14:49.765275  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-24: (1.301084ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39196]
I0212 00:14:49.766011  123634 wrap.go:47] PUT /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-24/status: (1.774705ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39198]
I0212 00:14:49.767408  123634 wrap.go:47] PATCH /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/events/ppod-24.158275918c3a1152: (2.88545ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39200]
I0212 00:14:49.767456  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-5: (1.614933ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39186]
I0212 00:14:49.768355  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-24: (1.197064ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39198]
I0212 00:14:49.768813  123634 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 00:14:49.769050  123634 scheduling_queue.go:868] About to try and schedule pod preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-22
I0212 00:14:49.769074  123634 scheduler.go:453] Attempting to schedule pod: preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-22
I0212 00:14:49.769085  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-6: (1.210518ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39200]
I0212 00:14:49.769200  123634 factory.go:647] Unable to schedule preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-22: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 00:14:49.769266  123634 factory.go:742] Updating pod condition for preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-22 to (PodScheduled==False, Reason=Unschedulable)
I0212 00:14:49.770437  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-22: (1.002936ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39198]
I0212 00:14:49.771253  123634 reflector.go:248] k8s.io/client-go/informers/factory.go:132: forcing resync
I0212 00:14:49.771933  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-7: (2.168296ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39202]
I0212 00:14:49.772018  123634 reflector.go:248] k8s.io/client-go/informers/factory.go:132: forcing resync
I0212 00:14:49.772263  123634 wrap.go:47] POST /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/events: (2.414018ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39204]
I0212 00:14:49.773424  123634 wrap.go:47] PUT /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-22/status: (3.896956ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39196]
I0212 00:14:49.773854  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-8: (1.446151ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39202]
I0212 00:14:49.775197  123634 reflector.go:248] k8s.io/client-go/informers/factory.go:132: forcing resync
I0212 00:14:49.775223  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-22: (1.313929ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39204]
I0212 00:14:49.775230  123634 reflector.go:248] k8s.io/client-go/informers/factory.go:132: forcing resync
I0212 00:14:49.775621  123634 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 00:14:49.775762  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-9: (1.454022ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39202]
I0212 00:14:49.775963  123634 reflector.go:248] k8s.io/client-go/informers/factory.go:132: forcing resync
I0212 00:14:49.776687  123634 scheduling_queue.go:868] About to try and schedule pod preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-21
I0212 00:14:49.776710  123634 scheduler.go:453] Attempting to schedule pod: preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-21
I0212 00:14:49.776922  123634 factory.go:647] Unable to schedule preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-21: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 00:14:49.777005  123634 factory.go:742] Updating pod condition for preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-21 to (PodScheduled==False, Reason=Unschedulable)
I0212 00:14:49.777695  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-10: (1.296385ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39204]
I0212 00:14:49.779678  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-21: (2.337619ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39198]
I0212 00:14:49.780725  123634 wrap.go:47] POST /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/events: (2.586026ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39208]
I0212 00:14:49.780899  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-11: (1.91433ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39204]
I0212 00:14:49.780981  123634 wrap.go:47] PUT /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-21/status: (2.836325ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39206]
I0212 00:14:49.782640  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-21: (1.186054ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39208]
I0212 00:14:49.783320  123634 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 00:14:49.783076  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-12: (1.61136ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39198]
I0212 00:14:49.783470  123634 scheduling_queue.go:868] About to try and schedule pod preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-22
I0212 00:14:49.783496  123634 scheduler.go:453] Attempting to schedule pod: preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-22
I0212 00:14:49.783617  123634 factory.go:647] Unable to schedule preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-22: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 00:14:49.783654  123634 factory.go:742] Updating pod condition for preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-22 to (PodScheduled==False, Reason=Unschedulable)
I0212 00:14:49.784993  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-13: (1.203246ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39198]
I0212 00:14:49.785754  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-22: (1.545148ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39210]
I0212 00:14:49.787015  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-14: (1.665423ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39198]
I0212 00:14:49.787924  123634 wrap.go:47] PATCH /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/events/ppod-22.158275918d3e5903: (2.645323ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39212]
I0212 00:14:49.788024  123634 wrap.go:47] PUT /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-22/status: (2.076795ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39208]
I0212 00:14:49.789264  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-15: (1.408123ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39198]
I0212 00:14:49.789616  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-22: (1.203004ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39212]
I0212 00:14:49.789987  123634 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 00:14:49.790151  123634 scheduling_queue.go:868] About to try and schedule pod preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-21
I0212 00:14:49.790170  123634 scheduler.go:453] Attempting to schedule pod: preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-21
I0212 00:14:49.790253  123634 factory.go:647] Unable to schedule preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-21: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 00:14:49.790523  123634 factory.go:742] Updating pod condition for preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-21 to (PodScheduled==False, Reason=Unschedulable)
I0212 00:14:49.792816  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-21: (2.055577ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39212]
I0212 00:14:49.793151  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-16: (1.912348ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39218]
I0212 00:14:49.794828  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-17: (1.170263ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39218]
I0212 00:14:49.795427  123634 wrap.go:47] PATCH /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/events/ppod-21.158275918db47139: (4.150807ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39216]
I0212 00:14:49.795427  123634 wrap.go:47] PUT /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-21/status: (4.463231ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39210]
I0212 00:14:49.796377  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-18: (1.169528ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39218]
I0212 00:14:49.797817  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-21: (1.113193ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39216]
I0212 00:14:49.798094  123634 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 00:14:49.798327  123634 scheduling_queue.go:868] About to try and schedule pod preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-19
I0212 00:14:49.798393  123634 scheduler.go:453] Attempting to schedule pod: preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-19
I0212 00:14:49.798574  123634 factory.go:647] Unable to schedule preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-19: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 00:14:49.798629  123634 factory.go:742] Updating pod condition for preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-19 to (PodScheduled==False, Reason=Unschedulable)
I0212 00:14:49.798875  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-19: (1.478644ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39212]
I0212 00:14:49.800688  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-19: (1.335069ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39220]
I0212 00:14:49.800810  123634 wrap.go:47] PUT /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-19/status: (1.862038ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39216]
I0212 00:14:49.801361  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-20: (1.611483ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39224]
I0212 00:14:49.801936  123634 wrap.go:47] POST /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/events: (2.406668ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39212]
I0212 00:14:49.803098  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-19: (1.893556ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39216]
I0212 00:14:49.803105  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-21: (1.264197ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39224]
I0212 00:14:49.803393  123634 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 00:14:49.803592  123634 scheduling_queue.go:868] About to try and schedule pod preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-16
I0212 00:14:49.803617  123634 scheduler.go:453] Attempting to schedule pod: preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-16
I0212 00:14:49.803720  123634 factory.go:647] Unable to schedule preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-16: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 00:14:49.803768  123634 factory.go:742] Updating pod condition for preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-16 to (PodScheduled==False, Reason=Unschedulable)
I0212 00:14:49.804958  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-22: (1.259622ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39212]
I0212 00:14:49.811032  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-16: (6.649075ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39220]
I0212 00:14:49.811797  123634 wrap.go:47] PATCH /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/events/ppod-16.1582759178126bdc: (6.730451ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39228]
I0212 00:14:49.811944  123634 wrap.go:47] PUT /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-16/status: (7.166952ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39226]
I0212 00:14:49.811944  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-23: (1.53086ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39212]
I0212 00:14:49.813960  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-16: (1.504545ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39228]
I0212 00:14:49.814071  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-24: (1.554758ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39220]
I0212 00:14:49.814245  123634 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 00:14:49.814392  123634 scheduling_queue.go:868] About to try and schedule pod preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-19
I0212 00:14:49.814416  123634 scheduler.go:453] Attempting to schedule pod: preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-19
I0212 00:14:49.814540  123634 factory.go:647] Unable to schedule preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-19: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 00:14:49.814636  123634 factory.go:742] Updating pod condition for preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-19 to (PodScheduled==False, Reason=Unschedulable)
I0212 00:14:49.815812  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-25: (1.449561ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39220]
I0212 00:14:49.817347  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-19: (1.911624ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39230]
I0212 00:14:49.817479  123634 wrap.go:47] PUT /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-19/status: (2.550581ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39228]
I0212 00:14:49.818030  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-26: (1.195262ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39220]
I0212 00:14:49.818169  123634 wrap.go:47] PATCH /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/events/ppod-19.158275918efe62ec: (2.739182ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39232]
I0212 00:14:49.818941  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-19: (1.025205ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39230]
I0212 00:14:49.819231  123634 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 00:14:49.819471  123634 scheduling_queue.go:868] About to try and schedule pod preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-18
I0212 00:14:49.819499  123634 scheduler.go:453] Attempting to schedule pod: preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-18
I0212 00:14:49.819666  123634 factory.go:647] Unable to schedule preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-18: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 00:14:49.819686  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-27: (1.302536ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39220]
I0212 00:14:49.819731  123634 factory.go:742] Updating pod condition for preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-18 to (PodScheduled==False, Reason=Unschedulable)
I0212 00:14:49.821098  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-18: (1.123271ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39230]
I0212 00:14:49.822048  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-28: (1.787927ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39234]
I0212 00:14:49.822681  123634 wrap.go:47] PUT /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-18/status: (2.711319ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39228]
I0212 00:14:49.823165  123634 wrap.go:47] POST /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/events: (2.825251ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39236]
I0212 00:14:49.823861  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-29: (1.249308ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39234]
I0212 00:14:49.824897  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-18: (1.075088ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39228]
I0212 00:14:49.825188  123634 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 00:14:49.825352  123634 scheduling_queue.go:868] About to try and schedule pod preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-14
I0212 00:14:49.825380  123634 scheduler.go:453] Attempting to schedule pod: preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-14
I0212 00:14:49.825509  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-30: (1.172616ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39234]
I0212 00:14:49.825499  123634 factory.go:647] Unable to schedule preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-14: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 00:14:49.825672  123634 factory.go:742] Updating pod condition for preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-14 to (PodScheduled==False, Reason=Unschedulable)
I0212 00:14:49.827321  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-14: (1.36574ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39228]
I0212 00:14:49.827425  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-31: (1.17017ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39240]
I0212 00:14:49.829211  123634 wrap.go:47] PATCH /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/events/ppod-14.1582759177b60e32: (2.92926ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39242]
I0212 00:14:49.829365  123634 wrap.go:47] PUT /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-14/status: (3.335498ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39230]
I0212 00:14:49.829610  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-32: (1.674252ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39240]
I0212 00:14:49.831329  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-14: (1.278186ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39242]
I0212 00:14:49.831524  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-33: (1.278538ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39228]
I0212 00:14:49.831631  123634 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 00:14:49.831810  123634 scheduling_queue.go:868] About to try and schedule pod preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-18
I0212 00:14:49.831831  123634 scheduler.go:453] Attempting to schedule pod: preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-18
I0212 00:14:49.832167  123634 factory.go:647] Unable to schedule preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-18: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 00:14:49.832258  123634 factory.go:742] Updating pod condition for preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-18 to (PodScheduled==False, Reason=Unschedulable)
I0212 00:14:49.833770  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-18: (1.296234ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39242]
I0212 00:14:49.834670  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-34: (2.480978ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39228]
I0212 00:14:49.835806  123634 wrap.go:47] PATCH /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/events/ppod-18.1582759190405669: (2.554944ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39246]
I0212 00:14:49.836224  123634 wrap.go:47] PUT /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-18/status: (3.098097ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39244]
I0212 00:14:49.836348  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-35: (1.350396ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39228]
I0212 00:14:49.837976  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-36: (1.187906ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39246]
I0212 00:14:49.837982  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-18: (1.170326ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39242]
I0212 00:14:49.838228  123634 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 00:14:49.838374  123634 scheduling_queue.go:868] About to try and schedule pod preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-12
I0212 00:14:49.838393  123634 scheduler.go:453] Attempting to schedule pod: preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-12
I0212 00:14:49.838473  123634 factory.go:647] Unable to schedule preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-12: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 00:14:49.838576  123634 factory.go:742] Updating pod condition for preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-12 to (PodScheduled==False, Reason=Unschedulable)
I0212 00:14:49.841097  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-37: (2.661034ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39246]
I0212 00:14:49.841137  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-12: (1.675511ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39248]
I0212 00:14:49.841216  123634 wrap.go:47] PUT /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-12/status: (2.338031ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39242]
I0212 00:14:49.842347  123634 wrap.go:47] PATCH /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/events/ppod-12.15827591773712f4: (2.964274ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39250]
I0212 00:14:49.843494  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-38: (1.231307ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39248]
I0212 00:14:49.843541  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-12: (1.330456ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39242]
I0212 00:14:49.843886  123634 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 00:14:49.844080  123634 scheduling_queue.go:868] About to try and schedule pod preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-15
I0212 00:14:49.844102  123634 scheduler.go:453] Attempting to schedule pod: preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-15
I0212 00:14:49.844255  123634 factory.go:647] Unable to schedule preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-15: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 00:14:49.844325  123634 factory.go:742] Updating pod condition for preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-15 to (PodScheduled==False, Reason=Unschedulable)
I0212 00:14:49.845148  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-39: (1.12828ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39248]
I0212 00:14:49.845887  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-15: (1.339814ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39250]
I0212 00:14:49.847138  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-40: (1.242824ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39258]
I0212 00:14:49.847182  123634 wrap.go:47] POST /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/events: (1.5969ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39248]
I0212 00:14:49.847803  123634 wrap.go:47] PUT /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-15/status: (2.761254ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39252]
I0212 00:14:49.848894  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-41: (1.089393ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39254]
I0212 00:14:49.849361  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-15: (1.09482ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39252]
I0212 00:14:49.849643  123634 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 00:14:49.849815  123634 scheduling_queue.go:868] About to try and schedule pod preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-13
I0212 00:14:49.849834  123634 scheduler.go:453] Attempting to schedule pod: preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-13
I0212 00:14:49.849949  123634 factory.go:647] Unable to schedule preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-13: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 00:14:49.850020  123634 factory.go:742] Updating pod condition for preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-13 to (PodScheduled==False, Reason=Unschedulable)
I0212 00:14:49.850749  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-42: (1.411193ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39254]
I0212 00:14:49.852827  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-13: (2.572016ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39252]
I0212 00:14:49.853006  123634 wrap.go:47] PUT /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-13/status: (2.771636ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39250]
I0212 00:14:49.853738  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-43: (1.683246ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39254]
I0212 00:14:49.854103  123634 wrap.go:47] POST /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/events: (3.418786ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39260]
I0212 00:14:49.856443  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-13: (2.924692ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39250]
I0212 00:14:49.856900  123634 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 00:14:49.857047  123634 scheduling_queue.go:868] About to try and schedule pod preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-15
I0212 00:14:49.857059  123634 scheduler.go:453] Attempting to schedule pod: preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-15
I0212 00:14:49.857142  123634 factory.go:647] Unable to schedule preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-15: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 00:14:49.857184  123634 factory.go:742] Updating pod condition for preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-15 to (PodScheduled==False, Reason=Unschedulable)
I0212 00:14:49.857198  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-44: (1.217091ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39254]
I0212 00:14:49.858825  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-15: (1.106939ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39252]
I0212 00:14:49.859722  123634 wrap.go:47] PUT /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-15/status: (2.201532ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39250]
I0212 00:14:49.859880  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-45: (1.525953ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39262]
I0212 00:14:49.860250  123634 wrap.go:47] PATCH /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/events/ppod-15.1582759191b7a940: (2.373543ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39254]
I0212 00:14:49.861585  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-46: (1.310942ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39252]
I0212 00:14:49.861623  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-15: (1.347506ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39250]
I0212 00:14:49.861819  123634 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 00:14:49.861973  123634 scheduling_queue.go:868] About to try and schedule pod preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-13
I0212 00:14:49.861990  123634 scheduler.go:453] Attempting to schedule pod: preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-13
I0212 00:14:49.862061  123634 factory.go:647] Unable to schedule preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-13: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 00:14:49.862119  123634 factory.go:742] Updating pod condition for preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-13 to (PodScheduled==False, Reason=Unschedulable)
I0212 00:14:49.863541  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-47: (1.487745ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39250]
I0212 00:14:49.864032  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-13: (1.347943ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39264]
I0212 00:14:49.865207  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-48: (1.091175ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39250]
I0212 00:14:49.866096  123634 wrap.go:47] PATCH /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/events/ppod-13.15827591920e7846: (2.447434ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39266]
I0212 00:14:49.866109  123634 wrap.go:47] PUT /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-13/status: (3.140194ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39254]
I0212 00:14:49.867026  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-49: (1.182239ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39250]
I0212 00:14:49.867324  123634 preemption_test.go:598] Cleaning up all pods...
I0212 00:14:49.867888  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-13: (1.374984ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39254]
I0212 00:14:49.868239  123634 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 00:14:49.868504  123634 scheduling_queue.go:868] About to try and schedule pod preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-11
I0212 00:14:49.868528  123634 scheduler.go:453] Attempting to schedule pod: preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-11
I0212 00:14:49.868659  123634 factory.go:647] Unable to schedule preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-11: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 00:14:49.868722  123634 factory.go:742] Updating pod condition for preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-11 to (PodScheduled==False, Reason=Unschedulable)
I0212 00:14:49.870815  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-11: (1.778829ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39264]
I0212 00:14:49.871862  123634 wrap.go:47] DELETE /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-0: (4.351543ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39250]
I0212 00:14:49.871878  123634 wrap.go:47] PUT /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-11/status: (2.613128ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39254]
I0212 00:14:49.873093  123634 wrap.go:47] POST /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/events: (3.017079ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39268]
I0212 00:14:49.874303  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-11: (1.278029ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39264]
I0212 00:14:49.874674  123634 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 00:14:49.874874  123634 scheduling_queue.go:868] About to try and schedule pod preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-3
I0212 00:14:49.874914  123634 scheduler.go:453] Attempting to schedule pod: preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-3
I0212 00:14:49.875053  123634 factory.go:647] Unable to schedule preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-3: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 00:14:49.875141  123634 factory.go:742] Updating pod condition for preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-3 to (PodScheduled==False, Reason=Unschedulable)
I0212 00:14:49.878431  123634 wrap.go:47] PUT /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-3/status: (2.490255ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39268]
I0212 00:14:49.878935  123634 wrap.go:47] DELETE /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-1: (6.399866ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39254]
I0212 00:14:49.879168  123634 wrap.go:47] PATCH /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/events/ppod-3.158275917568ccc0: (3.074272ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39270]
I0212 00:14:49.879303  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-3: (3.594897ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39264]
I0212 00:14:49.881154  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-3: (1.232624ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39268]
I0212 00:14:49.881422  123634 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 00:14:49.881753  123634 scheduling_queue.go:868] About to try and schedule pod preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-9
I0212 00:14:49.881775  123634 scheduler.go:453] Attempting to schedule pod: preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-9
I0212 00:14:49.881940  123634 factory.go:647] Unable to schedule preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-9: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 00:14:49.881993  123634 factory.go:742] Updating pod condition for preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-9 to (PodScheduled==False, Reason=Unschedulable)
I0212 00:14:49.885058  123634 wrap.go:47] POST /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/events: (2.086243ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39270]
I0212 00:14:49.885279  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-9: (2.927062ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39264]
I0212 00:14:49.885910  123634 wrap.go:47] PUT /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-9/status: (2.458481ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39272]
I0212 00:14:49.886410  123634 wrap.go:47] DELETE /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-2: (6.218546ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39254]
I0212 00:14:49.887734  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-9: (1.377133ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39264]
I0212 00:14:49.888023  123634 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 00:14:49.890056  123634 scheduling_queue.go:868] About to try and schedule pod preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-7
I0212 00:14:49.890073  123634 scheduler.go:453] Attempting to schedule pod: preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-7
I0212 00:14:49.890168  123634 factory.go:647] Unable to schedule preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-7: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 00:14:49.890239  123634 factory.go:742] Updating pod condition for preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-7 to (PodScheduled==False, Reason=Unschedulable)
I0212 00:14:49.891870  123634 wrap.go:47] DELETE /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-3: (4.558346ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39254]
I0212 00:14:49.892632  123634 wrap.go:47] POST /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/events: (1.561376ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39274]
I0212 00:14:49.894435  123634 wrap.go:47] PUT /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-7/status: (2.443728ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39264]
I0212 00:14:49.897677  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-7: (5.90087ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39270]
I0212 00:14:49.899602  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-7: (2.309182ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39264]
I0212 00:14:49.900031  123634 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 00:14:49.900205  123634 scheduling_queue.go:868] About to try and schedule pod preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-4
I0212 00:14:49.900251  123634 scheduler.go:449] Skip schedule deleting pod: preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-4
I0212 00:14:49.900338  123634 scheduling_queue.go:868] About to try and schedule pod preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-7
I0212 00:14:49.901007  123634 scheduler.go:453] Attempting to schedule pod: preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-7
I0212 00:14:49.901230  123634 factory.go:647] Unable to schedule preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-7: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 00:14:49.902082  123634 wrap.go:47] DELETE /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-4: (9.1044ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39254]
I0212 00:14:49.902137  123634 factory.go:742] Updating pod condition for preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-7 to (PodScheduled==False, Reason=Unschedulable)
I0212 00:14:49.902917  123634 wrap.go:47] POST /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/events: (1.577258ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39270]
I0212 00:14:49.903925  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-7: (1.541971ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39254]
I0212 00:14:49.906258  123634 wrap.go:47] PATCH /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/events/ppod-7.1582759194740828: (2.503315ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39270]
I0212 00:14:49.907300  123634 wrap.go:47] DELETE /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-5: (4.857607ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39274]
I0212 00:14:49.909241  123634 wrap.go:47] PUT /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-7/status: (3.102874ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39254]
I0212 00:14:49.911075  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-7: (1.312622ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39254]
I0212 00:14:49.911498  123634 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 00:14:49.911754  123634 scheduling_queue.go:868] About to try and schedule pod preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-6
I0212 00:14:49.911847  123634 scheduler.go:449] Skip schedule deleting pod: preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-6
I0212 00:14:49.912335  123634 wrap.go:47] DELETE /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-6: (4.74789ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39274]
I0212 00:14:49.912923  123634 scheduling_queue.go:868] About to try and schedule pod preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-9
I0212 00:14:49.912939  123634 scheduler.go:453] Attempting to schedule pod: preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-9
I0212 00:14:49.913053  123634 factory.go:647] Unable to schedule preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-9: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 00:14:49.913125  123634 factory.go:742] Updating pod condition for preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-9 to (PodScheduled==False, Reason=Unschedulable)
I0212 00:14:49.914262  123634 wrap.go:47] POST /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/events: (2.05299ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39254]
I0212 00:14:49.918690  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-9: (3.683995ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39254]
I0212 00:14:49.919904  123634 wrap.go:47] PATCH /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/events/ppod-9.1582759193f65c91: (4.012962ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39276]
I0212 00:14:49.923443  123634 wrap.go:47] DELETE /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-7: (10.721634ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39274]
I0212 00:14:49.924109  123634 wrap.go:47] PUT /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-9/status: (10.294166ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39270]
I0212 00:14:49.926622  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-9: (1.872982ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39270]
I0212 00:14:49.926971  123634 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 00:14:49.927231  123634 scheduling_queue.go:868] About to try and schedule pod preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-8
I0212 00:14:49.927277  123634 scheduler.go:449] Skip schedule deleting pod: preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-8
I0212 00:14:49.927312  123634 scheduling_queue.go:868] About to try and schedule pod preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-11
I0212 00:14:49.927321  123634 scheduler.go:453] Attempting to schedule pod: preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-11
I0212 00:14:49.929601  123634 factory.go:647] Unable to schedule preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-11: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 00:14:49.929647  123634 factory.go:742] Updating pod condition for preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-11 to (PodScheduled==False, Reason=Unschedulable)
I0212 00:14:49.929899  123634 wrap.go:47] POST /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/events: (1.770022ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39270]
I0212 00:14:49.931844  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-11: (1.542475ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39270]
I0212 00:14:49.932392  123634 wrap.go:47] PUT /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-11/status: (2.464212ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39254]
I0212 00:14:49.932864  123634 wrap.go:47] DELETE /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-8: (8.82541ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39276]
I0212 00:14:49.934534  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-11: (1.285548ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39270]
I0212 00:14:49.935151  123634 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 00:14:49.935620  123634 wrap.go:47] PATCH /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/events/ppod-11.15827591932be9fd: (3.801682ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39280]
I0212 00:14:49.937583  123634 scheduling_queue.go:868] About to try and schedule pod preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-9
I0212 00:14:49.937667  123634 scheduler.go:449] Skip schedule deleting pod: preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-9
I0212 00:14:49.939815  123634 wrap.go:47] POST /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/events: (1.882022ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39270]
I0212 00:14:49.940176  123634 wrap.go:47] DELETE /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-9: (6.583099ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39276]
I0212 00:14:49.944191  123634 scheduling_queue.go:868] About to try and schedule pod preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-10
I0212 00:14:49.944247  123634 scheduler.go:449] Skip schedule deleting pod: preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-10
I0212 00:14:49.946182  123634 wrap.go:47] DELETE /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-10: (5.510918ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39270]
I0212 00:14:49.947924  123634 wrap.go:47] POST /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/events: (2.337633ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39278]
I0212 00:14:49.950599  123634 scheduling_queue.go:868] About to try and schedule pod preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-11
I0212 00:14:49.950699  123634 scheduler.go:449] Skip schedule deleting pod: preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-11
I0212 00:14:49.952947  123634 wrap.go:47] DELETE /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-11: (6.416604ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39270]
I0212 00:14:49.953694  123634 wrap.go:47] POST /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/events: (2.410085ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39278]
I0212 00:14:49.962466  123634 scheduling_queue.go:868] About to try and schedule pod preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-12
I0212 00:14:49.962595  123634 scheduler.go:449] Skip schedule deleting pod: preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-12
I0212 00:14:49.964447  123634 wrap.go:47] DELETE /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-12: (10.884038ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39270]
I0212 00:14:49.964824  123634 wrap.go:47] POST /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/events: (1.85815ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39278]
I0212 00:14:49.968166  123634 scheduling_queue.go:868] About to try and schedule pod preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-13
I0212 00:14:49.968236  123634 scheduler.go:449] Skip schedule deleting pod: preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-13
I0212 00:14:49.970259  123634 wrap.go:47] POST /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/events: (1.679474ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39278]
I0212 00:14:49.971041  123634 wrap.go:47] DELETE /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-13: (6.094687ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39270]
I0212 00:14:49.974725  123634 scheduling_queue.go:868] About to try and schedule pod preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-14
I0212 00:14:49.974781  123634 scheduler.go:449] Skip schedule deleting pod: preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-14
I0212 00:14:49.976855  123634 wrap.go:47] POST /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/events: (1.788531ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39278]
I0212 00:14:49.976983  123634 wrap.go:47] DELETE /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-14: (5.380113ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39270]
I0212 00:14:49.980721  123634 scheduling_queue.go:868] About to try and schedule pod preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-15
I0212 00:14:49.980775  123634 scheduler.go:449] Skip schedule deleting pod: preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-15
I0212 00:14:49.981990  123634 wrap.go:47] DELETE /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-15: (4.642701ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39270]
I0212 00:14:49.982706  123634 wrap.go:47] POST /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/events: (1.600448ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39278]
I0212 00:14:49.985303  123634 scheduling_queue.go:868] About to try and schedule pod preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-16
I0212 00:14:49.985342  123634 scheduler.go:449] Skip schedule deleting pod: preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-16
I0212 00:14:49.986730  123634 wrap.go:47] DELETE /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-16: (4.332461ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39270]
I0212 00:14:49.987331  123634 wrap.go:47] POST /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/events: (1.55027ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39278]
I0212 00:14:49.990703  123634 scheduling_queue.go:868] About to try and schedule pod preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-17
I0212 00:14:49.990854  123634 scheduler.go:449] Skip schedule deleting pod: preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-17
I0212 00:14:49.990990  123634 wrap.go:47] DELETE /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-17: (3.832293ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39270]
I0212 00:14:49.992835  123634 wrap.go:47] POST /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/events: (1.619418ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39278]
I0212 00:14:49.994719  123634 scheduling_queue.go:868] About to try and schedule pod preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-18
I0212 00:14:49.994780  123634 scheduler.go:449] Skip schedule deleting pod: preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-18
I0212 00:14:49.996105  123634 wrap.go:47] DELETE /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-18: (4.754868ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39270]
I0212 00:14:49.996770  123634 wrap.go:47] POST /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/events: (1.723027ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39278]
I0212 00:14:49.999591  123634 scheduling_queue.go:868] About to try and schedule pod preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-19
I0212 00:14:49.999674  123634 scheduler.go:449] Skip schedule deleting pod: preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-19
I0212 00:14:50.001026  123634 wrap.go:47] DELETE /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-19: (4.508894ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39270]
I0212 00:14:50.002870  123634 wrap.go:47] POST /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/events: (2.482162ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39278]
I0212 00:14:50.004798  123634 scheduling_queue.go:868] About to try and schedule pod preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-20
I0212 00:14:50.004864  123634 scheduler.go:449] Skip schedule deleting pod: preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-20
I0212 00:14:50.006409  123634 wrap.go:47] DELETE /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-20: (4.889929ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39270]
I0212 00:14:50.007129  123634 wrap.go:47] POST /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/events: (1.891926ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39278]
I0212 00:14:50.010261  123634 scheduling_queue.go:868] About to try and schedule pod preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-21
I0212 00:14:50.010315  123634 scheduler.go:449] Skip schedule deleting pod: preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-21
I0212 00:14:50.012104  123634 wrap.go:47] DELETE /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-21: (5.261229ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39270]
I0212 00:14:50.012664  123634 wrap.go:47] POST /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/events: (1.869844ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39278]
I0212 00:14:50.015752  123634 scheduling_queue.go:868] About to try and schedule pod preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-22
I0212 00:14:50.015847  123634 scheduler.go:449] Skip schedule deleting pod: preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-22
I0212 00:14:50.017163  123634 wrap.go:47] DELETE /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-22: (4.423383ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39270]
I0212 00:14:50.018141  123634 wrap.go:47] POST /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/events: (2.006556ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39278]
I0212 00:14:50.020419  123634 scheduling_queue.go:868] About to try and schedule pod preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-23
I0212 00:14:50.020512  123634 scheduler.go:449] Skip schedule deleting pod: preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-23
I0212 00:14:50.021977  123634 wrap.go:47] DELETE /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-23: (4.451331ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39270]
I0212 00:14:50.022758  123634 wrap.go:47] POST /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/events: (1.830942ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39278]
I0212 00:14:50.025354  123634 scheduling_queue.go:868] About to try and schedule pod preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-24
I0212 00:14:50.025389  123634 scheduler.go:449] Skip schedule deleting pod: preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-24
I0212 00:14:50.027159  123634 wrap.go:47] DELETE /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-24: (4.610309ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39270]
I0212 00:14:50.027429  123634 wrap.go:47] POST /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/events: (1.691332ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39278]
I0212 00:14:50.030319  123634 scheduling_queue.go:868] About to try and schedule pod preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-25
I0212 00:14:50.030360  123634 scheduler.go:449] Skip schedule deleting pod: preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-25
I0212 00:14:50.031637  123634 wrap.go:47] DELETE /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-25: (4.026458ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39270]
I0212 00:14:50.032248  123634 wrap.go:47] POST /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/events: (1.569991ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39278]
I0212 00:14:50.035179  123634 scheduling_queue.go:868] About to try and schedule pod preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-26
I0212 00:14:50.035246  123634 scheduler.go:449] Skip schedule deleting pod: preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-26
I0212 00:14:50.036328  123634 wrap.go:47] DELETE /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-26: (4.181657ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39270]
I0212 00:14:50.037257  123634 wrap.go:47] POST /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/events: (1.6617ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39278]
I0212 00:14:50.039447  123634 scheduling_queue.go:868] About to try and schedule pod preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-27
I0212 00:14:50.039508  123634 scheduler.go:449] Skip schedule deleting pod: preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-27
I0212 00:14:50.040995  123634 wrap.go:47] DELETE /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-27: (4.249448ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39270]
I0212 00:14:50.041675  123634 wrap.go:47] POST /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/events: (1.806134ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39278]
I0212 00:14:50.044086  123634 scheduling_queue.go:868] About to try and schedule pod preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-28
I0212 00:14:50.044137  123634 scheduler.go:449] Skip schedule deleting pod: preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-28
I0212 00:14:50.045859  123634 wrap.go:47] DELETE /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-28: (4.497822ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39270]
I0212 00:14:50.045861  123634 wrap.go:47] POST /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/events: (1.460395ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39278]
I0212 00:14:50.051590  123634 scheduling_queue.go:868] About to try and schedule pod preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-29
I0212 00:14:50.051710  123634 scheduler.go:449] Skip schedule deleting pod: preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-29
I0212 00:14:50.053818  123634 wrap.go:47] DELETE /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-29: (7.503025ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39270]
I0212 00:14:50.054170  123634 wrap.go:47] POST /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/events: (1.715417ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39278]
I0212 00:14:50.057147  123634 scheduling_queue.go:868] About to try and schedule pod preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-30
I0212 00:14:50.057202  123634 scheduler.go:449] Skip schedule deleting pod: preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-30
I0212 00:14:50.058611  123634 wrap.go:47] DELETE /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-30: (4.474606ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39270]
I0212 00:14:50.058960  123634 wrap.go:47] POST /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/events: (1.350997ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39278]
I0212 00:14:50.061405  123634 scheduling_queue.go:868] About to try and schedule pod preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-31
I0212 00:14:50.061445  123634 scheduler.go:449] Skip schedule deleting pod: preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-31
I0212 00:14:50.063812  123634 wrap.go:47] POST /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/events: (1.804008ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39278]
I0212 00:14:50.064572  123634 wrap.go:47] DELETE /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-31: (5.638295ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39270]
I0212 00:14:50.067829  123634 scheduling_queue.go:868] About to try and schedule pod preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-32
I0212 00:14:50.067869  123634 scheduler.go:449] Skip schedule deleting pod: preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-32
I0212 00:14:50.069843  123634 wrap.go:47] DELETE /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-32: (4.971667ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39270]
I0212 00:14:50.070628  123634 wrap.go:47] POST /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/events: (2.436166ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39278]
I0212 00:14:50.073507  123634 scheduling_queue.go:868] About to try and schedule pod preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-33
I0212 00:14:50.073572  123634 scheduler.go:449] Skip schedule deleting pod: preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-33
I0212 00:14:50.075444  123634 wrap.go:47] POST /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/events: (1.624123ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39278]
I0212 00:14:50.075574  123634 wrap.go:47] DELETE /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-33: (5.000999ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39270]
I0212 00:14:50.079599  123634 scheduling_queue.go:868] About to try and schedule pod preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-34
I0212 00:14:50.079638  123634 scheduler.go:449] Skip schedule deleting pod: preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-34
I0212 00:14:50.081392  123634 wrap.go:47] POST /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/events: (1.559226ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39278]
I0212 00:14:50.082207  123634 wrap.go:47] DELETE /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-34: (5.503062ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39270]
I0212 00:14:50.085121  123634 scheduling_queue.go:868] About to try and schedule pod preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-35
I0212 00:14:50.085157  123634 scheduler.go:449] Skip schedule deleting pod: preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-35
I0212 00:14:50.086316  123634 wrap.go:47] DELETE /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-35: (3.793475ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39270]
I0212 00:14:50.087102  123634 wrap.go:47] POST /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/events: (1.721028ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39278]
I0212 00:14:50.089532  123634 scheduling_queue.go:868] About to try and schedule pod preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-36
I0212 00:14:50.089620  123634 scheduler.go:449] Skip schedule deleting pod: preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-36
I0212 00:14:50.091172  123634 wrap.go:47] DELETE /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-36: (4.479551ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39270]
I0212 00:14:50.091846  123634 wrap.go:47] POST /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/events: (2.019665ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39278]
I0212 00:14:50.094150  123634 scheduling_queue.go:868] About to try and schedule pod preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-37
I0212 00:14:50.094185  123634 scheduler.go:449] Skip schedule deleting pod: preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-37
I0212 00:14:50.095748  123634 wrap.go:47] DELETE /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-37: (4.238212ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39270]
I0212 00:14:50.095876  123634 wrap.go:47] POST /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/events: (1.477245ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39278]
I0212 00:14:50.098804  123634 scheduling_queue.go:868] About to try and schedule pod preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-38
I0212 00:14:50.098845  123634 scheduler.go:449] Skip schedule deleting pod: preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-38
I0212 00:14:50.100170  123634 wrap.go:47] DELETE /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-38: (4.083141ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39270]
I0212 00:14:50.100809  123634 wrap.go:47] POST /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/events: (1.556562ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39278]
I0212 00:14:50.104034  123634 scheduling_queue.go:868] About to try and schedule pod preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-39
I0212 00:14:50.104124  123634 scheduler.go:449] Skip schedule deleting pod: preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-39
I0212 00:14:50.105378  123634 wrap.go:47] DELETE /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-39: (4.631814ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39270]
I0212 00:14:50.106098  123634 wrap.go:47] POST /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/events: (1.719628ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39278]
I0212 00:14:50.109106  123634 scheduling_queue.go:868] About to try and schedule pod preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-40
I0212 00:14:50.109177  123634 scheduler.go:449] Skip schedule deleting pod: preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-40
I0212 00:14:50.110308  123634 wrap.go:47] DELETE /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-40: (4.268692ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39270]
I0212 00:14:50.111097  123634 wrap.go:47] POST /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/events: (1.577057ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39278]
I0212 00:14:50.113520  123634 scheduling_queue.go:868] About to try and schedule pod preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-41
I0212 00:14:50.113593  123634 scheduler.go:449] Skip schedule deleting pod: preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-41
I0212 00:14:50.114709  123634 wrap.go:47] DELETE /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-41: (3.901091ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39270]
I0212 00:14:50.115507  123634 wrap.go:47] POST /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/events: (1.576693ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39278]
I0212 00:14:50.118193  123634 scheduling_queue.go:868] About to try and schedule pod preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-42
I0212 00:14:50.118239  123634 scheduler.go:449] Skip schedule deleting pod: preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-42
I0212 00:14:50.119758  123634 wrap.go:47] DELETE /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-42: (4.357334ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39270]
I0212 00:14:50.120716  123634 wrap.go:47] POST /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/events: (2.209954ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39278]
I0212 00:14:50.124409  123634 scheduling_queue.go:868] About to try and schedule pod preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-43
I0212 00:14:50.124452  123634 scheduler.go:449] Skip schedule deleting pod: preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-43
I0212 00:14:50.126226  123634 wrap.go:47] DELETE /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-43: (5.567967ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39270]
I0212 00:14:50.126621  123634 wrap.go:47] POST /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/events: (1.781675ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39278]
I0212 00:14:50.129381  123634 scheduling_queue.go:868] About to try and schedule pod preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-44
I0212 00:14:50.129415  123634 scheduler.go:449] Skip schedule deleting pod: preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-44
I0212 00:14:50.130700  123634 wrap.go:47] DELETE /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-44: (3.912226ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39270]
I0212 00:14:50.131065  123634 wrap.go:47] POST /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/events: (1.334642ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39278]
I0212 00:14:50.134208  123634 scheduling_queue.go:868] About to try and schedule pod preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-45
I0212 00:14:50.134257  123634 scheduler.go:449] Skip schedule deleting pod: preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-45
I0212 00:14:50.135995  123634 wrap.go:47] DELETE /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-45: (4.847012ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39270]
I0212 00:14:50.136294  123634 wrap.go:47] POST /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/events: (1.648588ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39278]
I0212 00:14:50.139262  123634 scheduling_queue.go:868] About to try and schedule pod preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-46
I0212 00:14:50.139333  123634 scheduler.go:449] Skip schedule deleting pod: preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-46
I0212 00:14:50.140929  123634 wrap.go:47] DELETE /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-46: (4.336988ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39270]
I0212 00:14:50.141373  123634 wrap.go:47] POST /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/events: (1.699075ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39278]
I0212 00:14:50.144879  123634 scheduling_queue.go:868] About to try and schedule pod preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-47
I0212 00:14:50.145011  123634 scheduler.go:449] Skip schedule deleting pod: preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-47
I0212 00:14:50.146432  123634 wrap.go:47] DELETE /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-47: (5.010511ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39270]
I0212 00:14:50.147011  123634 wrap.go:47] POST /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/events: (1.637814ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39278]
I0212 00:14:50.173469  123634 scheduling_queue.go:868] About to try and schedule pod preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-48
I0212 00:14:50.173572  123634 scheduler.go:449] Skip schedule deleting pod: preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-48
I0212 00:14:50.175248  123634 wrap.go:47] DELETE /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-48: (28.364253ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39270]
I0212 00:14:50.175966  123634 wrap.go:47] POST /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/events: (2.019944ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39278]
I0212 00:14:50.179100  123634 scheduling_queue.go:868] About to try and schedule pod preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-49
I0212 00:14:50.179146  123634 scheduler.go:449] Skip schedule deleting pod: preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-49
I0212 00:14:50.180444  123634 wrap.go:47] DELETE /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-49: (4.768741ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39270]
I0212 00:14:50.181659  123634 wrap.go:47] POST /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/events: (1.843946ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39278]
I0212 00:14:50.203199  123634 wrap.go:47] DELETE /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/rpod-0: (22.221433ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39270]
I0212 00:14:50.207696  123634 wrap.go:47] DELETE /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/rpod-1: (3.02718ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39270]
I0212 00:14:50.222695  123634 wrap.go:47] DELETE /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/preemptor-pod: (14.260176ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39270]
I0212 00:14:50.232837  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-0: (4.287482ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39270]
I0212 00:14:50.241233  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-1: (1.784252ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39270]
I0212 00:14:50.244449  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-2: (1.355233ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39270]
I0212 00:14:50.265400  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-3: (1.48218ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39270]
I0212 00:14:50.268191  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-4: (1.14643ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39270]
I0212 00:14:50.270992  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-5: (1.119544ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39270]
I0212 00:14:50.273720  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-6: (1.06762ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39270]
I0212 00:14:50.276382  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-7: (1.028198ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39270]
I0212 00:14:50.278954  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-8: (976.172µs) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39270]
I0212 00:14:50.281348  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-9: (834.14µs) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39270]
I0212 00:14:50.283795  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-10: (909.296µs) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39270]
I0212 00:14:50.286412  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-11: (1.060563ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39270]
I0212 00:14:50.288909  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-12: (923.009µs) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39270]
I0212 00:14:50.291442  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-13: (1.023182ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39270]
I0212 00:14:50.294390  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-14: (1.042923ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39270]
I0212 00:14:50.296976  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-15: (885.878µs) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39270]
I0212 00:14:50.299529  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-16: (1.018566ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39270]
I0212 00:14:50.302077  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-17: (986.604µs) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39270]
I0212 00:14:50.304509  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-18: (885.888µs) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39270]
I0212 00:14:50.306980  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-19: (923.305µs) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39270]
I0212 00:14:50.309521  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-20: (999.95µs) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39270]
I0212 00:14:50.311991  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-21: (914.479µs) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39270]
I0212 00:14:50.314623  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-22: (967.926µs) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39270]
I0212 00:14:50.317070  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-23: (882.95µs) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39270]
I0212 00:14:50.319615  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-24: (917.759µs) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39270]
I0212 00:14:50.321899  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-25: (783.532µs) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39270]
I0212 00:14:50.324226  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-26: (824.646µs) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39270]
I0212 00:14:50.326653  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-27: (830.91µs) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39270]
I0212 00:14:50.329060  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-28: (882.345µs) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39270]
I0212 00:14:50.331572  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-29: (891.373µs) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39270]
I0212 00:14:50.333933  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-30: (809.355µs) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39270]
I0212 00:14:50.336732  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-31: (1.200316ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39270]
I0212 00:14:50.339508  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-32: (920.757µs) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39270]
I0212 00:14:50.342281  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-33: (1.065305ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39270]
I0212 00:14:50.344833  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-34: (861.682µs) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39270]
I0212 00:14:50.347229  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-35: (855.876µs) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39270]
I0212 00:14:50.349868  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-36: (977.737µs) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39270]
I0212 00:14:50.352372  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-37: (932.041µs) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39270]
I0212 00:14:50.360404  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-38: (3.488855ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39270]
I0212 00:14:50.363184  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-39: (1.041961ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39270]
I0212 00:14:50.366049  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-40: (1.17841ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39270]
I0212 00:14:50.368664  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-41: (1.045769ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39270]
I0212 00:14:50.371705  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-42: (1.25922ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39270]
I0212 00:14:50.374326  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-43: (1.047464ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39270]
I0212 00:14:50.376941  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-44: (1.032348ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39270]
I0212 00:14:50.379533  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-45: (1.058705ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39270]
I0212 00:14:50.382220  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-46: (1.027912ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39270]
I0212 00:14:50.384656  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-47: (863.602µs) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39270]
I0212 00:14:50.387113  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-48: (884.955µs) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39270]
I0212 00:14:50.389645  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-49: (962.382µs) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39270]
I0212 00:14:50.391970  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/rpod-0: (809.074µs) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39270]
I0212 00:14:50.394464  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/rpod-1: (944.448µs) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39270]
I0212 00:14:50.396977  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/preemptor-pod: (1.006188ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39270]
I0212 00:14:50.399364  123634 wrap.go:47] POST /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods: (1.905549ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39270]
I0212 00:14:50.399647  123634 scheduling_queue.go:868] About to try and schedule pod preemption-race3458f067-2e5b-11e9-8330-0242ac110002/rpod-0
I0212 00:14:50.399672  123634 scheduler.go:453] Attempting to schedule pod: preemption-race3458f067-2e5b-11e9-8330-0242ac110002/rpod-0
I0212 00:14:50.399792  123634 scheduler_binder.go:269] AssumePodVolumes for pod "preemption-race3458f067-2e5b-11e9-8330-0242ac110002/rpod-0", node "node1"
I0212 00:14:50.399815  123634 scheduler_binder.go:279] AssumePodVolumes for pod "preemption-race3458f067-2e5b-11e9-8330-0242ac110002/rpod-0", node "node1": all PVCs bound and nothing to do
I0212 00:14:50.399876  123634 factory.go:733] Attempting to bind rpod-0 to node1
I0212 00:14:50.401697  123634 wrap.go:47] POST /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods: (1.837944ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39270]
I0212 00:14:50.401847  123634 scheduling_queue.go:868] About to try and schedule pod preemption-race3458f067-2e5b-11e9-8330-0242ac110002/rpod-1
I0212 00:14:50.401864  123634 scheduler.go:453] Attempting to schedule pod: preemption-race3458f067-2e5b-11e9-8330-0242ac110002/rpod-1
I0212 00:14:50.402055  123634 scheduler_binder.go:269] AssumePodVolumes for pod "preemption-race3458f067-2e5b-11e9-8330-0242ac110002/rpod-1", node "node1"
I0212 00:14:50.402077  123634 scheduler_binder.go:279] AssumePodVolumes for pod "preemption-race3458f067-2e5b-11e9-8330-0242ac110002/rpod-1", node "node1": all PVCs bound and nothing to do
I0212 00:14:50.402122  123634 factory.go:733] Attempting to bind rpod-1 to node1
I0212 00:14:50.402538  123634 wrap.go:47] POST /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/rpod-0/binding: (2.381584ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39278]
I0212 00:14:50.402757  123634 scheduler.go:571] pod preemption-race3458f067-2e5b-11e9-8330-0242ac110002/rpod-0 is bound successfully on node node1, 1 nodes evaluated, 1 nodes were found feasible
I0212 00:14:50.403883  123634 wrap.go:47] POST /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/rpod-1/binding: (1.545225ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39270]
I0212 00:14:50.404108  123634 scheduler.go:571] pod preemption-race3458f067-2e5b-11e9-8330-0242ac110002/rpod-1 is bound successfully on node node1, 1 nodes evaluated, 1 nodes were found feasible
I0212 00:14:50.404377  123634 wrap.go:47] POST /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/events: (1.384531ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39278]
I0212 00:14:50.406015  123634 wrap.go:47] POST /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/events: (1.24526ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39278]
I0212 00:14:50.504772  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/rpod-0: (2.021036ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39278]
I0212 00:14:50.607746  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/rpod-1: (2.078664ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39278]
I0212 00:14:50.608080  123634 preemption_test.go:561] Creating the preemptor pod...
I0212 00:14:50.610341  123634 wrap.go:47] POST /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods: (2.006346ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39278]
I0212 00:14:50.610587  123634 scheduling_queue.go:868] About to try and schedule pod preemption-race3458f067-2e5b-11e9-8330-0242ac110002/preemptor-pod
I0212 00:14:50.610612  123634 scheduler.go:453] Attempting to schedule pod: preemption-race3458f067-2e5b-11e9-8330-0242ac110002/preemptor-pod
I0212 00:14:50.610647  123634 preemption_test.go:567] Creating additional pods...
I0212 00:14:50.610743  123634 factory.go:647] Unable to schedule preemption-race3458f067-2e5b-11e9-8330-0242ac110002/preemptor-pod: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 00:14:50.610801  123634 factory.go:742] Updating pod condition for preemption-race3458f067-2e5b-11e9-8330-0242ac110002/preemptor-pod to (PodScheduled==False, Reason=Unschedulable)
I0212 00:14:50.612569  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/preemptor-pod: (1.552319ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39270]
I0212 00:14:50.612681  123634 wrap.go:47] POST /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods: (1.7762ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39278]
I0212 00:14:50.613811  123634 wrap.go:47] PUT /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/preemptor-pod/status: (2.364354ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39286]
I0212 00:14:50.613993  123634 wrap.go:47] POST /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/events: (2.269834ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39288]
I0212 00:14:50.615656  123634 wrap.go:47] POST /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods: (2.49556ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39278]
I0212 00:14:50.616271  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/preemptor-pod: (1.875007ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39286]
I0212 00:14:50.616526  123634 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 00:14:50.617990  123634 wrap.go:47] POST /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods: (1.877113ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39288]
I0212 00:14:50.618914  123634 wrap.go:47] PUT /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/preemptor-pod/status: (1.917061ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39286]
I0212 00:14:50.619954  123634 wrap.go:47] POST /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods: (1.519766ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39288]
I0212 00:14:50.622199  123634 wrap.go:47] POST /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods: (1.869408ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39288]
I0212 00:14:50.624189  123634 wrap.go:47] DELETE /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/rpod-1: (4.49101ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39286]
I0212 00:14:50.624781  123634 wrap.go:47] POST /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods: (1.794109ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39288]
I0212 00:14:50.630330  123634 scheduling_queue.go:868] About to try and schedule pod preemption-race3458f067-2e5b-11e9-8330-0242ac110002/preemptor-pod
I0212 00:14:50.630418  123634 scheduler.go:453] Attempting to schedule pod: preemption-race3458f067-2e5b-11e9-8330-0242ac110002/preemptor-pod
I0212 00:14:50.630657  123634 scheduler_binder.go:269] AssumePodVolumes for pod "preemption-race3458f067-2e5b-11e9-8330-0242ac110002/preemptor-pod", node "node1"
I0212 00:14:50.630739  123634 scheduler_binder.go:279] AssumePodVolumes for pod "preemption-race3458f067-2e5b-11e9-8330-0242ac110002/preemptor-pod", node "node1": all PVCs bound and nothing to do
I0212 00:14:50.630885  123634 scheduling_queue.go:868] About to try and schedule pod preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-4
I0212 00:14:50.630939  123634 scheduler.go:453] Attempting to schedule pod: preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-4
I0212 00:14:50.631120  123634 factory.go:647] Unable to schedule preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-4: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 00:14:50.631209  123634 factory.go:742] Updating pod condition for preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-4 to (PodScheduled==False, Reason=Unschedulable)
I0212 00:14:50.631616  123634 factory.go:733] Attempting to bind preemptor-pod to node1
I0212 00:14:50.638298  123634 wrap.go:47] POST /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/events: (13.644368ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39286]
I0212 00:14:50.640260  123634 wrap.go:47] POST /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/preemptor-pod/binding: (2.310046ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39290]
I0212 00:14:50.640696  123634 scheduler.go:571] pod preemption-race3458f067-2e5b-11e9-8330-0242ac110002/preemptor-pod is bound successfully on node node1, 1 nodes evaluated, 1 nodes were found feasible
I0212 00:14:50.640785  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-4: (1.967673ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39286]
I0212 00:14:50.641085  123634 wrap.go:47] PUT /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-4/status: (3.27816ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39270]
I0212 00:14:50.641164  123634 wrap.go:47] POST /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods: (3.25285ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39288]
I0212 00:14:50.641413  123634 wrap.go:47] POST /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/events: (1.430637ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39292]
I0212 00:14:50.642873  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-4: (1.323906ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39290]
I0212 00:14:50.643587  123634 wrap.go:47] POST /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods: (1.820871ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39292]
I0212 00:14:50.643634  123634 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 00:14:50.643884  123634 scheduling_queue.go:868] About to try and schedule pod preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-3
I0212 00:14:50.643935  123634 scheduler.go:453] Attempting to schedule pod: preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-3
I0212 00:14:50.644084  123634 wrap.go:47] POST /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/events: (2.158653ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39294]
I0212 00:14:50.644101  123634 factory.go:647] Unable to schedule preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-3: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 00:14:50.644169  123634 factory.go:742] Updating pod condition for preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-3 to (PodScheduled==False, Reason=Unschedulable)
I0212 00:14:50.653700  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-3: (1.761037ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39296]
I0212 00:14:50.654117  123634 wrap.go:47] POST /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/events: (1.908745ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39294]
I0212 00:14:50.654133  123634 wrap.go:47] POST /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods: (2.318409ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39292]
I0212 00:14:50.654852  123634 wrap.go:47] PUT /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-3/status: (3.061149ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39290]
I0212 00:14:50.656236  123634 wrap.go:47] POST /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods: (1.56676ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39296]
I0212 00:14:50.656652  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-3: (1.376941ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39294]
I0212 00:14:50.656966  123634 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 00:14:50.657173  123634 scheduling_queue.go:868] About to try and schedule pod preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-7
I0212 00:14:50.657200  123634 scheduler.go:453] Attempting to schedule pod: preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-7
I0212 00:14:50.657283  123634 factory.go:647] Unable to schedule preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-7: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 00:14:50.657335  123634 factory.go:742] Updating pod condition for preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-7 to (PodScheduled==False, Reason=Unschedulable)
I0212 00:14:50.658424  123634 wrap.go:47] POST /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods: (1.55232ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39296]
I0212 00:14:50.658817  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-7: (1.068422ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39292]
I0212 00:14:50.659607  123634 wrap.go:47] POST /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/events: (1.445557ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39298]
I0212 00:14:50.660158  123634 wrap.go:47] PUT /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-7/status: (2.417277ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39294]
I0212 00:14:50.661252  123634 wrap.go:47] POST /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods: (2.082193ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39296]
I0212 00:14:50.662193  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-7: (1.091747ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39298]
I0212 00:14:50.662417  123634 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 00:14:50.662697  123634 scheduling_queue.go:868] About to try and schedule pod preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-9
I0212 00:14:50.662722  123634 scheduler.go:453] Attempting to schedule pod: preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-9
I0212 00:14:50.662809  123634 factory.go:647] Unable to schedule preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-9: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 00:14:50.662862  123634 factory.go:742] Updating pod condition for preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-9 to (PodScheduled==False, Reason=Unschedulable)
I0212 00:14:50.663757  123634 wrap.go:47] POST /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods: (2.037982ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39296]
I0212 00:14:50.664991  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-9: (1.288169ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39292]
I0212 00:14:50.665440  123634 wrap.go:47] PUT /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-9/status: (1.751018ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39298]
I0212 00:14:50.666174  123634 wrap.go:47] POST /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods: (2.009385ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39296]
I0212 00:14:50.666582  123634 wrap.go:47] POST /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/events: (2.740834ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39300]
I0212 00:14:50.667129  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-9: (1.31416ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39298]
I0212 00:14:50.667391  123634 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 00:14:50.667635  123634 scheduling_queue.go:868] About to try and schedule pod preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-11
I0212 00:14:50.667659  123634 scheduler.go:453] Attempting to schedule pod: preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-11
I0212 00:14:50.667741  123634 factory.go:647] Unable to schedule preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-11: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 00:14:50.667793  123634 factory.go:742] Updating pod condition for preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-11 to (PodScheduled==False, Reason=Unschedulable)
I0212 00:14:50.668353  123634 wrap.go:47] POST /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods: (1.543509ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39296]
I0212 00:14:50.669516  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-11: (1.360649ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39292]
I0212 00:14:50.670189  123634 wrap.go:47] PUT /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-11/status: (1.86548ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39300]
I0212 00:14:50.670662  123634 wrap.go:47] POST /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/events: (1.573111ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39296]
I0212 00:14:50.671632  123634 wrap.go:47] POST /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods: (2.333336ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39302]
I0212 00:14:50.672305  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-11: (1.232994ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39300]
I0212 00:14:50.672596  123634 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 00:14:50.672751  123634 scheduling_queue.go:868] About to try and schedule pod preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-13
I0212 00:14:50.672808  123634 scheduler.go:453] Attempting to schedule pod: preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-13
I0212 00:14:50.672931  123634 factory.go:647] Unable to schedule preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-13: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 00:14:50.672976  123634 factory.go:742] Updating pod condition for preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-13 to (PodScheduled==False, Reason=Unschedulable)
I0212 00:14:50.674320  123634 wrap.go:47] POST /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods: (2.081806ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39296]
I0212 00:14:50.674399  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-13: (1.098606ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39292]
I0212 00:14:50.675048  123634 wrap.go:47] PUT /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-13/status: (1.817592ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39300]
I0212 00:14:50.675671  123634 wrap.go:47] POST /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/events: (2.058717ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39304]
I0212 00:14:50.676718  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-13: (1.16737ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39300]
I0212 00:14:50.676943  123634 wrap.go:47] POST /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods: (1.70164ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39296]
I0212 00:14:50.677118  123634 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 00:14:50.677371  123634 scheduling_queue.go:868] About to try and schedule pod preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-15
I0212 00:14:50.677415  123634 scheduler.go:453] Attempting to schedule pod: preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-15
I0212 00:14:50.677523  123634 factory.go:647] Unable to schedule preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-15: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 00:14:50.677645  123634 factory.go:742] Updating pod condition for preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-15 to (PodScheduled==False, Reason=Unschedulable)
I0212 00:14:50.679573  123634 wrap.go:47] POST /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods: (2.183716ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39304]
I0212 00:14:50.679935  123634 wrap.go:47] PUT /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-15/status: (2.05955ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39292]
I0212 00:14:50.680092  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-15: (1.954925ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39306]
I0212 00:14:50.682131  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-15: (1.693202ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39306]
I0212 00:14:50.682371  123634 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 00:14:50.682513  123634 wrap.go:47] POST /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/events: (3.503862ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39308]
I0212 00:14:50.682521  123634 scheduling_queue.go:868] About to try and schedule pod preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-17
I0212 00:14:50.682988  123634 scheduler.go:453] Attempting to schedule pod: preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-17
I0212 00:14:50.682567  123634 wrap.go:47] POST /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods: (2.257075ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39304]
I0212 00:14:50.683102  123634 factory.go:647] Unable to schedule preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-17: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 00:14:50.683183  123634 factory.go:742] Updating pod condition for preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-17 to (PodScheduled==False, Reason=Unschedulable)
I0212 00:14:50.685007  123634 wrap.go:47] POST /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/events: (1.263648ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39312]
I0212 00:14:50.685868  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-17: (2.416283ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39292]
I0212 00:14:50.685993  123634 wrap.go:47] PUT /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-17/status: (2.545827ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39306]
I0212 00:14:50.686725  123634 wrap.go:47] POST /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods: (2.965989ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39310]
I0212 00:14:50.687367  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-17: (1.005751ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39292]
I0212 00:14:50.687672  123634 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 00:14:50.687868  123634 scheduling_queue.go:868] About to try and schedule pod preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-15
I0212 00:14:50.687923  123634 scheduler.go:453] Attempting to schedule pod: preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-15
I0212 00:14:50.688142  123634 factory.go:647] Unable to schedule preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-15: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 00:14:50.688238  123634 factory.go:742] Updating pod condition for preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-15 to (PodScheduled==False, Reason=Unschedulable)
I0212 00:14:50.688713  123634 wrap.go:47] POST /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods: (1.463787ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39310]
I0212 00:14:50.690114  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-15: (1.483012ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39312]
I0212 00:14:50.690696  123634 wrap.go:47] PUT /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-15/status: (2.114637ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39292]
I0212 00:14:50.691510  123634 wrap.go:47] POST /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods: (1.73753ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39314]
I0212 00:14:50.691684  123634 wrap.go:47] PATCH /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/events/ppod-15.15827591c3631425: (2.613937ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39310]
I0212 00:14:50.692252  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-15: (1.091973ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39292]
I0212 00:14:50.692464  123634 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 00:14:50.692693  123634 scheduling_queue.go:868] About to try and schedule pod preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-20
I0212 00:14:50.692746  123634 scheduler.go:453] Attempting to schedule pod: preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-20
I0212 00:14:50.692852  123634 factory.go:647] Unable to schedule preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-20: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 00:14:50.692899  123634 factory.go:742] Updating pod condition for preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-20 to (PodScheduled==False, Reason=Unschedulable)
I0212 00:14:50.694186  123634 wrap.go:47] POST /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods: (1.886722ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39314]
I0212 00:14:50.694598  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-20: (1.405247ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39312]
I0212 00:14:50.695179  123634 wrap.go:47] POST /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/events: (1.441442ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39316]
I0212 00:14:50.695874  123634 wrap.go:47] PUT /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-20/status: (2.747989ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39292]
I0212 00:14:50.697129  123634 wrap.go:47] POST /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods: (1.624754ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39314]
I0212 00:14:50.697359  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-20: (1.109528ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39316]
I0212 00:14:50.697706  123634 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 00:14:50.697907  123634 scheduling_queue.go:868] About to try and schedule pod preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-22
I0212 00:14:50.697928  123634 scheduler.go:453] Attempting to schedule pod: preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-22
I0212 00:14:50.698042  123634 factory.go:647] Unable to schedule preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-22: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 00:14:50.698091  123634 factory.go:742] Updating pod condition for preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-22 to (PodScheduled==False, Reason=Unschedulable)
I0212 00:14:50.699223  123634 wrap.go:47] POST /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods: (1.607344ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39314]
I0212 00:14:50.699376  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-22: (1.066388ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39312]
I0212 00:14:50.700045  123634 wrap.go:47] POST /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/events: (1.503947ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39320]
I0212 00:14:50.700197  123634 wrap.go:47] PUT /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-22/status: (1.73074ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39318]
I0212 00:14:50.701942  123634 wrap.go:47] POST /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods: (1.870092ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39314]
I0212 00:14:50.702095  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-22: (1.444058ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39320]
I0212 00:14:50.702459  123634 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 00:14:50.702722  123634 scheduling_queue.go:868] About to try and schedule pod preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-24
I0212 00:14:50.702746  123634 scheduler.go:453] Attempting to schedule pod: preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-24
I0212 00:14:50.702872  123634 factory.go:647] Unable to schedule preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-24: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 00:14:50.702920  123634 factory.go:742] Updating pod condition for preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-24 to (PodScheduled==False, Reason=Unschedulable)
I0212 00:14:50.705570  123634 wrap.go:47] POST /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/events: (2.194507ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39324]
I0212 00:14:50.705639  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-24: (2.526249ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39312]
I0212 00:14:50.705655  123634 wrap.go:47] POST /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods: (2.769911ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39314]
I0212 00:14:50.706098  123634 wrap.go:47] PUT /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-24/status: (2.77303ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39322]
I0212 00:14:50.707646  123634 wrap.go:47] POST /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods: (1.534892ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39312]
I0212 00:14:50.708207  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-24: (1.688629ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39322]
I0212 00:14:50.708568  123634 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 00:14:50.708799  123634 scheduling_queue.go:868] About to try and schedule pod preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-25
I0212 00:14:50.708846  123634 scheduler.go:453] Attempting to schedule pod: preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-25
I0212 00:14:50.708974  123634 factory.go:647] Unable to schedule preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-25: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 00:14:50.709052  123634 factory.go:742] Updating pod condition for preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-25 to (PodScheduled==False, Reason=Unschedulable)
I0212 00:14:50.710213  123634 wrap.go:47] POST /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods: (2.17239ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39312]
I0212 00:14:50.710348  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-25: (1.050694ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39322]
I0212 00:14:50.711991  123634 wrap.go:47] POST /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/events: (2.023258ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39326]
I0212 00:14:50.712087  123634 wrap.go:47] POST /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods: (1.29857ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39312]
I0212 00:14:50.712093  123634 wrap.go:47] PUT /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-25/status: (2.089174ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39314]
I0212 00:14:50.714128  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-25: (1.436351ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39322]
I0212 00:14:50.714365  123634 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 00:14:50.714519  123634 scheduling_queue.go:868] About to try and schedule pod preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-24
I0212 00:14:50.714580  123634 scheduler.go:453] Attempting to schedule pod: preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-24
I0212 00:14:50.714721  123634 factory.go:647] Unable to schedule preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-24: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 00:14:50.714772  123634 factory.go:742] Updating pod condition for preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-24 to (PodScheduled==False, Reason=Unschedulable)
I0212 00:14:50.714848  123634 wrap.go:47] POST /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods: (2.230402ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39312]
I0212 00:14:50.716241  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-24: (1.022296ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39322]
I0212 00:14:50.716587  123634 wrap.go:47] PUT /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-24/status: (1.233467ms) 409 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39328]
I0212 00:14:50.718117  123634 wrap.go:47] PATCH /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/events/ppod-24.15827591c4e4c3b5: (2.657084ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39312]
I0212 00:14:50.718369  123634 wrap.go:47] POST /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods: (1.684318ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39330]
I0212 00:14:50.719222  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-24: (1.175513ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39328]
I0212 00:14:50.719464  123634 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 00:14:50.719684  123634 scheduling_queue.go:868] About to try and schedule pod preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-25
I0212 00:14:50.719702  123634 scheduler.go:453] Attempting to schedule pod: preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-25
I0212 00:14:50.719809  123634 factory.go:647] Unable to schedule preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-25: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 00:14:50.719853  123634 factory.go:742] Updating pod condition for preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-25 to (PodScheduled==False, Reason=Unschedulable)
I0212 00:14:50.720241  123634 wrap.go:47] POST /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods: (1.4057ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39312]
I0212 00:14:50.721691  123634 wrap.go:47] PUT /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-25/status: (1.563301ms) 409 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39328]
I0212 00:14:50.722684  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-25: (2.298284ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39322]
I0212 00:14:50.723153  123634 wrap.go:47] PATCH /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/events/ppod-25.15827591c5425270: (2.094843ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39312]
I0212 00:14:50.723194  123634 wrap.go:47] POST /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods: (1.723212ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39332]
I0212 00:14:50.724403  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-25: (1.716534ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39328]
I0212 00:14:50.725369  123634 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 00:14:50.725643  123634 scheduling_queue.go:868] About to try and schedule pod preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-22
I0212 00:14:50.725666  123634 scheduler.go:453] Attempting to schedule pod: preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-22
I0212 00:14:50.725753  123634 factory.go:647] Unable to schedule preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-22: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 00:14:50.725799  123634 factory.go:742] Updating pod condition for preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-22 to (PodScheduled==False, Reason=Unschedulable)
I0212 00:14:50.726003  123634 wrap.go:47] POST /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods: (2.020431ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39312]
I0212 00:14:50.727286  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-22: (1.174621ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39322]
I0212 00:14:50.727564  123634 wrap.go:47] PUT /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-22/status: (1.51383ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39328]
I0212 00:14:50.728978  123634 wrap.go:47] PATCH /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/events/ppod-22.15827591c49b1578: (2.196528ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39312]
I0212 00:14:50.729506  123634 wrap.go:47] POST /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods: (1.809282ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39322]
I0212 00:14:50.729754  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-22: (1.770512ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39328]
I0212 00:14:50.730066  123634 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 00:14:50.730250  123634 scheduling_queue.go:868] About to try and schedule pod preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-34
I0212 00:14:50.730274  123634 scheduler.go:453] Attempting to schedule pod: preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-34
I0212 00:14:50.730385  123634 factory.go:647] Unable to schedule preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-34: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 00:14:50.730572  123634 factory.go:742] Updating pod condition for preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-34 to (PodScheduled==False, Reason=Unschedulable)
I0212 00:14:50.731651  123634 wrap.go:47] POST /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods: (1.480498ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39312]
I0212 00:14:50.731800  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-34: (1.029514ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39328]
I0212 00:14:50.732373  123634 wrap.go:47] POST /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/events: (1.331974ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39336]
I0212 00:14:50.732806  123634 wrap.go:47] PUT /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-34/status: (1.906455ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39334]
I0212 00:14:50.734017  123634 wrap.go:47] POST /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods: (1.625325ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39312]
I0212 00:14:50.734654  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-34: (1.354599ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39336]
I0212 00:14:50.734929  123634 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 00:14:50.735094  123634 scheduling_queue.go:868] About to try and schedule pod preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-36
I0212 00:14:50.735108  123634 scheduler.go:453] Attempting to schedule pod: preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-36
I0212 00:14:50.735200  123634 factory.go:647] Unable to schedule preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-36: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 00:14:50.735248  123634 factory.go:742] Updating pod condition for preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-36 to (PodScheduled==False, Reason=Unschedulable)
I0212 00:14:50.736348  123634 wrap.go:47] POST /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods: (1.806775ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39312]
I0212 00:14:50.737138  123634 wrap.go:47] POST /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/events: (1.310339ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39338]
I0212 00:14:50.737328  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-36: (1.629931ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39328]
I0212 00:14:50.737711  123634 wrap.go:47] PUT /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-36/status: (2.239716ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39336]
I0212 00:14:50.738153  123634 wrap.go:47] POST /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods: (1.385904ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39312]
I0212 00:14:50.739869  123634 wrap.go:47] POST /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods: (1.321248ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39312]
I0212 00:14:50.740010  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-36: (1.840195ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39328]
I0212 00:14:50.740287  123634 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 00:14:50.740453  123634 scheduling_queue.go:868] About to try and schedule pod preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-38
I0212 00:14:50.740525  123634 scheduler.go:453] Attempting to schedule pod: preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-38
I0212 00:14:50.740719  123634 factory.go:647] Unable to schedule preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-38: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 00:14:50.740775  123634 factory.go:742] Updating pod condition for preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-38 to (PodScheduled==False, Reason=Unschedulable)
I0212 00:14:50.742311  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-38: (1.143754ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39340]
I0212 00:14:50.742594  123634 wrap.go:47] PUT /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-38/status: (1.605379ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39338]
I0212 00:14:50.742831  123634 wrap.go:47] POST /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods: (2.56463ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39312]
I0212 00:14:50.743360  123634 wrap.go:47] POST /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/events: (1.574161ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39342]
I0212 00:14:50.744760  123634 wrap.go:47] POST /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods: (1.475525ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39340]
I0212 00:14:50.744822  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-38: (1.632554ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39338]
I0212 00:14:50.745159  123634 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 00:14:50.745308  123634 scheduling_queue.go:868] About to try and schedule pod preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-41
I0212 00:14:50.745327  123634 scheduler.go:453] Attempting to schedule pod: preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-41
I0212 00:14:50.745394  123634 factory.go:647] Unable to schedule preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-41: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 00:14:50.745468  123634 factory.go:742] Updating pod condition for preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-41 to (PodScheduled==False, Reason=Unschedulable)
I0212 00:14:50.746669  123634 wrap.go:47] POST /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods: (1.553325ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39340]
I0212 00:14:50.747147  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-41: (1.059959ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39346]
I0212 00:14:50.747840  123634 wrap.go:47] POST /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/events: (1.682216ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39344]
I0212 00:14:50.748066  123634 wrap.go:47] PUT /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-41/status: (2.270147ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39342]
I0212 00:14:50.748605  123634 wrap.go:47] POST /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods: (1.481401ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39340]
I0212 00:14:50.750098  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-41: (1.639591ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39344]
I0212 00:14:50.750398  123634 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 00:14:50.750664  123634 scheduling_queue.go:868] About to try and schedule pod preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-43
I0212 00:14:50.750726  123634 scheduler.go:453] Attempting to schedule pod: preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-43
I0212 00:14:50.750773  123634 wrap.go:47] POST /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods: (1.67026ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39340]
I0212 00:14:50.750886  123634 factory.go:647] Unable to schedule preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-43: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 00:14:50.750934  123634 factory.go:742] Updating pod condition for preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-43 to (PodScheduled==False, Reason=Unschedulable)
I0212 00:14:50.752248  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-43: (1.119897ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39344]
I0212 00:14:50.752771  123634 wrap.go:47] POST /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/events: (1.361169ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39350]
I0212 00:14:50.753011  123634 wrap.go:47] PUT /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-43/status: (1.877611ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39346]
I0212 00:14:50.753728  123634 wrap.go:47] POST /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods: (2.174893ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39348]
I0212 00:14:50.755156  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-43: (1.111117ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39350]
I0212 00:14:50.755454  123634 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 00:14:50.755777  123634 scheduling_queue.go:868] About to try and schedule pod preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-45
I0212 00:14:50.755821  123634 scheduler.go:453] Attempting to schedule pod: preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-45
I0212 00:14:50.755921  123634 factory.go:647] Unable to schedule preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-45: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 00:14:50.755988  123634 factory.go:742] Updating pod condition for preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-45 to (PodScheduled==False, Reason=Unschedulable)
I0212 00:14:50.755801  123634 wrap.go:47] POST /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods: (1.730019ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39344]
I0212 00:14:50.758795  123634 wrap.go:47] POST /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/events: (2.074638ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39344]
I0212 00:14:50.758915  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-45: (2.504286ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39350]
I0212 00:14:50.758960  123634 wrap.go:47] PUT /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-45/status: (2.545996ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39352]
I0212 00:14:50.759114  123634 wrap.go:47] POST /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods: (2.360844ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39354]
I0212 00:14:50.760867  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-45: (1.409605ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39350]
I0212 00:14:50.761164  123634 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 00:14:50.761366  123634 scheduling_queue.go:868] About to try and schedule pod preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-47
I0212 00:14:50.761386  123634 scheduler.go:453] Attempting to schedule pod: preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-47
I0212 00:14:50.761470  123634 factory.go:647] Unable to schedule preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-47: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 00:14:50.761533  123634 factory.go:742] Updating pod condition for preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-47 to (PodScheduled==False, Reason=Unschedulable)
I0212 00:14:50.763280  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-47: (1.158484ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39344]
I0212 00:14:50.763820  123634 wrap.go:47] POST /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/events: (1.651381ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39356]
I0212 00:14:50.763939  123634 wrap.go:47] PUT /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-47/status: (2.167309ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39350]
I0212 00:14:50.765420  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-47: (1.151132ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39356]
I0212 00:14:50.765728  123634 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 00:14:50.765945  123634 scheduling_queue.go:868] About to try and schedule pod preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-49
I0212 00:14:50.765965  123634 scheduler.go:453] Attempting to schedule pod: preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-49
I0212 00:14:50.766087  123634 factory.go:647] Unable to schedule preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-49: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 00:14:50.766166  123634 factory.go:742] Updating pod condition for preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-49 to (PodScheduled==False, Reason=Unschedulable)
I0212 00:14:50.768067  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-49: (1.640409ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39356]
I0212 00:14:50.768068  123634 wrap.go:47] POST /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/events: (1.269086ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39358]
I0212 00:14:50.768282  123634 wrap.go:47] PUT /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-49/status: (1.869187ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39344]
I0212 00:14:50.769859  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-49: (1.131018ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39344]
I0212 00:14:50.770145  123634 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 00:14:50.770327  123634 scheduling_queue.go:868] About to try and schedule pod preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-47
I0212 00:14:50.770346  123634 scheduler.go:453] Attempting to schedule pod: preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-47
I0212 00:14:50.770469  123634 factory.go:647] Unable to schedule preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-47: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 00:14:50.770526  123634 factory.go:742] Updating pod condition for preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-47 to (PodScheduled==False, Reason=Unschedulable)
I0212 00:14:50.771490  123634 reflector.go:248] k8s.io/client-go/informers/factory.go:132: forcing resync
I0212 00:14:50.772198  123634 reflector.go:248] k8s.io/client-go/informers/factory.go:132: forcing resync
I0212 00:14:50.772507  123634 wrap.go:47] PUT /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-47/status: (1.731619ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39358]
I0212 00:14:50.772612  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-47: (1.863827ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39344]
I0212 00:14:50.773444  123634 wrap.go:47] PATCH /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/events/ppod-47.15827591c8631c5d: (2.287375ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39360]
I0212 00:14:50.774163  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-47: (1.077195ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39344]
I0212 00:14:50.774445  123634 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 00:14:50.774662  123634 scheduling_queue.go:868] About to try and schedule pod preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-49
I0212 00:14:50.774690  123634 scheduler.go:453] Attempting to schedule pod: preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-49
I0212 00:14:50.774818  123634 factory.go:647] Unable to schedule preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-49: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 00:14:50.774883  123634 factory.go:742] Updating pod condition for preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-49 to (PodScheduled==False, Reason=Unschedulable)
I0212 00:14:50.775363  123634 reflector.go:248] k8s.io/client-go/informers/factory.go:132: forcing resync
I0212 00:14:50.775593  123634 reflector.go:248] k8s.io/client-go/informers/factory.go:132: forcing resync
I0212 00:14:50.776236  123634 reflector.go:248] k8s.io/client-go/informers/factory.go:132: forcing resync
I0212 00:14:50.776503  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-49: (1.314232ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39360]
I0212 00:14:50.777368  123634 wrap.go:47] PUT /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-49/status: (2.21197ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39358]
I0212 00:14:50.778075  123634 wrap.go:47] PATCH /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/events/ppod-49.15827591c8a9d0b7: (2.301019ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39362]
I0212 00:14:50.779336  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-49: (1.135885ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39358]
I0212 00:14:50.779698  123634 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 00:14:50.779891  123634 scheduling_queue.go:868] About to try and schedule pod preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-45
I0212 00:14:50.779913  123634 scheduler.go:453] Attempting to schedule pod: preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-45
I0212 00:14:50.779994  123634 factory.go:647] Unable to schedule preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-45: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 00:14:50.780049  123634 factory.go:742] Updating pod condition for preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-45 to (PodScheduled==False, Reason=Unschedulable)
I0212 00:14:50.781790  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-45: (1.175719ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39360]
I0212 00:14:50.782356  123634 wrap.go:47] PUT /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-45/status: (2.061784ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39362]
I0212 00:14:50.784059  123634 wrap.go:47] PATCH /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/events/ppod-45.15827591c80e8415: (2.592721ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39364]
I0212 00:14:50.784620  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-45: (1.451787ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39362]
I0212 00:14:50.784872  123634 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 00:14:50.785045  123634 scheduling_queue.go:868] About to try and schedule pod preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-49
I0212 00:14:50.785067  123634 scheduler.go:453] Attempting to schedule pod: preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-49
I0212 00:14:50.785197  123634 factory.go:647] Unable to schedule preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-49: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 00:14:50.785252  123634 factory.go:742] Updating pod condition for preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-49 to (PodScheduled==False, Reason=Unschedulable)
I0212 00:14:50.786801  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-49: (1.141271ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39360]
I0212 00:14:50.787190  123634 wrap.go:47] PUT /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-49/status: (1.582672ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39364]
I0212 00:14:50.788995  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-49: (1.02468ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39364]
I0212 00:14:50.789257  123634 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 00:14:50.789445  123634 scheduling_queue.go:868] About to try and schedule pod preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-48
I0212 00:14:50.789474  123634 scheduler.go:453] Attempting to schedule pod: preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-48
I0212 00:14:50.789618  123634 factory.go:647] Unable to schedule preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-48: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 00:14:50.789680  123634 factory.go:742] Updating pod condition for preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-48 to (PodScheduled==False, Reason=Unschedulable)
I0212 00:14:50.789707  123634 wrap.go:47] PATCH /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/events/ppod-49.15827591c8a9d0b7: (2.800758ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39366]
I0212 00:14:50.791146  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-48: (1.107894ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39360]
I0212 00:14:50.792210  123634 wrap.go:47] PUT /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-48/status: (2.211602ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39364]
I0212 00:14:50.792533  123634 wrap.go:47] POST /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/events: (1.674273ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39368]
I0212 00:14:50.793813  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-48: (1.138278ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39364]
I0212 00:14:50.794118  123634 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 00:14:50.794325  123634 scheduling_queue.go:868] About to try and schedule pod preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-43
I0212 00:14:50.794348  123634 scheduler.go:453] Attempting to schedule pod: preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-43
I0212 00:14:50.794474  123634 factory.go:647] Unable to schedule preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-43: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 00:14:50.794575  123634 factory.go:742] Updating pod condition for preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-43 to (PodScheduled==False, Reason=Unschedulable)
I0212 00:14:50.795988  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-43: (1.197627ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39368]
I0212 00:14:50.797034  123634 wrap.go:47] PUT /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-43/status: (2.229247ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39360]
I0212 00:14:50.797675  123634 wrap.go:47] PATCH /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/events/ppod-43.15827591c7c164aa: (2.474115ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39370]
I0212 00:14:50.798715  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-43: (1.110941ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39360]
I0212 00:14:50.799076  123634 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 00:14:50.799220  123634 scheduling_queue.go:868] About to try and schedule pod preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-48
I0212 00:14:50.799239  123634 scheduler.go:453] Attempting to schedule pod: preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-48
I0212 00:14:50.799338  123634 factory.go:647] Unable to schedule preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-48: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 00:14:50.799392  123634 factory.go:742] Updating pod condition for preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-48 to (PodScheduled==False, Reason=Unschedulable)
I0212 00:14:50.800904  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-48: (1.211542ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39368]
I0212 00:14:50.801825  123634 wrap.go:47] PUT /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-48/status: (2.160023ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39370]
I0212 00:14:50.802761  123634 wrap.go:47] PATCH /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/events/ppod-48.15827591ca1073dc: (2.448324ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39372]
I0212 00:14:50.804021  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-48: (1.186891ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39370]
I0212 00:14:50.804328  123634 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 00:14:50.804498  123634 scheduling_queue.go:868] About to try and schedule pod preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-46
I0212 00:14:50.804521  123634 scheduler.go:453] Attempting to schedule pod: preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-46
I0212 00:14:50.804652  123634 factory.go:647] Unable to schedule preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-46: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 00:14:50.804710  123634 factory.go:742] Updating pod condition for preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-46 to (PodScheduled==False, Reason=Unschedulable)
I0212 00:14:50.806660  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-46: (1.755889ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39372]
I0212 00:14:50.806687  123634 wrap.go:47] POST /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/events: (1.58504ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39374]
I0212 00:14:50.806761  123634 wrap.go:47] PUT /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-46/status: (1.814189ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39368]
I0212 00:14:50.808607  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-46: (1.202964ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39372]
I0212 00:14:50.808899  123634 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 00:14:50.809164  123634 scheduling_queue.go:868] About to try and schedule pod preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-41
I0212 00:14:50.809187  123634 scheduler.go:453] Attempting to schedule pod: preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-41
I0212 00:14:50.809266  123634 factory.go:647] Unable to schedule preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-41: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 00:14:50.809314  123634 factory.go:742] Updating pod condition for preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-41 to (PodScheduled==False, Reason=Unschedulable)
I0212 00:14:50.811002  123634 wrap.go:47] PUT /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-41/status: (1.462164ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39372]
I0212 00:14:50.811340  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-41: (1.248159ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39374]
I0212 00:14:50.812928  123634 wrap.go:47] PATCH /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/events/ppod-41.15827591c76e01b7: (2.767219ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39376]
I0212 00:14:50.813760  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-41: (1.087099ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39372]
I0212 00:14:50.814191  123634 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 00:14:50.814457  123634 scheduling_queue.go:868] About to try and schedule pod preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-46
I0212 00:14:50.814479  123634 scheduler.go:453] Attempting to schedule pod: preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-46
I0212 00:14:50.814705  123634 factory.go:647] Unable to schedule preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-46: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 00:14:50.814789  123634 factory.go:742] Updating pod condition for preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-46 to (PodScheduled==False, Reason=Unschedulable)
I0212 00:14:50.816209  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-46: (1.167197ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39376]
I0212 00:14:50.816884  123634 wrap.go:47] PUT /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-46/status: (1.78832ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39374]
I0212 00:14:50.818619  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-46: (1.154388ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39374]
I0212 00:14:50.819063  123634 wrap.go:47] PATCH /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/events/ppod-46.15827591caf5f829: (3.136618ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39378]
I0212 00:14:50.819069  123634 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 00:14:50.819277  123634 scheduling_queue.go:868] About to try and schedule pod preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-44
I0212 00:14:50.819298  123634 scheduler.go:453] Attempting to schedule pod: preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-44
I0212 00:14:50.819370  123634 factory.go:647] Unable to schedule preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-44: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 00:14:50.819414  123634 factory.go:742] Updating pod condition for preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-44 to (PodScheduled==False, Reason=Unschedulable)
I0212 00:14:50.821357  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-44: (1.580175ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39376]
I0212 00:14:50.821526  123634 wrap.go:47] POST /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/events: (1.443489ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39380]
I0212 00:14:50.822694  123634 wrap.go:47] PUT /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-44/status: (2.978664ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39374]
I0212 00:14:50.824541  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-44: (1.1291ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39380]
I0212 00:14:50.824840  123634 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 00:14:50.825027  123634 scheduling_queue.go:868] About to try and schedule pod preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-42
I0212 00:14:50.825052  123634 scheduler.go:453] Attempting to schedule pod: preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-42
I0212 00:14:50.825142  123634 factory.go:647] Unable to schedule preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-42: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 00:14:50.825193  123634 factory.go:742] Updating pod condition for preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-42 to (PodScheduled==False, Reason=Unschedulable)
I0212 00:14:50.827473  123634 wrap.go:47] POST /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/events: (1.588298ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39382]
I0212 00:14:50.827606  123634 wrap.go:47] PUT /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-42/status: (2.160834ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39380]
I0212 00:14:50.827979  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-42: (2.09181ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39376]
I0212 00:14:50.829165  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-42: (1.177783ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39382]
I0212 00:14:50.829479  123634 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 00:14:50.830003  123634 scheduling_queue.go:868] About to try and schedule pod preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-44
I0212 00:14:50.830023  123634 scheduler.go:453] Attempting to schedule pod: preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-44
I0212 00:14:50.830195  123634 factory.go:647] Unable to schedule preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-44: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 00:14:50.830262  123634 factory.go:742] Updating pod condition for preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-44 to (PodScheduled==False, Reason=Unschedulable)
I0212 00:14:50.831590  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-44: (1.096597ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39376]
I0212 00:14:50.832156  123634 wrap.go:47] PUT /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-44/status: (1.527513ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39380]
I0212 00:14:50.833297  123634 wrap.go:47] PATCH /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/events/ppod-44.15827591cbd65a62: (2.261909ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39384]
I0212 00:14:50.833804  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-44: (1.04337ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39380]
I0212 00:14:50.834100  123634 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 00:14:50.834314  123634 scheduling_queue.go:868] About to try and schedule pod preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-42
I0212 00:14:50.834336  123634 scheduler.go:453] Attempting to schedule pod: preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-42
I0212 00:14:50.834447  123634 factory.go:647] Unable to schedule preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-42: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 00:14:50.834520  123634 factory.go:742] Updating pod condition for preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-42 to (PodScheduled==False, Reason=Unschedulable)
I0212 00:14:50.835864  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-42: (1.085873ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39384]
I0212 00:14:50.836780  123634 wrap.go:47] PUT /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-42/status: (1.99772ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39376]
I0212 00:14:50.837442  123634 wrap.go:47] PATCH /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/events/ppod-42.15827591cc2e83d0: (2.201502ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39386]
I0212 00:14:50.838234  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-42: (1.102702ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39376]
I0212 00:14:50.838539  123634 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 00:14:50.838756  123634 scheduling_queue.go:868] About to try and schedule pod preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-38
I0212 00:14:50.838777  123634 scheduler.go:453] Attempting to schedule pod: preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-38
I0212 00:14:50.838908  123634 factory.go:647] Unable to schedule preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-38: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 00:14:50.838965  123634 factory.go:742] Updating pod condition for preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-38 to (PodScheduled==False, Reason=Unschedulable)
I0212 00:14:50.840860  123634 wrap.go:47] PUT /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-38/status: (1.614159ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39384]
I0212 00:14:50.840997  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-38: (1.820277ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39386]
I0212 00:14:50.841989  123634 wrap.go:47] PATCH /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/events/ppod-38.15827591c72664e5: (2.406043ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39388]
I0212 00:14:50.842692  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-38: (1.27168ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39384]
I0212 00:14:50.843022  123634 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 00:14:50.843190  123634 scheduling_queue.go:868] About to try and schedule pod preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-40
I0212 00:14:50.843210  123634 scheduler.go:453] Attempting to schedule pod: preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-40
I0212 00:14:50.843319  123634 factory.go:647] Unable to schedule preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-40: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 00:14:50.843375  123634 factory.go:742] Updating pod condition for preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-40 to (PodScheduled==False, Reason=Unschedulable)
I0212 00:14:50.844982  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-40: (1.330899ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39388]
I0212 00:14:50.845690  123634 wrap.go:47] POST /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/events: (1.635167ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39390]
I0212 00:14:50.845737  123634 wrap.go:47] PUT /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-40/status: (2.095603ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39386]
I0212 00:14:50.847271  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-40: (1.160948ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39390]
I0212 00:14:50.847649  123634 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 00:14:50.847871  123634 scheduling_queue.go:868] About to try and schedule pod preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-39
I0212 00:14:50.848407  123634 scheduler.go:453] Attempting to schedule pod: preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-39
I0212 00:14:50.848622  123634 factory.go:647] Unable to schedule preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-39: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 00:14:50.848689  123634 factory.go:742] Updating pod condition for preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-39 to (PodScheduled==False, Reason=Unschedulable)
I0212 00:14:50.850077  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-39: (1.113059ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39390]
I0212 00:14:50.851189  123634 wrap.go:47] POST /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/events: (1.844954ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39392]
I0212 00:14:50.851252  123634 wrap.go:47] PUT /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-39/status: (2.23244ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39388]
I0212 00:14:50.852762  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-39: (1.063529ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39392]
I0212 00:14:50.852982  123634 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 00:14:50.853131  123634 scheduling_queue.go:868] About to try and schedule pod preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-40
I0212 00:14:50.853152  123634 scheduler.go:453] Attempting to schedule pod: preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-40
I0212 00:14:50.853246  123634 factory.go:647] Unable to schedule preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-40: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 00:14:50.853300  123634 factory.go:742] Updating pod condition for preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-40 to (PodScheduled==False, Reason=Unschedulable)
I0212 00:14:50.854859  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-40: (1.187273ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39390]
I0212 00:14:50.855304  123634 wrap.go:47] PUT /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-40/status: (1.785383ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39392]
I0212 00:14:50.856475  123634 wrap.go:47] PATCH /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/events/ppod-40.15827591cd43f246: (2.406314ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39394]
I0212 00:14:50.856993  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-40: (1.194509ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39392]
I0212 00:14:50.857305  123634 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 00:14:50.857507  123634 scheduling_queue.go:868] About to try and schedule pod preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-39
I0212 00:14:50.857526  123634 scheduler.go:453] Attempting to schedule pod: preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-39
I0212 00:14:50.857665  123634 factory.go:647] Unable to schedule preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-39: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 00:14:50.857733  123634 factory.go:742] Updating pod condition for preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-39 to (PodScheduled==False, Reason=Unschedulable)
I0212 00:14:50.859166  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-39: (1.154235ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39390]
I0212 00:14:50.859890  123634 wrap.go:47] PUT /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-39/status: (1.926445ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39394]
I0212 00:14:50.861291  123634 wrap.go:47] PATCH /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/events/ppod-39.15827591cd950827: (2.819079ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39396]
I0212 00:14:50.861498  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/preemptor-pod: (1.751519ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39390]
I0212 00:14:50.861807  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-39: (1.297589ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39394]
I0212 00:14:50.862043  123634 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 00:14:50.862217  123634 scheduling_queue.go:868] About to try and schedule pod preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-36
I0212 00:14:50.862235  123634 scheduler.go:453] Attempting to schedule pod: preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-36
I0212 00:14:50.862349  123634 factory.go:647] Unable to schedule preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-36: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 00:14:50.862415  123634 factory.go:742] Updating pod condition for preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-36 to (PodScheduled==False, Reason=Unschedulable)
I0212 00:14:50.864211  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-36: (1.573558ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39390]
I0212 00:14:50.864297  123634 wrap.go:47] PUT /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-36/status: (1.601883ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39396]
I0212 00:14:50.865783  123634 wrap.go:47] PATCH /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/events/ppod-36.15827591c6d20881: (2.733397ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39398]
I0212 00:14:50.865956  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-36: (1.216547ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39396]
I0212 00:14:50.866261  123634 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 00:14:50.866453  123634 scheduling_queue.go:868] About to try and schedule pod preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-37
I0212 00:14:50.866476  123634 scheduler.go:453] Attempting to schedule pod: preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-37
I0212 00:14:50.866628  123634 factory.go:647] Unable to schedule preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-37: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 00:14:50.866690  123634 factory.go:742] Updating pod condition for preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-37 to (PodScheduled==False, Reason=Unschedulable)
I0212 00:14:50.868519  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-37: (1.24083ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39390]
I0212 00:14:50.868817  123634 wrap.go:47] PUT /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-37/status: (1.879241ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39398]
I0212 00:14:50.869351  123634 wrap.go:47] POST /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/events: (1.911499ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39400]
I0212 00:14:50.870517  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-37: (1.203771ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39398]
I0212 00:14:50.870846  123634 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 00:14:50.871037  123634 scheduling_queue.go:868] About to try and schedule pod preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-34
I0212 00:14:50.871059  123634 scheduler.go:453] Attempting to schedule pod: preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-34
I0212 00:14:50.871150  123634 factory.go:647] Unable to schedule preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-34: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 00:14:50.871200  123634 factory.go:742] Updating pod condition for preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-34 to (PodScheduled==False, Reason=Unschedulable)
I0212 00:14:50.872677  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-34: (1.064177ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39400]
I0212 00:14:50.873262  123634 wrap.go:47] PUT /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-34/status: (1.845342ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39390]
I0212 00:14:50.875117  123634 wrap.go:47] PATCH /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/events/ppod-34.15827591c68a5d8b: (2.697247ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39402]
I0212 00:14:50.875140  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-34: (1.491614ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39390]
I0212 00:14:50.875474  123634 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 00:14:50.875702  123634 scheduling_queue.go:868] About to try and schedule pod preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-37
I0212 00:14:50.875720  123634 scheduler.go:453] Attempting to schedule pod: preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-37
I0212 00:14:50.875821  123634 factory.go:647] Unable to schedule preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-37: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 00:14:50.875896  123634 factory.go:742] Updating pod condition for preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-37 to (PodScheduled==False, Reason=Unschedulable)
I0212 00:14:50.877380  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-37: (1.241216ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39400]
I0212 00:14:50.879635  123634 wrap.go:47] PUT /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-37/status: (3.518717ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39390]
I0212 00:14:50.879659  123634 wrap.go:47] PATCH /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/events/ppod-37.15827591cea7a696: (2.84305ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39404]
I0212 00:14:50.881594  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-37: (1.346242ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39390]
I0212 00:14:50.881838  123634 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 00:14:50.881985  123634 scheduling_queue.go:868] About to try and schedule pod preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-35
I0212 00:14:50.882005  123634 scheduler.go:453] Attempting to schedule pod: preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-35
I0212 00:14:50.882113  123634 factory.go:647] Unable to schedule preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-35: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 00:14:50.882196  123634 factory.go:742] Updating pod condition for preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-35 to (PodScheduled==False, Reason=Unschedulable)
I0212 00:14:50.883643  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-35: (1.094237ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39400]
I0212 00:14:50.884379  123634 wrap.go:47] POST /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/events: (1.471403ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39406]
I0212 00:14:50.885167  123634 wrap.go:47] PUT /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-35/status: (2.244093ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39390]
I0212 00:14:50.886777  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-35: (1.144175ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39406]
I0212 00:14:50.887020  123634 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 00:14:50.887219  123634 scheduling_queue.go:868] About to try and schedule pod preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-33
I0212 00:14:50.887240  123634 scheduler.go:453] Attempting to schedule pod: preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-33
I0212 00:14:50.887335  123634 factory.go:647] Unable to schedule preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-33: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 00:14:50.887376  123634 factory.go:742] Updating pod condition for preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-33 to (PodScheduled==False, Reason=Unschedulable)
I0212 00:14:50.888789  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-33: (1.162058ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39400]
I0212 00:14:50.889257  123634 wrap.go:47] PUT /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-33/status: (1.62871ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39406]
I0212 00:14:50.889732  123634 wrap.go:47] POST /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/events: (1.747661ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39408]
I0212 00:14:50.890872  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-33: (1.20013ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39406]
I0212 00:14:50.891215  123634 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 00:14:50.891396  123634 scheduling_queue.go:868] About to try and schedule pod preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-35
I0212 00:14:50.891416  123634 scheduler.go:453] Attempting to schedule pod: preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-35
I0212 00:14:50.891620  123634 factory.go:647] Unable to schedule preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-35: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 00:14:50.891687  123634 factory.go:742] Updating pod condition for preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-35 to (PodScheduled==False, Reason=Unschedulable)
I0212 00:14:50.893858  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-35: (1.521492ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39400]
I0212 00:14:50.894385  123634 wrap.go:47] PUT /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-35/status: (2.035744ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39408]
I0212 00:14:50.894767  123634 wrap.go:47] PATCH /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/events/ppod-35.15827591cf944f06: (2.304597ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39410]
I0212 00:14:50.895956  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-35: (1.14606ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39408]
I0212 00:14:50.896212  123634 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 00:14:50.896363  123634 scheduling_queue.go:868] About to try and schedule pod preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-33
I0212 00:14:50.896380  123634 scheduler.go:453] Attempting to schedule pod: preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-33
I0212 00:14:50.896496  123634 factory.go:647] Unable to schedule preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-33: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 00:14:50.896576  123634 factory.go:742] Updating pod condition for preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-33 to (PodScheduled==False, Reason=Unschedulable)
I0212 00:14:50.898535  123634 wrap.go:47] PUT /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-33/status: (1.661476ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39400]
I0212 00:14:50.898582  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-33: (1.210655ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39410]
I0212 00:14:50.899917  123634 wrap.go:47] PATCH /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/events/ppod-33.15827591cfe35c7d: (2.52925ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39412]
I0212 00:14:50.900016  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-33: (1.078245ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39400]
I0212 00:14:50.900292  123634 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 00:14:50.900432  123634 scheduling_queue.go:868] About to try and schedule pod preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-32
I0212 00:14:50.900448  123634 scheduler.go:453] Attempting to schedule pod: preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-32
I0212 00:14:50.900612  123634 factory.go:647] Unable to schedule preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-32: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 00:14:50.900670  123634 factory.go:742] Updating pod condition for preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-32 to (PodScheduled==False, Reason=Unschedulable)
I0212 00:14:50.901898  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-32: (997.95µs) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39410]
I0212 00:14:50.902937  123634 wrap.go:47] POST /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/events: (1.311206ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39414]
I0212 00:14:50.903953  123634 wrap.go:47] PUT /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-32/status: (3.007222ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39412]
I0212 00:14:50.905660  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-32: (1.28937ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39414]
I0212 00:14:50.905930  123634 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 00:14:50.906169  123634 scheduling_queue.go:868] About to try and schedule pod preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-31
I0212 00:14:50.906189  123634 scheduler.go:453] Attempting to schedule pod: preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-31
I0212 00:14:50.906310  123634 factory.go:647] Unable to schedule preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-31: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 00:14:50.906369  123634 factory.go:742] Updating pod condition for preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-31 to (PodScheduled==False, Reason=Unschedulable)
I0212 00:14:50.907863  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-31: (1.179077ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39410]
I0212 00:14:50.908470  123634 wrap.go:47] POST /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/events: (1.523472ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39416]
I0212 00:14:50.908719  123634 wrap.go:47] PUT /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-31/status: (2.008256ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39414]
I0212 00:14:50.910281  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-31: (1.167633ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39414]
I0212 00:14:50.910655  123634 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 00:14:50.910826  123634 scheduling_queue.go:868] About to try and schedule pod preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-32
I0212 00:14:50.910846  123634 scheduler.go:453] Attempting to schedule pod: preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-32
I0212 00:14:50.910950  123634 factory.go:647] Unable to schedule preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-32: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 00:14:50.911007  123634 factory.go:742] Updating pod condition for preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-32 to (PodScheduled==False, Reason=Unschedulable)
I0212 00:14:50.913073  123634 wrap.go:47] PUT /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-32/status: (1.810752ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39416]
I0212 00:14:50.913091  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-32: (1.715901ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39414]
I0212 00:14:50.913867  123634 wrap.go:47] PATCH /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/events/ppod-32.15827591d0ae2dec: (2.272236ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39418]
I0212 00:14:50.915056  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-32: (1.560653ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39414]
I0212 00:14:50.915407  123634 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 00:14:50.915721  123634 scheduling_queue.go:868] About to try and schedule pod preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-31
I0212 00:14:50.915744  123634 scheduler.go:453] Attempting to schedule pod: preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-31
I0212 00:14:50.915890  123634 factory.go:647] Unable to schedule preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-31: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 00:14:50.915947  123634 factory.go:742] Updating pod condition for preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-31 to (PodScheduled==False, Reason=Unschedulable)
I0212 00:14:50.917541  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-31: (1.283568ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39418]
I0212 00:14:50.919145  123634 wrap.go:47] PUT /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-31/status: (2.066721ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39420]
I0212 00:14:50.919232  123634 wrap.go:47] PATCH /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/events/ppod-31.15827591d10526a3: (2.725987ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39416]
I0212 00:14:50.920613  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-31: (1.1249ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39420]
I0212 00:14:50.920848  123634 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 00:14:50.921013  123634 scheduling_queue.go:868] About to try and schedule pod preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-30
I0212 00:14:50.921031  123634 scheduler.go:453] Attempting to schedule pod: preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-30
I0212 00:14:50.921140  123634 factory.go:647] Unable to schedule preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-30: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 00:14:50.921197  123634 factory.go:742] Updating pod condition for preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-30 to (PodScheduled==False, Reason=Unschedulable)
I0212 00:14:50.922640  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-30: (1.221244ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39418]
I0212 00:14:50.923284  123634 wrap.go:47] PUT /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-30/status: (1.85467ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39420]
I0212 00:14:50.923659  123634 wrap.go:47] POST /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/events: (1.688519ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39422]
I0212 00:14:50.924943  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-30: (1.156498ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39420]
I0212 00:14:50.925276  123634 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 00:14:50.925505  123634 scheduling_queue.go:868] About to try and schedule pod preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-25
I0212 00:14:50.925524  123634 scheduler.go:453] Attempting to schedule pod: preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-25
I0212 00:14:50.925668  123634 factory.go:647] Unable to schedule preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-25: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 00:14:50.925725  123634 factory.go:742] Updating pod condition for preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-25 to (PodScheduled==False, Reason=Unschedulable)
I0212 00:14:50.927773  123634 wrap.go:47] PUT /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-25/status: (1.801826ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39422]
I0212 00:14:50.927905  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-25: (1.900652ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39418]
I0212 00:14:50.929052  123634 wrap.go:47] PATCH /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/events/ppod-25.15827591c5425270: (2.672165ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39424]
I0212 00:14:50.929650  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-25: (1.390544ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39422]
I0212 00:14:50.929927  123634 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 00:14:50.930127  123634 scheduling_queue.go:868] About to try and schedule pod preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-30
I0212 00:14:50.930146  123634 scheduler.go:453] Attempting to schedule pod: preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-30
I0212 00:14:50.930281  123634 factory.go:647] Unable to schedule preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-30: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 00:14:50.930336  123634 factory.go:742] Updating pod condition for preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-30 to (PodScheduled==False, Reason=Unschedulable)
I0212 00:14:50.931877  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-30: (1.254598ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39418]
I0212 00:14:50.932348  123634 wrap.go:47] PUT /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-30/status: (1.751144ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39424]
I0212 00:14:50.933901  123634 wrap.go:47] PATCH /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/events/ppod-30.15827591d1e76898: (2.844317ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39426]
I0212 00:14:50.934735  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-30: (1.897455ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39424]
I0212 00:14:50.935076  123634 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 00:14:50.935295  123634 scheduling_queue.go:868] About to try and schedule pod preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-29
I0212 00:14:50.935317  123634 scheduler.go:453] Attempting to schedule pod: preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-29
I0212 00:14:50.935434  123634 factory.go:647] Unable to schedule preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-29: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 00:14:50.935499  123634 factory.go:742] Updating pod condition for preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-29 to (PodScheduled==False, Reason=Unschedulable)
I0212 00:14:50.937398  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-29: (1.21425ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39418]
I0212 00:14:50.938572  123634 wrap.go:47] PUT /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-29/status: (2.324556ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39426]
I0212 00:14:50.938933  123634 wrap.go:47] POST /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/events: (1.327397ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39428]
I0212 00:14:50.940099  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-29: (1.146476ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39426]
I0212 00:14:50.940503  123634 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 00:14:50.940743  123634 scheduling_queue.go:868] About to try and schedule pod preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-28
I0212 00:14:50.940768  123634 scheduler.go:453] Attempting to schedule pod: preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-28
I0212 00:14:50.940867  123634 factory.go:647] Unable to schedule preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-28: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 00:14:50.940911  123634 factory.go:742] Updating pod condition for preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-28 to (PodScheduled==False, Reason=Unschedulable)
I0212 00:14:50.943132  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-28: (1.44641ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39418]
I0212 00:14:50.943802  123634 wrap.go:47] POST /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/events: (2.058128ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39430]
I0212 00:14:50.943811  123634 wrap.go:47] PUT /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-28/status: (2.641247ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39428]
I0212 00:14:50.945633  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-28: (1.192043ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39430]
I0212 00:14:50.945889  123634 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 00:14:50.946115  123634 scheduling_queue.go:868] About to try and schedule pod preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-29
I0212 00:14:50.946139  123634 scheduler.go:453] Attempting to schedule pod: preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-29
I0212 00:14:50.946243  123634 factory.go:647] Unable to schedule preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-29: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 00:14:50.946331  123634 factory.go:742] Updating pod condition for preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-29 to (PodScheduled==False, Reason=Unschedulable)
I0212 00:14:50.948615  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-29: (1.943293ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39430]
I0212 00:14:50.948775  123634 wrap.go:47] PUT /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-29/status: (1.980451ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39418]
I0212 00:14:50.950568  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-29: (1.209248ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39418]
I0212 00:14:50.950899  123634 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 00:14:50.951094  123634 scheduling_queue.go:868] About to try and schedule pod preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-28
I0212 00:14:50.951113  123634 scheduler.go:453] Attempting to schedule pod: preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-28
I0212 00:14:50.951213  123634 factory.go:647] Unable to schedule preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-28: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 00:14:50.951264  123634 factory.go:742] Updating pod condition for preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-28 to (PodScheduled==False, Reason=Unschedulable)
I0212 00:14:50.951640  123634 wrap.go:47] PATCH /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/events/ppod-29.15827591d2c16496: (2.586998ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39432]
I0212 00:14:50.953102  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-28: (1.384417ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39430]
I0212 00:14:50.954599  123634 wrap.go:47] PATCH /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/events/ppod-28.15827591d3143e0f: (2.451769ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39432]
I0212 00:14:50.955388  123634 wrap.go:47] PUT /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-28/status: (3.840043ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39418]
I0212 00:14:50.957209  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-28: (1.233803ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39432]
I0212 00:14:50.957599  123634 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 00:14:50.957807  123634 scheduling_queue.go:868] About to try and schedule pod preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-27
I0212 00:14:50.957831  123634 scheduler.go:453] Attempting to schedule pod: preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-27
I0212 00:14:50.957925  123634 factory.go:647] Unable to schedule preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-27: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 00:14:50.957987  123634 factory.go:742] Updating pod condition for preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-27 to (PodScheduled==False, Reason=Unschedulable)
I0212 00:14:50.959389  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-27: (1.123902ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39430]
I0212 00:14:50.959946  123634 wrap.go:47] PUT /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-27/status: (1.713879ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39432]
I0212 00:14:50.960361  123634 wrap.go:47] POST /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/events: (1.752168ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39434]
I0212 00:14:50.961587  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-27: (1.246946ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39432]
I0212 00:14:50.961945  123634 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 00:14:50.962172  123634 scheduling_queue.go:868] About to try and schedule pod preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-26
I0212 00:14:50.962193  123634 scheduler.go:453] Attempting to schedule pod: preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-26
I0212 00:14:50.962325  123634 factory.go:647] Unable to schedule preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-26: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 00:14:50.962376  123634 factory.go:742] Updating pod condition for preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-26 to (PodScheduled==False, Reason=Unschedulable)
I0212 00:14:50.963362  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/preemptor-pod: (1.367568ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39434]
I0212 00:14:50.963655  123634 preemption_test.go:583] Check unschedulable pods still exists and were never scheduled...
I0212 00:14:50.964753  123634 wrap.go:47] PUT /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-26/status: (1.795423ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39436]
I0212 00:14:50.965191  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-26: (2.312772ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39430]
I0212 00:14:50.965333  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-0: (1.53775ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39434]
I0212 00:14:50.965858  123634 wrap.go:47] POST /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/events: (2.722203ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39438]
I0212 00:14:50.966387  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-26: (1.224243ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39436]
I0212 00:14:50.966792  123634 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 00:14:50.966949  123634 scheduling_queue.go:868] About to try and schedule pod preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-27
I0212 00:14:50.966969  123634 scheduler.go:453] Attempting to schedule pod: preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-27
I0212 00:14:50.967031  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-1: (1.228285ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39434]
I0212 00:14:50.967038  123634 factory.go:647] Unable to schedule preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-27: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 00:14:50.967099  123634 factory.go:742] Updating pod condition for preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-27 to (PodScheduled==False, Reason=Unschedulable)
I0212 00:14:50.968337  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-27: (1.033521ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39438]
I0212 00:14:50.969324  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-2: (1.741026ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39434]
I0212 00:14:50.970000  123634 wrap.go:47] PATCH /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/events/ppod-27.15827591d418c1fa: (2.181464ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39440]
I0212 00:14:50.970606  123634 wrap.go:47] PUT /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-27/status: (3.253028ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39430]
I0212 00:14:50.970947  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-3: (1.197252ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39434]
I0212 00:14:50.972431  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-4: (1.124586ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39440]
I0212 00:14:50.972717  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-27: (1.019865ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39438]
I0212 00:14:50.973068  123634 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 00:14:50.973229  123634 scheduling_queue.go:868] About to try and schedule pod preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-26
I0212 00:14:50.973250  123634 scheduler.go:453] Attempting to schedule pod: preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-26
I0212 00:14:50.973349  123634 factory.go:647] Unable to schedule preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-26: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 00:14:50.973389  123634 factory.go:742] Updating pod condition for preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-26 to (PodScheduled==False, Reason=Unschedulable)
I0212 00:14:50.974619  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-5: (1.680509ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39440]
I0212 00:14:50.975532  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-26: (1.592111ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39442]
I0212 00:14:50.975583  123634 wrap.go:47] PUT /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-26/status: (1.764074ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39438]
I0212 00:14:50.977092  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-6: (1.875156ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39444]
I0212 00:14:50.977647  123634 wrap.go:47] PATCH /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/events/ppod-26.15827591d45bbe3c: (2.701602ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39440]
I0212 00:14:50.977769  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-26: (1.590541ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39442]
I0212 00:14:50.978326  123634 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 00:14:50.978603  123634 scheduling_queue.go:868] About to try and schedule pod preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-20
I0212 00:14:50.978629  123634 scheduler.go:453] Attempting to schedule pod: preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-20
I0212 00:14:50.978737  123634 factory.go:647] Unable to schedule preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-20: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 00:14:50.978783  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-7: (1.295957ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39444]
I0212 00:14:50.978789  123634 factory.go:742] Updating pod condition for preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-20 to (PodScheduled==False, Reason=Unschedulable)
I0212 00:14:50.980886  123634 wrap.go:47] PUT /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-20/status: (1.798049ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39440]
I0212 00:14:50.981057  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-20: (1.518005ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39448]
I0212 00:14:50.981129  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-8: (1.284279ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39446]
I0212 00:14:50.981851  123634 wrap.go:47] PATCH /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/events/ppod-20.15827591c44bde12: (2.38527ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39438]
I0212 00:14:50.982540  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-20: (1.103658ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39448]
I0212 00:14:50.982816  123634 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 00:14:50.983014  123634 scheduling_queue.go:868] About to try and schedule pod preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-23
I0212 00:14:50.983033  123634 scheduler.go:453] Attempting to schedule pod: preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-23
I0212 00:14:50.983125  123634 factory.go:647] Unable to schedule preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-23: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 00:14:50.983164  123634 factory.go:742] Updating pod condition for preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-23 to (PodScheduled==False, Reason=Unschedulable)
I0212 00:14:50.983382  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-9: (1.06651ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39446]
I0212 00:14:50.985895  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-23: (2.180741ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39440]
I0212 00:14:50.986140  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-10: (2.120177ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39452]
I0212 00:14:50.986242  123634 wrap.go:47] POST /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/events: (2.126143ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39446]
I0212 00:14:50.986429  123634 wrap.go:47] PUT /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-23/status: (2.973414ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39448]
I0212 00:14:50.987893  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-11: (1.367977ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39440]
I0212 00:14:50.988166  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-23: (1.266058ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39448]
I0212 00:14:50.988433  123634 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 00:14:50.989097  123634 scheduling_queue.go:868] About to try and schedule pod preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-21
I0212 00:14:50.989162  123634 scheduler.go:453] Attempting to schedule pod: preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-21
I0212 00:14:50.989298  123634 factory.go:647] Unable to schedule preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-21: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 00:14:50.989370  123634 factory.go:742] Updating pod condition for preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-21 to (PodScheduled==False, Reason=Unschedulable)
I0212 00:14:50.989412  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-12: (1.107784ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39440]
I0212 00:14:50.990719  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-21: (978.35µs) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39440]
I0212 00:14:50.991327  123634 wrap.go:47] PUT /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-21/status: (1.619515ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39448]
I0212 00:14:50.992061  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-13: (1.839553ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39454]
I0212 00:14:50.992797  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-21: (1.094573ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39448]
I0212 00:14:50.993118  123634 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 00:14:50.993331  123634 scheduling_queue.go:868] About to try and schedule pod preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-23
I0212 00:14:50.993370  123634 scheduler.go:453] Attempting to schedule pod: preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-23
I0212 00:14:50.993519  123634 factory.go:647] Unable to schedule preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-23: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 00:14:50.993584  123634 factory.go:742] Updating pod condition for preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-23 to (PodScheduled==False, Reason=Unschedulable)
I0212 00:14:50.994088  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-14: (1.129016ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39454]
I0212 00:14:50.995014  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-23: (1.236813ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39448]
I0212 00:14:50.995574  123634 wrap.go:47] POST /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/events: (5.309674ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39450]
I0212 00:14:50.995779  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-15: (1.37586ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39454]
I0212 00:14:50.996443  123634 wrap.go:47] PUT /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-23/status: (2.662786ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39440]
I0212 00:14:50.997939  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-23: (1.122803ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39448]
I0212 00:14:50.998093  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-16: (1.325619ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39440]
I0212 00:14:50.998935  123634 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 00:14:50.999092  123634 scheduling_queue.go:868] About to try and schedule pod preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-21
I0212 00:14:50.999115  123634 scheduler.go:453] Attempting to schedule pod: preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-21
I0212 00:14:50.999236  123634 factory.go:647] Unable to schedule preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-21: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 00:14:50.999284  123634 factory.go:742] Updating pod condition for preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-21 to (PodScheduled==False, Reason=Unschedulable)
I0212 00:14:50.999664  123634 wrap.go:47] PATCH /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/events/ppod-23.15827591d598fb90: (3.263206ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39450]
I0212 00:14:51.000176  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-17: (1.264019ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39440]
I0212 00:14:51.001942  123634 wrap.go:47] PUT /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-21/status: (2.08178ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39456]
I0212 00:14:51.002331  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-21: (2.547848ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39448]
I0212 00:14:51.002820  123634 wrap.go:47] PATCH /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/events/ppod-21.15827591d5f7a0fe: (2.374085ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39440]
I0212 00:14:51.003379  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-21: (1.084645ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39456]
I0212 00:14:51.003732  123634 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 00:14:51.003889  123634 scheduling_queue.go:868] About to try and schedule pod preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-17
I0212 00:14:51.003914  123634 scheduler.go:453] Attempting to schedule pod: preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-17
I0212 00:14:51.004018  123634 factory.go:647] Unable to schedule preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-17: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 00:14:51.004094  123634 factory.go:742] Updating pod condition for preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-17 to (PodScheduled==False, Reason=Unschedulable)
I0212 00:14:51.005384  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-18: (4.676618ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39450]
I0212 00:14:51.006266  123634 wrap.go:47] PUT /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-17/status: (1.927567ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39440]
I0212 00:14:51.006437  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-17: (1.444993ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39448]
I0212 00:14:51.007783  123634 wrap.go:47] PATCH /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/events/ppod-17.15827591c3b79fc5: (2.787319ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39458]
I0212 00:14:51.007840  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-17: (1.179301ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39440]
I0212 00:14:51.008081  123634 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 00:14:51.008133  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-19: (941.835µs) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39450]
I0212 00:14:51.008223  123634 scheduling_queue.go:868] About to try and schedule pod preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-19
I0212 00:14:51.008238  123634 scheduler.go:453] Attempting to schedule pod: preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-19
I0212 00:14:51.008971  123634 factory.go:647] Unable to schedule preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-19: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 00:14:51.009013  123634 factory.go:742] Updating pod condition for preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-19 to (PodScheduled==False, Reason=Unschedulable)
I0212 00:14:51.010395  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-20: (1.82443ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39458]
I0212 00:14:51.010755  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-19: (981.483µs) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39460]
I0212 00:14:51.012507  123634 wrap.go:47] PUT /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-19/status: (2.79002ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39448]
I0212 00:14:51.012908  123634 wrap.go:47] POST /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/events: (1.964178ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39458]
I0212 00:14:51.013647  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-21: (2.517425ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39460]
I0212 00:14:51.014170  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-19: (1.013644ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39462]
I0212 00:14:51.014411  123634 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 00:14:51.014620  123634 scheduling_queue.go:868] About to try and schedule pod preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-0
I0212 00:14:51.014647  123634 scheduler.go:453] Attempting to schedule pod: preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-0
I0212 00:14:51.014724  123634 factory.go:647] Unable to schedule preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-0: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 00:14:51.014772  123634 factory.go:742] Updating pod condition for preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-0 to (PodScheduled==False, Reason=Unschedulable)
I0212 00:14:51.015167  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-22: (1.083569ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39458]
I0212 00:14:51.016808  123634 wrap.go:47] POST /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/events: (1.376862ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39458]
I0212 00:14:51.017382  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-0: (2.068541ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39464]
I0212 00:14:51.017734  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-23: (1.975838ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39468]
I0212 00:14:51.018099  123634 wrap.go:47] PUT /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-0/status: (2.770637ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39462]
I0212 00:14:51.019983  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-24: (1.673236ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39458]
I0212 00:14:51.020010  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-0: (1.578668ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39462]
I0212 00:14:51.020691  123634 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 00:14:51.020960  123634 scheduling_queue.go:868] About to try and schedule pod preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-2
I0212 00:14:51.020976  123634 scheduler.go:453] Attempting to schedule pod: preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-2
I0212 00:14:51.021159  123634 factory.go:647] Unable to schedule preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-2: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 00:14:51.021242  123634 factory.go:742] Updating pod condition for preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-2 to (PodScheduled==False, Reason=Unschedulable)
I0212 00:14:51.022256  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-25: (1.852826ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39458]
I0212 00:14:51.023420  123634 wrap.go:47] POST /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/events: (1.662366ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39466]
I0212 00:14:51.023730  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-2: (2.240024ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39462]
I0212 00:14:51.025714  123634 wrap.go:47] PUT /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-2/status: (2.964226ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39470]
I0212 00:14:51.026409  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-26: (3.510042ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39458]
I0212 00:14:51.027611  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-2: (1.356482ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39462]
I0212 00:14:51.028070  123634 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 00:14:51.028235  123634 scheduling_queue.go:868] About to try and schedule pod preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-8
I0212 00:14:51.028291  123634 scheduler.go:453] Attempting to schedule pod: preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-8
I0212 00:14:51.028399  123634 factory.go:647] Unable to schedule preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-8: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 00:14:51.028510  123634 factory.go:742] Updating pod condition for preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-8 to (PodScheduled==False, Reason=Unschedulable)
I0212 00:14:51.028739  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-27: (1.505125ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39458]
I0212 00:14:51.030417  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-8: (1.409322ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39466]
I0212 00:14:51.031080  123634 wrap.go:47] PUT /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-8/status: (2.105958ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39462]
I0212 00:14:51.031137  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-28: (986.906µs) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39458]
I0212 00:14:51.032059  123634 wrap.go:47] POST /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/events: (2.678139ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39472]
I0212 00:14:51.032834  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-8: (1.362631ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39462]
I0212 00:14:51.033130  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-29: (1.350819ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39466]
I0212 00:14:51.033192  123634 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 00:14:51.033411  123634 scheduling_queue.go:868] About to try and schedule pod preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-13
I0212 00:14:51.033465  123634 scheduler.go:453] Attempting to schedule pod: preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-13
I0212 00:14:51.033741  123634 factory.go:647] Unable to schedule preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-13: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 00:14:51.033916  123634 factory.go:742] Updating pod condition for preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-13 to (PodScheduled==False, Reason=Unschedulable)
I0212 00:14:51.034942  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-30: (1.247667ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39462]
I0212 00:14:51.035350  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-13: (1.206638ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39472]
I0212 00:14:51.036858  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-31: (1.229577ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39476]
I0212 00:14:51.038812  123634 wrap.go:47] PATCH /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/events/ppod-13.15827591c31bddf6: (3.392947ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39462]
I0212 00:14:51.040251  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-32: (2.941865ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39476]
I0212 00:14:51.040306  123634 wrap.go:47] PUT /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-13/status: (4.518132ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39474]
I0212 00:14:51.042080  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-13: (1.269843ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39472]
I0212 00:14:51.042244  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-33: (1.461663ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39462]
I0212 00:14:51.042339  123634 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 00:14:51.042530  123634 scheduling_queue.go:868] About to try and schedule pod preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-18
I0212 00:14:51.042573  123634 scheduler.go:453] Attempting to schedule pod: preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-18
I0212 00:14:51.042691  123634 factory.go:647] Unable to schedule preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-18: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 00:14:51.042748  123634 factory.go:742] Updating pod condition for preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-18 to (PodScheduled==False, Reason=Unschedulable)
I0212 00:14:51.044158  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-34: (1.34738ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39462]
I0212 00:14:51.044903  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-18: (1.484314ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39478]
I0212 00:14:51.045159  123634 wrap.go:47] POST /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/events: (1.492957ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39480]
I0212 00:14:51.045653  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-35: (1.06306ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39462]
I0212 00:14:51.045736  123634 wrap.go:47] PUT /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-18/status: (2.783396ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39472]
I0212 00:14:51.048769  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-18: (1.09585ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39478]
I0212 00:14:51.048962  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-36: (1.283058ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39480]
I0212 00:14:51.049171  123634 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 00:14:51.049314  123634 scheduling_queue.go:868] About to try and schedule pod preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-10
I0212 00:14:51.049356  123634 scheduler.go:453] Attempting to schedule pod: preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-10
I0212 00:14:51.049519  123634 factory.go:647] Unable to schedule preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-10: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 00:14:51.049752  123634 factory.go:742] Updating pod condition for preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-10 to (PodScheduled==False, Reason=Unschedulable)
I0212 00:14:51.050879  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-10: (956.068µs) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39478]
I0212 00:14:51.050941  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-37: (1.634953ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39480]
I0212 00:14:51.051924  123634 wrap.go:47] POST /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/events: (1.587233ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39484]
I0212 00:14:51.052386  123634 wrap.go:47] PUT /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-10/status: (2.18562ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39482]
I0212 00:14:51.052831  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-38: (1.213724ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39480]
I0212 00:14:51.054199  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-39: (986.388µs) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39480]
I0212 00:14:51.054440  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-10: (1.446295ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39484]
I0212 00:14:51.054764  123634 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 00:14:51.055016  123634 scheduling_queue.go:868] About to try and schedule pod preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-5
I0212 00:14:51.055103  123634 scheduler.go:453] Attempting to schedule pod: preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-5
I0212 00:14:51.055264  123634 factory.go:647] Unable to schedule preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-5: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 00:14:51.055352  123634 factory.go:742] Updating pod condition for preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-5 to (PodScheduled==False, Reason=Unschedulable)
I0212 00:14:51.055729  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-40: (1.089898ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39480]
I0212 00:14:51.056988  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-41: (916.385µs) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39480]
I0212 00:14:51.057163  123634 wrap.go:47] POST /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/events: (1.311163ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39486]
I0212 00:14:51.057165  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-5: (1.563254ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39484]
I0212 00:14:51.058880  123634 wrap.go:47] PUT /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-5/status: (3.221883ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39478]
I0212 00:14:51.059350  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-42: (1.595398ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39484]
I0212 00:14:51.060413  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-5: (1.060581ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39478]
I0212 00:14:51.060770  123634 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 00:14:51.060966  123634 scheduling_queue.go:868] About to try and schedule pod preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-7
I0212 00:14:51.061008  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-43: (1.292232ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39484]
I0212 00:14:51.061008  123634 scheduler.go:453] Attempting to schedule pod: preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-7
I0212 00:14:51.061205  123634 factory.go:647] Unable to schedule preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-7: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 00:14:51.061280  123634 factory.go:742] Updating pod condition for preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-7 to (PodScheduled==False, Reason=Unschedulable)
I0212 00:14:51.062819  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-44: (1.425802ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39478]
I0212 00:14:51.063424  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-7: (1.701951ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39488]
I0212 00:14:51.064037  123634 wrap.go:47] PUT /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-7/status: (2.306181ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39486]
I0212 00:14:51.064287  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-45: (1.064382ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39478]
I0212 00:14:51.064891  123634 wrap.go:47] PATCH /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/events/ppod-7.15827591c22d2771: (2.759192ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39490]
I0212 00:14:51.065825  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-7: (1.251583ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39486]
I0212 00:14:51.066004  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-46: (1.245209ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39478]
I0212 00:14:51.066298  123634 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 00:14:51.066565  123634 scheduling_queue.go:868] About to try and schedule pod preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-12
I0212 00:14:51.066621  123634 scheduler.go:453] Attempting to schedule pod: preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-12
I0212 00:14:51.066740  123634 factory.go:647] Unable to schedule preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-12: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 00:14:51.066799  123634 factory.go:742] Updating pod condition for preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-12 to (PodScheduled==False, Reason=Unschedulable)
I0212 00:14:51.067939  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-47: (1.418294ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39490]
I0212 00:14:51.068320  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-12: (1.000963ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39492]
I0212 00:14:51.068884  123634 wrap.go:47] POST /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/events: (1.455442ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39494]
I0212 00:14:51.069463  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-48: (948.667µs) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39490]
I0212 00:14:51.071272  123634 wrap.go:47] PUT /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-12/status: (4.26046ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39488]
I0212 00:14:51.071696  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-49: (1.305947ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39494]
I0212 00:14:51.071955  123634 preemption_test.go:598] Cleaning up all pods...
I0212 00:14:51.072860  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-12: (1.121004ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39488]
I0212 00:14:51.073309  123634 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 00:14:51.073476  123634 scheduling_queue.go:868] About to try and schedule pod preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-16
I0212 00:14:51.073511  123634 scheduler.go:453] Attempting to schedule pod: preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-16
I0212 00:14:51.073630  123634 factory.go:647] Unable to schedule preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-16: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 00:14:51.073691  123634 factory.go:742] Updating pod condition for preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-16 to (PodScheduled==False, Reason=Unschedulable)
I0212 00:14:51.075784  123634 wrap.go:47] POST /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/events: (1.557785ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39496]
I0212 00:14:51.076022  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-16: (2.102455ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39492]
I0212 00:14:51.076573  123634 wrap.go:47] PUT /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-16/status: (2.627555ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39488]
I0212 00:14:51.077025  123634 wrap.go:47] DELETE /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-0: (4.904228ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39494]
I0212 00:14:51.078157  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-16: (1.201219ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39492]
I0212 00:14:51.078508  123634 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 00:14:51.078699  123634 scheduling_queue.go:868] About to try and schedule pod preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-1
I0212 00:14:51.078752  123634 scheduler.go:453] Attempting to schedule pod: preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-1
I0212 00:14:51.078876  123634 factory.go:647] Unable to schedule preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-1: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 00:14:51.078938  123634 factory.go:742] Updating pod condition for preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-1 to (PodScheduled==False, Reason=Unschedulable)
I0212 00:14:51.080421  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-1: (1.204925ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39492]
I0212 00:14:51.081187  123634 wrap.go:47] PUT /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-1/status: (1.538352ms) 409 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39496]
I0212 00:14:51.081393  123634 wrap.go:47] DELETE /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-1: (4.104634ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39494]
I0212 00:14:51.081802  123634 wrap.go:47] POST /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/events: (2.158349ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39498]
I0212 00:14:51.082690  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-1: (884.303µs) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39496]
E0212 00:14:51.082923  123634 scheduler.go:294] Error getting the updated preemptor pod object: pods "ppod-1" not found
I0212 00:14:51.083073  123634 scheduling_queue.go:868] About to try and schedule pod preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-11
I0212 00:14:51.083716  123634 scheduler.go:453] Attempting to schedule pod: preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-11
I0212 00:14:51.083866  123634 factory.go:647] Unable to schedule preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-11: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 00:14:51.083924  123634 factory.go:742] Updating pod condition for preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-11 to (PodScheduled==False, Reason=Unschedulable)
I0212 00:14:51.086290  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-11: (2.053698ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39492]
I0212 00:14:51.086412  123634 wrap.go:47] DELETE /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-2: (4.5586ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39494]
I0212 00:14:51.086296  123634 wrap.go:47] PUT /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-11/status: (2.144651ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39498]
I0212 00:14:51.087262  123634 wrap.go:47] PATCH /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/events/ppod-11.15827591c2ccc3c8: (2.556938ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39500]
I0212 00:14:51.088719  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-11: (1.03396ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39498]
I0212 00:14:51.089044  123634 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 00:14:51.089571  123634 scheduling_queue.go:868] About to try and schedule pod preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-14
I0212 00:14:51.089598  123634 scheduler.go:453] Attempting to schedule pod: preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-14
I0212 00:14:51.089720  123634 factory.go:647] Unable to schedule preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-14: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 00:14:51.089782  123634 factory.go:742] Updating pod condition for preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-14 to (PodScheduled==False, Reason=Unschedulable)
I0212 00:14:51.090450  123634 wrap.go:47] DELETE /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-3: (3.671711ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39494]
I0212 00:14:51.092342  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-14: (2.245126ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39492]
I0212 00:14:51.092705  123634 wrap.go:47] POST /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/events: (1.323747ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39502]
I0212 00:14:51.092343  123634 wrap.go:47] PUT /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-14/status: (2.333373ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39500]
I0212 00:14:51.094387  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-14: (1.12714ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39502]
I0212 00:14:51.094707  123634 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 00:14:51.094873  123634 scheduling_queue.go:868] About to try and schedule pod preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-6
I0212 00:14:51.094894  123634 scheduler.go:453] Attempting to schedule pod: preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-6
I0212 00:14:51.094962  123634 factory.go:647] Unable to schedule preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-6: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 00:14:51.095005  123634 factory.go:742] Updating pod condition for preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-6 to (PodScheduled==False, Reason=Unschedulable)
I0212 00:14:51.095432  123634 wrap.go:47] DELETE /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-4: (4.654696ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39494]
I0212 00:14:51.096362  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-6: (1.025923ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39492]
I0212 00:14:51.096910  123634 wrap.go:47] PUT /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-6/status: (1.65251ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39502]
I0212 00:14:51.097710  123634 wrap.go:47] POST /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/events: (1.990556ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39504]
I0212 00:14:51.098365  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-6: (1.088594ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39502]
I0212 00:14:51.098941  123634 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 00:14:51.099361  123634 scheduling_queue.go:868] About to try and schedule pod preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-9
I0212 00:14:51.099417  123634 scheduler.go:453] Attempting to schedule pod: preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-9
I0212 00:14:51.099534  123634 factory.go:647] Unable to schedule preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-9: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 00:14:51.099637  123634 factory.go:742] Updating pod condition for preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-9 to (PodScheduled==False, Reason=Unschedulable)
I0212 00:14:51.101010  123634 wrap.go:47] DELETE /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-5: (4.453725ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39494]
I0212 00:14:51.101393  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-9: (1.363012ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39492]
I0212 00:14:51.101972  123634 wrap.go:47] PUT /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-9/status: (1.902919ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39504]
I0212 00:14:51.103016  123634 wrap.go:47] PATCH /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/events/ppod-9.15827591c281842e: (2.811037ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39506]
I0212 00:14:51.103623  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-9: (1.136844ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39504]
I0212 00:14:51.103929  123634 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 00:14:51.104146  123634 scheduling_queue.go:868] About to try and schedule pod preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-12
I0212 00:14:51.104171  123634 scheduler.go:453] Attempting to schedule pod: preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-12
I0212 00:14:51.104262  123634 factory.go:647] Unable to schedule preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-12: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 00:14:51.104314  123634 factory.go:742] Updating pod condition for preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-12 to (PodScheduled==False, Reason=Unschedulable)
I0212 00:14:51.106113  123634 wrap.go:47] DELETE /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-6: (4.313689ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39494]
I0212 00:14:51.106695  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-12: (1.954239ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39506]
I0212 00:14:51.106917  123634 wrap.go:47] PUT /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-12/status: (2.184621ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39492]
I0212 00:14:51.107646  123634 wrap.go:47] PATCH /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/events/ppod-12.15827591da951bef: (2.315868ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39508]
I0212 00:14:51.108224  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-12: (922.315µs) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39492]
I0212 00:14:51.108530  123634 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 00:14:51.108705  123634 scheduling_queue.go:868] About to try and schedule pod preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-10
I0212 00:14:51.108725  123634 scheduler.go:453] Attempting to schedule pod: preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-10
I0212 00:14:51.108826  123634 factory.go:647] Unable to schedule preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-10: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 00:14:51.108877  123634 factory.go:742] Updating pod condition for preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-10 to (PodScheduled==False, Reason=Unschedulable)
I0212 00:14:51.110382  123634 wrap.go:47] DELETE /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-7: (3.520583ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39494]
I0212 00:14:51.110937  123634 wrap.go:47] PUT /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-10/status: (1.629871ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39508]
I0212 00:14:51.110978  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-10: (1.702595ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39506]
I0212 00:14:51.112097  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-10: (872.386µs) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39506]
I0212 00:14:51.112388  123634 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 00:14:51.112695  123634 scheduling_queue.go:868] About to try and schedule pod preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-17
I0212 00:14:51.112720  123634 scheduler.go:453] Attempting to schedule pod: preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-17
I0212 00:14:51.112836  123634 factory.go:647] Unable to schedule preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-17: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 00:14:51.112893  123634 factory.go:742] Updating pod condition for preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-17 to (PodScheduled==False, Reason=Unschedulable)
I0212 00:14:51.114744  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-17: (1.309528ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39508]
I0212 00:14:51.115135  123634 wrap.go:47] PUT /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-17/status: (1.672227ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39506]
I0212 00:14:51.115370  123634 wrap.go:47] DELETE /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-8: (4.679931ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39494]
I0212 00:14:51.116097  123634 wrap.go:47] PATCH /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/events/ppod-10.15827591d990e23d: (6.421765ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39510]
I0212 00:14:51.116647  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-17: (1.093062ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39506]
I0212 00:14:51.116960  123634 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 00:14:51.117158  123634 scheduling_queue.go:868] About to try and schedule pod preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-16
I0212 00:14:51.117181  123634 scheduler.go:453] Attempting to schedule pod: preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-16
I0212 00:14:51.117304  123634 factory.go:647] Unable to schedule preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-16: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 00:14:51.117357  123634 factory.go:742] Updating pod condition for preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-16 to (PodScheduled==False, Reason=Unschedulable)
I0212 00:14:51.119074  123634 wrap.go:47] PATCH /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/events/ppod-17.15827591c3b79fc5: (2.296033ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39510]
I0212 00:14:51.119177  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-16: (1.558785ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39506]
I0212 00:14:51.120021  123634 wrap.go:47] DELETE /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-9: (4.304738ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39494]
I0212 00:14:51.120089  123634 wrap.go:47] PUT /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-16/status: (2.385649ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39508]
I0212 00:14:51.121732  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-16: (1.131195ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39510]
I0212 00:14:51.121879  123634 wrap.go:47] PATCH /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/events/ppod-16.15827591dafe42a4: (2.198775ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39506]
I0212 00:14:51.122148  123634 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 00:14:51.123167  123634 scheduling_queue.go:868] About to try and schedule pod preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-14
I0212 00:14:51.123190  123634 scheduler.go:453] Attempting to schedule pod: preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-14
I0212 00:14:51.123267  123634 factory.go:647] Unable to schedule preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-14: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 00:14:51.123344  123634 factory.go:742] Updating pod condition for preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-14 to (PodScheduled==False, Reason=Unschedulable)
I0212 00:14:51.124613  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-14: (1.033724ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39510]
I0212 00:14:51.124983  123634 wrap.go:47] DELETE /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-10: (4.640602ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39494]
I0212 00:14:51.125963  123634 wrap.go:47] PUT /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-14/status: (2.098781ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39506]
I0212 00:14:51.126902  123634 wrap.go:47] PATCH /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/events/ppod-14.15827591dbf3c8fe: (2.658982ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39512]
I0212 00:14:51.128462  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-14: (1.845471ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39506]
I0212 00:14:51.128820  123634 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 00:14:51.129302  123634 wrap.go:47] DELETE /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-11: (3.968253ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39494]
I0212 00:14:51.129826  123634 scheduling_queue.go:868] About to try and schedule pod preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-19
I0212 00:14:51.129849  123634 scheduler.go:453] Attempting to schedule pod: preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-19
I0212 00:14:51.129950  123634 factory.go:647] Unable to schedule preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-19: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 00:14:51.130002  123634 factory.go:742] Updating pod condition for preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-19 to (PodScheduled==False, Reason=Unschedulable)
I0212 00:14:51.131475  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-19: (1.165888ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39510]
I0212 00:14:51.134169  123634 wrap.go:47] PATCH /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/events/ppod-19.15827591d7236988: (3.023475ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39514]
I0212 00:14:51.134216  123634 wrap.go:47] PUT /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-19/status: (3.056885ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39516]
I0212 00:14:51.135578  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-19: (966.219µs) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39516]
I0212 00:14:51.135970  123634 wrap.go:47] DELETE /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-12: (6.228858ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39512]
I0212 00:14:51.136745  123634 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 00:14:51.139421  123634 scheduling_queue.go:868] About to try and schedule pod preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-13
I0212 00:14:51.139467  123634 scheduler.go:449] Skip schedule deleting pod: preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-13
I0212 00:14:51.141215  123634 wrap.go:47] DELETE /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-13: (4.542265ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39512]
I0212 00:14:51.142936  123634 wrap.go:47] POST /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/events: (3.177356ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39516]
I0212 00:14:51.144187  123634 scheduling_queue.go:868] About to try and schedule pod preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-14
I0212 00:14:51.144230  123634 scheduler.go:449] Skip schedule deleting pod: preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-14
I0212 00:14:51.145680  123634 wrap.go:47] DELETE /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-14: (4.19315ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39512]
I0212 00:14:51.145934  123634 wrap.go:47] POST /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/events: (1.418174ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39516]
I0212 00:14:51.148513  123634 scheduling_queue.go:868] About to try and schedule pod preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-15
I0212 00:14:51.148662  123634 scheduler.go:449] Skip schedule deleting pod: preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-15
I0212 00:14:51.149860  123634 wrap.go:47] DELETE /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-15: (3.85531ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39512]
I0212 00:14:51.150150  123634 wrap.go:47] POST /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/events: (1.258188ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39516]
I0212 00:14:51.153004  123634 scheduling_queue.go:868] About to try and schedule pod preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-16
I0212 00:14:51.153064  123634 scheduler.go:449] Skip schedule deleting pod: preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-16
I0212 00:14:51.155244  123634 wrap.go:47] POST /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/events: (1.900498ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39510]
I0212 00:14:51.155370  123634 wrap.go:47] DELETE /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-16: (4.95449ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39512]
I0212 00:14:51.158273  123634 scheduling_queue.go:868] About to try and schedule pod preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-17
I0212 00:14:51.158354  123634 scheduler.go:449] Skip schedule deleting pod: preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-17
I0212 00:14:51.159365  123634 wrap.go:47] DELETE /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-17: (3.642155ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39512]
I0212 00:14:51.160157  123634 wrap.go:47] POST /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/events: (1.48724ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39510]
I0212 00:14:51.162421  123634 scheduling_queue.go:868] About to try and schedule pod preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-18
I0212 00:14:51.162464  123634 scheduler.go:449] Skip schedule deleting pod: preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-18
I0212 00:14:51.164027  123634 wrap.go:47] POST /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/events: (1.28335ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39510]
I0212 00:14:51.164138  123634 wrap.go:47] DELETE /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-18: (4.406839ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39512]
I0212 00:14:51.166979  123634 scheduling_queue.go:868] About to try and schedule pod preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-19
I0212 00:14:51.167141  123634 scheduler.go:449] Skip schedule deleting pod: preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-19
I0212 00:14:51.168745  123634 wrap.go:47] POST /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/events: (1.304685ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39510]
I0212 00:14:51.169224  123634 wrap.go:47] DELETE /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-19: (4.679897ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39512]
I0212 00:14:51.173832  123634 scheduling_queue.go:868] About to try and schedule pod preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-20
I0212 00:14:51.174051  123634 scheduler.go:449] Skip schedule deleting pod: preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-20
I0212 00:14:51.176093  123634 wrap.go:47] DELETE /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-20: (6.412194ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39512]
I0212 00:14:51.177089  123634 wrap.go:47] POST /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/events: (2.628967ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39510]
I0212 00:14:51.179303  123634 scheduling_queue.go:868] About to try and schedule pod preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-21
I0212 00:14:51.179370  123634 scheduler.go:449] Skip schedule deleting pod: preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-21
I0212 00:14:51.181034  123634 wrap.go:47] DELETE /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-21: (4.609854ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39512]
I0212 00:14:51.181503  123634 wrap.go:47] POST /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/events: (1.603643ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39510]
I0212 00:14:51.184118  123634 scheduling_queue.go:868] About to try and schedule pod preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-22
I0212 00:14:51.184146  123634 scheduler.go:449] Skip schedule deleting pod: preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-22
I0212 00:14:51.187268  123634 wrap.go:47] DELETE /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-22: (5.517043ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39510]
I0212 00:14:51.188739  123634 wrap.go:47] POST /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/events: (4.358713ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39512]
I0212 00:14:51.190341  123634 scheduling_queue.go:868] About to try and schedule pod preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-23
I0212 00:14:51.190425  123634 scheduler.go:449] Skip schedule deleting pod: preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-23
I0212 00:14:51.194947  123634 wrap.go:47] DELETE /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-23: (7.279723ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39510]
I0212 00:14:51.196676  123634 wrap.go:47] POST /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/events: (1.578997ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39512]
I0212 00:14:51.199738  123634 scheduling_queue.go:868] About to try and schedule pod preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-24
I0212 00:14:51.199773  123634 scheduler.go:449] Skip schedule deleting pod: preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-24
I0212 00:14:51.201838  123634 wrap.go:47] POST /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/events: (1.78927ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39512]
I0212 00:14:51.202418  123634 wrap.go:47] DELETE /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-24: (6.303702ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39510]
I0212 00:14:51.208395  123634 scheduling_queue.go:868] About to try and schedule pod preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-25
I0212 00:14:51.208468  123634 scheduler.go:449] Skip schedule deleting pod: preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-25
I0212 00:14:51.208668  123634 wrap.go:47] DELETE /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-25: (5.822898ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39510]
I0212 00:14:51.210958  123634 wrap.go:47] POST /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/events: (2.11897ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39512]
I0212 00:14:51.211831  123634 scheduling_queue.go:868] About to try and schedule pod preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-26
I0212 00:14:51.211910  123634 scheduler.go:449] Skip schedule deleting pod: preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-26
I0212 00:14:51.212948  123634 wrap.go:47] DELETE /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-26: (3.916462ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39510]
I0212 00:14:51.213857  123634 wrap.go:47] POST /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/events: (1.404695ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39512]
I0212 00:14:51.215841  123634 scheduling_queue.go:868] About to try and schedule pod preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-27
I0212 00:14:51.215869  123634 scheduler.go:449] Skip schedule deleting pod: preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-27
I0212 00:14:51.217539  123634 wrap.go:47] DELETE /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-27: (4.138093ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39510]
I0212 00:14:51.217735  123634 wrap.go:47] POST /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/events: (1.619911ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39512]
I0212 00:14:51.220514  123634 scheduling_queue.go:868] About to try and schedule pod preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-28
I0212 00:14:51.220580  123634 scheduler.go:449] Skip schedule deleting pod: preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-28
I0212 00:14:51.222088  123634 wrap.go:47] DELETE /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-28: (4.103313ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39510]
I0212 00:14:51.223001  123634 wrap.go:47] POST /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/events: (1.537197ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39512]
I0212 00:14:51.224967  123634 scheduling_queue.go:868] About to try and schedule pod preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-29
I0212 00:14:51.225015  123634 scheduler.go:449] Skip schedule deleting pod: preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-29
I0212 00:14:51.227108  123634 wrap.go:47] POST /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/events: (1.865784ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39512]
I0212 00:14:51.227818  123634 wrap.go:47] DELETE /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-29: (5.424697ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39510]
I0212 00:14:51.231285  123634 scheduling_queue.go:868] About to try and schedule pod preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-30
I0212 00:14:51.231328  123634 scheduler.go:449] Skip schedule deleting pod: preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-30
I0212 00:14:51.232585  123634 wrap.go:47] DELETE /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-30: (4.373027ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39510]
I0212 00:14:51.235010  123634 wrap.go:47] POST /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/events: (3.296609ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39512]
I0212 00:14:51.237128  123634 scheduling_queue.go:868] About to try and schedule pod preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-31
I0212 00:14:51.237168  123634 scheduler.go:449] Skip schedule deleting pod: preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-31
I0212 00:14:51.238379  123634 wrap.go:47] DELETE /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-31: (4.453754ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39510]
I0212 00:14:51.239379  123634 wrap.go:47] POST /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/events: (1.997154ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39512]
I0212 00:14:51.241359  123634 scheduling_queue.go:868] About to try and schedule pod preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-32
I0212 00:14:51.241395  123634 scheduler.go:449] Skip schedule deleting pod: preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-32
I0212 00:14:51.243732  123634 wrap.go:47] POST /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/events: (2.068604ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39512]
I0212 00:14:51.244156  123634 wrap.go:47] DELETE /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-32: (5.279868ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39510]
I0212 00:14:51.247296  123634 scheduling_queue.go:868] About to try and schedule pod preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-33
I0212 00:14:51.247340  123634 scheduler.go:449] Skip schedule deleting pod: preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-33
I0212 00:14:51.248142  123634 wrap.go:47] DELETE /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-33: (3.677073ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39510]
I0212 00:14:51.250126  123634 wrap.go:47] POST /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/events: (1.723622ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39510]
I0212 00:14:51.251393  123634 scheduling_queue.go:868] About to try and schedule pod preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-34
I0212 00:14:51.251437  123634 scheduler.go:449] Skip schedule deleting pod: preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-34
I0212 00:14:51.252503  123634 wrap.go:47] DELETE /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-34: (3.970791ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39512]
I0212 00:14:51.254713  123634 wrap.go:47] POST /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/events: (1.963498ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39510]
I0212 00:14:51.255307  123634 scheduling_queue.go:868] About to try and schedule pod preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-35
I0212 00:14:51.255350  123634 scheduler.go:449] Skip schedule deleting pod: preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-35
I0212 00:14:51.257230  123634 wrap.go:47] DELETE /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-35: (4.3195ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39512]
I0212 00:14:51.257446  123634 wrap.go:47] POST /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/events: (1.407322ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39510]
I0212 00:14:51.260255  123634 scheduling_queue.go:868] About to try and schedule pod preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-36
I0212 00:14:51.260322  123634 scheduler.go:449] Skip schedule deleting pod: preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-36
I0212 00:14:51.261705  123634 wrap.go:47] DELETE /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-36: (4.175679ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39512]
I0212 00:14:51.262216  123634 wrap.go:47] POST /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/events: (1.528976ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39510]
I0212 00:14:51.264636  123634 scheduling_queue.go:868] About to try and schedule pod preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-37
I0212 00:14:51.264678  123634 scheduler.go:449] Skip schedule deleting pod: preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-37
I0212 00:14:51.265858  123634 wrap.go:47] DELETE /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-37: (3.752019ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39512]
I0212 00:14:51.266391  123634 wrap.go:47] POST /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/events: (1.466049ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39510]
I0212 00:14:51.268788  123634 scheduling_queue.go:868] About to try and schedule pod preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-38
I0212 00:14:51.268840  123634 scheduler.go:449] Skip schedule deleting pod: preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-38
I0212 00:14:51.270444  123634 wrap.go:47] DELETE /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-38: (4.287325ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39512]
I0212 00:14:51.270715  123634 wrap.go:47] POST /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/events: (1.607133ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39510]
I0212 00:14:51.274337  123634 scheduling_queue.go:868] About to try and schedule pod preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-39
I0212 00:14:51.274424  123634 scheduler.go:449] Skip schedule deleting pod: preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-39
I0212 00:14:51.278501  123634 wrap.go:47] DELETE /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-39: (7.673886ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39512]
I0212 00:14:51.279771  123634 wrap.go:47] POST /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/events: (4.931553ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39510]
I0212 00:14:51.281726  123634 scheduling_queue.go:868] About to try and schedule pod preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-40
I0212 00:14:51.281787  123634 scheduler.go:449] Skip schedule deleting pod: preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-40
I0212 00:14:51.282851  123634 wrap.go:47] DELETE /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-40: (3.993258ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39512]
I0212 00:14:51.283437  123634 wrap.go:47] POST /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/events: (1.35694ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39510]
I0212 00:14:51.285971  123634 scheduling_queue.go:868] About to try and schedule pod preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-41
I0212 00:14:51.285998  123634 scheduler.go:449] Skip schedule deleting pod: preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-41
I0212 00:14:51.287726  123634 wrap.go:47] DELETE /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-41: (4.381921ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39512]
I0212 00:14:51.288243  123634 wrap.go:47] POST /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/events: (1.814669ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39510]
I0212 00:14:51.290704  123634 scheduling_queue.go:868] About to try and schedule pod preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-42
I0212 00:14:51.290783  123634 scheduler.go:449] Skip schedule deleting pod: preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-42
I0212 00:14:51.292574  123634 wrap.go:47] DELETE /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-42: (4.432161ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39512]
I0212 00:14:51.292607  123634 wrap.go:47] POST /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/events: (1.457213ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39510]
I0212 00:14:51.295382  123634 scheduling_queue.go:868] About to try and schedule pod preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-43
I0212 00:14:51.295421  123634 scheduler.go:449] Skip schedule deleting pod: preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-43
I0212 00:14:51.296621  123634 wrap.go:47] DELETE /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-43: (3.725431ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39510]
I0212 00:14:51.297144  123634 wrap.go:47] POST /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/events: (1.333326ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39512]
I0212 00:14:51.299774  123634 scheduling_queue.go:868] About to try and schedule pod preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-44
I0212 00:14:51.299811  123634 scheduler.go:449] Skip schedule deleting pod: preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-44
I0212 00:14:51.301117  123634 wrap.go:47] DELETE /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-44: (4.021701ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39510]
I0212 00:14:51.301657  123634 wrap.go:47] POST /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/events: (1.432925ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39512]
I0212 00:14:51.303832  123634 scheduling_queue.go:868] About to try and schedule pod preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-45
I0212 00:14:51.303874  123634 scheduler.go:449] Skip schedule deleting pod: preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-45
I0212 00:14:51.305222  123634 wrap.go:47] DELETE /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-45: (3.822431ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39510]
I0212 00:14:51.305668  123634 wrap.go:47] POST /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/events: (1.540674ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39512]
I0212 00:14:51.308268  123634 scheduling_queue.go:868] About to try and schedule pod preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-46
I0212 00:14:51.308304  123634 scheduler.go:449] Skip schedule deleting pod: preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-46
I0212 00:14:51.309587  123634 wrap.go:47] DELETE /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-46: (3.905421ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39510]
I0212 00:14:51.309895  123634 wrap.go:47] POST /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/events: (1.271714ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39512]
I0212 00:14:51.312649  123634 scheduling_queue.go:868] About to try and schedule pod preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-47
I0212 00:14:51.312683  123634 scheduler.go:449] Skip schedule deleting pod: preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-47
I0212 00:14:51.313843  123634 wrap.go:47] DELETE /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-47: (3.805047ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39510]
I0212 00:14:51.314355  123634 wrap.go:47] POST /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/events: (1.455918ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39512]
I0212 00:14:51.316701  123634 scheduling_queue.go:868] About to try and schedule pod preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-48
I0212 00:14:51.316742  123634 scheduler.go:449] Skip schedule deleting pod: preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-48
I0212 00:14:51.318104  123634 wrap.go:47] DELETE /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-48: (3.902476ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39510]
I0212 00:14:51.318978  123634 wrap.go:47] POST /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/events: (2.027462ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39512]
I0212 00:14:51.321192  123634 scheduling_queue.go:868] About to try and schedule pod preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-49
I0212 00:14:51.321283  123634 scheduler.go:449] Skip schedule deleting pod: preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-49
I0212 00:14:51.322936  123634 wrap.go:47] DELETE /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-49: (4.552745ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39510]
I0212 00:14:51.323135  123634 wrap.go:47] POST /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/events: (1.460886ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39512]
I0212 00:14:51.327344  123634 wrap.go:47] DELETE /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/rpod-0: (4.038688ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39510]
I0212 00:14:51.328763  123634 wrap.go:47] DELETE /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/rpod-1: (1.01348ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39510]
I0212 00:14:51.333668  123634 wrap.go:47] DELETE /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/preemptor-pod: (4.471493ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39510]
I0212 00:14:51.336338  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-0: (1.063257ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39510]
I0212 00:14:51.339297  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-1: (1.217502ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39510]
I0212 00:14:51.342229  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-2: (1.312523ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39510]
I0212 00:14:51.344810  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-3: (1.015406ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39510]
I0212 00:14:51.347669  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-4: (996.912µs) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39510]
I0212 00:14:51.350454  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-5: (1.188044ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39510]
I0212 00:14:51.353941  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-6: (1.664773ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39510]
I0212 00:14:51.357012  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-7: (1.406677ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39510]
I0212 00:14:51.360304  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-8: (1.4879ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39510]
I0212 00:14:51.362990  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-9: (927.632µs) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39510]
I0212 00:14:51.365578  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-10: (931.009µs) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39510]
I0212 00:14:51.368122  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-11: (949.265µs) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39510]
I0212 00:14:51.370757  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-12: (1.000315ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39510]
I0212 00:14:51.373271  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-13: (928.421µs) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39510]
I0212 00:14:51.375871  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-14: (879.476µs) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39510]
I0212 00:14:51.378610  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-15: (1.147463ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39510]
I0212 00:14:51.381141  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-16: (916.63µs) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39510]
I0212 00:14:51.383702  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-17: (964.297µs) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39510]
I0212 00:14:51.386121  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-18: (900.26µs) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39510]
I0212 00:14:51.399381  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-19: (1.213057ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39510]
I0212 00:14:51.402419  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-20: (1.258897ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39510]
I0212 00:14:51.405271  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-21: (1.199067ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39510]
I0212 00:14:51.407848  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-22: (991.415µs) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39510]
I0212 00:14:51.410613  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-23: (1.192822ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39510]
I0212 00:14:51.413183  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-24: (973.056µs) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39510]
I0212 00:14:51.415982  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-25: (1.164272ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39510]
I0212 00:14:51.418419  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-26: (887.079µs) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39510]
I0212 00:14:51.421076  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-27: (1.001452ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39510]
I0212 00:14:51.423647  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-28: (941.127µs) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39510]
I0212 00:14:51.426143  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-29: (932.5µs) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39510]
I0212 00:14:51.428731  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-30: (1.029846ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39510]
I0212 00:14:51.431240  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-31: (880.746µs) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39510]
I0212 00:14:51.433859  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-32: (1.030791ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39510]
I0212 00:14:51.436363  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-33: (949.056µs) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39510]
I0212 00:14:51.438809  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-34: (865.329µs) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39510]
I0212 00:14:51.441453  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-35: (996.012µs) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39510]
I0212 00:14:51.444047  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-36: (943.276µs) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39510]
I0212 00:14:51.446716  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-37: (1.028426ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39510]
I0212 00:14:51.449365  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-38: (1.041394ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39510]
I0212 00:14:51.452159  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-39: (1.068811ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39510]
I0212 00:14:51.454727  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-40: (949.842µs) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39510]
I0212 00:14:51.457205  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-41: (960.425µs) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39510]
I0212 00:14:51.459770  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-42: (981.035µs) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39510]
I0212 00:14:51.462349  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-43: (986.005µs) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39510]
I0212 00:14:51.465188  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-44: (1.044038ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39510]
I0212 00:14:51.467471  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-45: (800.256µs) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39510]
I0212 00:14:51.470300  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-46: (1.267776ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39510]
I0212 00:14:51.474587  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-47: (1.193787ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39510]
I0212 00:14:51.477237  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-48: (1.005821ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39510]
I0212 00:14:51.479877  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-49: (982.683µs) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39510]
I0212 00:14:51.482444  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/rpod-0: (964.529µs) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39510]
I0212 00:14:51.485057  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/rpod-1: (1.011042ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39510]
I0212 00:14:51.487597  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/preemptor-pod: (973.489µs) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39510]
I0212 00:14:51.490100  123634 wrap.go:47] POST /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods: (2.027067ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39510]
I0212 00:14:51.490404  123634 scheduling_queue.go:868] About to try and schedule pod preemption-race3458f067-2e5b-11e9-8330-0242ac110002/rpod-0
I0212 00:14:51.490422  123634 scheduler.go:453] Attempting to schedule pod: preemption-race3458f067-2e5b-11e9-8330-0242ac110002/rpod-0
I0212 00:14:51.490629  123634 scheduler_binder.go:269] AssumePodVolumes for pod "preemption-race3458f067-2e5b-11e9-8330-0242ac110002/rpod-0", node "node1"
I0212 00:14:51.490654  123634 scheduler_binder.go:279] AssumePodVolumes for pod "preemption-race3458f067-2e5b-11e9-8330-0242ac110002/rpod-0", node "node1": all PVCs bound and nothing to do
I0212 00:14:51.490718  123634 factory.go:733] Attempting to bind rpod-0 to node1
I0212 00:14:51.492504  123634 wrap.go:47] POST /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods: (1.832687ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39510]
I0212 00:14:51.492523  123634 scheduling_queue.go:868] About to try and schedule pod preemption-race3458f067-2e5b-11e9-8330-0242ac110002/rpod-1
I0212 00:14:51.492537  123634 scheduler.go:453] Attempting to schedule pod: preemption-race3458f067-2e5b-11e9-8330-0242ac110002/rpod-1
I0212 00:14:51.492673  123634 scheduler_binder.go:269] AssumePodVolumes for pod "preemption-race3458f067-2e5b-11e9-8330-0242ac110002/rpod-1", node "node1"
I0212 00:14:51.492699  123634 scheduler_binder.go:279] AssumePodVolumes for pod "preemption-race3458f067-2e5b-11e9-8330-0242ac110002/rpod-1", node "node1": all PVCs bound and nothing to do
I0212 00:14:51.492747  123634 factory.go:733] Attempting to bind rpod-1 to node1
I0212 00:14:51.492915  123634 wrap.go:47] POST /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/rpod-0/binding: (1.922534ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39512]
I0212 00:14:51.493110  123634 scheduler.go:571] pod preemption-race3458f067-2e5b-11e9-8330-0242ac110002/rpod-0 is bound successfully on node node1, 1 nodes evaluated, 1 nodes were found feasible
I0212 00:14:51.494404  123634 wrap.go:47] POST /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/rpod-1/binding: (1.428924ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39510]
I0212 00:14:51.494643  123634 scheduler.go:571] pod preemption-race3458f067-2e5b-11e9-8330-0242ac110002/rpod-1 is bound successfully on node node1, 1 nodes evaluated, 1 nodes were found feasible
I0212 00:14:51.495223  123634 wrap.go:47] POST /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/events: (1.899472ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39512]
I0212 00:14:51.497283  123634 wrap.go:47] POST /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/events: (1.51865ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39512]
I0212 00:14:51.595362  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/rpod-0: (2.006051ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39512]
I0212 00:14:51.698332  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/rpod-1: (1.98495ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39512]
I0212 00:14:51.698781  123634 preemption_test.go:561] Creating the preemptor pod...
I0212 00:14:51.701222  123634 wrap.go:47] POST /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods: (2.097566ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39512]
I0212 00:14:51.701395  123634 scheduling_queue.go:868] About to try and schedule pod preemption-race3458f067-2e5b-11e9-8330-0242ac110002/preemptor-pod
I0212 00:14:51.701423  123634 scheduler.go:453] Attempting to schedule pod: preemption-race3458f067-2e5b-11e9-8330-0242ac110002/preemptor-pod
I0212 00:14:51.701579  123634 factory.go:647] Unable to schedule preemption-race3458f067-2e5b-11e9-8330-0242ac110002/preemptor-pod: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 00:14:51.701632  123634 preemption_test.go:567] Creating additional pods...
I0212 00:14:51.701650  123634 factory.go:742] Updating pod condition for preemption-race3458f067-2e5b-11e9-8330-0242ac110002/preemptor-pod to (PodScheduled==False, Reason=Unschedulable)
I0212 00:14:51.703391  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/preemptor-pod: (1.226611ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39518]
I0212 00:14:51.703726  123634 wrap.go:47] POST /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods: (1.843586ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39512]
I0212 00:14:51.704106  123634 wrap.go:47] PUT /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/preemptor-pod/status: (2.247687ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39510]
I0212 00:14:51.706279  123634 wrap.go:47] POST /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/events: (4.100877ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39520]
I0212 00:14:51.706464  123634 wrap.go:47] POST /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods: (2.378151ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39512]
I0212 00:14:51.706511  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/preemptor-pod: (1.312644ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39510]
I0212 00:14:51.706896  123634 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 00:14:51.708541  123634 wrap.go:47] POST /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods: (1.506906ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39520]
I0212 00:14:51.709417  123634 wrap.go:47] PUT /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/preemptor-pod/status: (2.010229ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39518]
I0212 00:14:51.710809  123634 wrap.go:47] POST /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods: (1.52628ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39520]
I0212 00:14:51.712652  123634 wrap.go:47] POST /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods: (1.476636ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39520]
I0212 00:14:51.714313  123634 wrap.go:47] DELETE /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/rpod-1: (4.420767ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39518]
I0212 00:14:51.714728  123634 scheduling_queue.go:868] About to try and schedule pod preemption-race3458f067-2e5b-11e9-8330-0242ac110002/preemptor-pod
I0212 00:14:51.714752  123634 scheduler.go:453] Attempting to schedule pod: preemption-race3458f067-2e5b-11e9-8330-0242ac110002/preemptor-pod
I0212 00:14:51.714786  123634 wrap.go:47] POST /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods: (1.658288ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39520]
I0212 00:14:51.714926  123634 scheduler_binder.go:269] AssumePodVolumes for pod "preemption-race3458f067-2e5b-11e9-8330-0242ac110002/preemptor-pod", node "node1"
I0212 00:14:51.714945  123634 scheduler_binder.go:279] AssumePodVolumes for pod "preemption-race3458f067-2e5b-11e9-8330-0242ac110002/preemptor-pod", node "node1": all PVCs bound and nothing to do
I0212 00:14:51.715079  123634 factory.go:733] Attempting to bind preemptor-pod to node1
I0212 00:14:51.715035  123634 scheduling_queue.go:868] About to try and schedule pod preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-4
I0212 00:14:51.715133  123634 scheduler.go:453] Attempting to schedule pod: preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-4
I0212 00:14:51.715224  123634 factory.go:647] Unable to schedule preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-4: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 00:14:51.715257  123634 factory.go:742] Updating pod condition for preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-4 to (PodScheduled==False, Reason=Unschedulable)
I0212 00:14:51.716348  123634 wrap.go:47] POST /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/events: (1.60109ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39518]
I0212 00:14:51.716824  123634 wrap.go:47] POST /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods: (1.664208ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39520]
I0212 00:14:51.718211  123634 wrap.go:47] PUT /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-4/status: (2.011029ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39522]
I0212 00:14:51.718456  123634 wrap.go:47] POST /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/preemptor-pod/binding: (2.314348ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39526]
I0212 00:14:51.718658  123634 wrap.go:47] POST /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/events: (1.345429ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39518]
I0212 00:14:51.718866  123634 scheduler.go:571] pod preemption-race3458f067-2e5b-11e9-8330-0242ac110002/preemptor-pod is bound successfully on node node1, 1 nodes evaluated, 1 nodes were found feasible
I0212 00:14:51.718876  123634 wrap.go:47] POST /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods: (1.270567ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39520]
I0212 00:14:51.719419  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-4: (2.690092ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39524]
I0212 00:14:51.719903  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-4: (1.025962ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39526]
I0212 00:14:51.720232  123634 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 00:14:51.720590  123634 scheduling_queue.go:868] About to try and schedule pod preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-3
I0212 00:14:51.720613  123634 scheduler.go:453] Attempting to schedule pod: preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-3
I0212 00:14:51.720645  123634 wrap.go:47] POST /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods: (1.422816ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39522]
I0212 00:14:51.720755  123634 wrap.go:47] POST /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/events: (1.681484ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39518]
I0212 00:14:51.720814  123634 factory.go:647] Unable to schedule preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-3: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 00:14:51.720875  123634 factory.go:742] Updating pod condition for preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-3 to (PodScheduled==False, Reason=Unschedulable)
I0212 00:14:51.722261  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-3: (1.228169ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39526]
I0212 00:14:51.723292  123634 wrap.go:47] POST /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods: (2.016167ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39518]
I0212 00:14:51.723695  123634 wrap.go:47] PUT /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-3/status: (2.619896ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39524]
I0212 00:14:51.724124  123634 wrap.go:47] POST /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/events: (2.514575ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39528]
I0212 00:14:51.725835  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-3: (1.554273ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39524]
I0212 00:14:51.726134  123634 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 00:14:51.726140  123634 wrap.go:47] POST /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods: (2.029591ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39518]
I0212 00:14:51.726380  123634 scheduling_queue.go:868] About to try and schedule pod preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-7
I0212 00:14:51.726402  123634 scheduler.go:453] Attempting to schedule pod: preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-7
I0212 00:14:51.726525  123634 factory.go:647] Unable to schedule preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-7: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 00:14:51.726595  123634 factory.go:742] Updating pod condition for preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-7 to (PodScheduled==False, Reason=Unschedulable)
I0212 00:14:51.728453  123634 wrap.go:47] POST /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods: (1.684403ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39528]
I0212 00:14:51.728615  123634 wrap.go:47] POST /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/events: (1.531535ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39532]
I0212 00:14:51.730290  123634 wrap.go:47] PUT /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-7/status: (3.396234ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39526]
I0212 00:14:51.730473  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-7: (2.516449ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39530]
I0212 00:14:51.730859  123634 wrap.go:47] POST /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods: (1.775361ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39532]
I0212 00:14:51.732153  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-7: (1.097501ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39526]
I0212 00:14:51.732576  123634 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 00:14:51.732822  123634 scheduling_queue.go:868] About to try and schedule pod preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-3
I0212 00:14:51.732868  123634 scheduler.go:453] Attempting to schedule pod: preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-3
I0212 00:14:51.732946  123634 wrap.go:47] POST /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods: (1.622929ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39532]
I0212 00:14:51.733092  123634 factory.go:647] Unable to schedule preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-3: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 00:14:51.733165  123634 factory.go:742] Updating pod condition for preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-3 to (PodScheduled==False, Reason=Unschedulable)
I0212 00:14:51.734731  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-3: (1.333887ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39526]
I0212 00:14:51.734862  123634 wrap.go:47] POST /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods: (1.440939ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39528]
I0212 00:14:51.735346  123634 wrap.go:47] PUT /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-3/status: (1.615732ms) 409 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39534]
I0212 00:14:51.737073  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-3: (1.36265ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39534]
I0212 00:14:51.737208  123634 wrap.go:47] POST /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods: (1.788999ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39528]
I0212 00:14:51.737351  123634 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 00:14:51.737516  123634 scheduling_queue.go:868] About to try and schedule pod preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-7
I0212 00:14:51.737565  123634 scheduler.go:453] Attempting to schedule pod: preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-7
I0212 00:14:51.737659  123634 factory.go:647] Unable to schedule preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-7: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 00:14:51.737701  123634 factory.go:742] Updating pod condition for preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-7 to (PodScheduled==False, Reason=Unschedulable)
I0212 00:14:51.739193  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-7: (1.063593ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39536]
I0212 00:14:51.739666  123634 wrap.go:47] PUT /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-7/status: (1.758088ms) 409 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39534]
I0212 00:14:51.740059  123634 wrap.go:47] POST /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods: (2.199384ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39528]
I0212 00:14:51.740725  123634 wrap.go:47] PATCH /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/events/ppod-3.158275920191900d: (4.386387ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39526]
I0212 00:14:51.742447  123634 wrap.go:47] POST /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods: (1.71022ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39528]
I0212 00:14:51.743464  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-7: (3.327437ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39534]
I0212 00:14:51.743743  123634 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 00:14:51.743873  123634 wrap.go:47] PATCH /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/events/ppod-7.1582759201e8d087: (2.421915ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39526]
I0212 00:14:51.743935  123634 scheduling_queue.go:868] About to try and schedule pod preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-8
I0212 00:14:51.743957  123634 scheduler.go:453] Attempting to schedule pod: preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-8
I0212 00:14:51.744119  123634 factory.go:647] Unable to schedule preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-8: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 00:14:51.744171  123634 factory.go:742] Updating pod condition for preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-8 to (PodScheduled==False, Reason=Unschedulable)
I0212 00:14:51.745752  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-8: (1.243188ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39536]
I0212 00:14:51.747304  123634 wrap.go:47] POST /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/events: (2.44924ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39538]
I0212 00:14:51.747890  123634 wrap.go:47] PUT /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-8/status: (3.404846ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39534]
I0212 00:14:51.748304  123634 wrap.go:47] POST /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods: (5.248824ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39528]
I0212 00:14:51.749650  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-8: (1.312766ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39538]
I0212 00:14:51.750071  123634 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 00:14:51.750240  123634 scheduling_queue.go:868] About to try and schedule pod preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-4
I0212 00:14:51.750266  123634 scheduler.go:453] Attempting to schedule pod: preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-4
I0212 00:14:51.750395  123634 factory.go:647] Unable to schedule preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-4: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 00:14:51.750573  123634 factory.go:742] Updating pod condition for preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-4 to (PodScheduled==False, Reason=Unschedulable)
I0212 00:14:51.751949  123634 wrap.go:47] POST /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods: (2.855903ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39528]
I0212 00:14:51.753184  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-4: (1.868345ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39538]
I0212 00:14:51.753855  123634 wrap.go:47] PUT /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-4/status: (2.557516ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39536]
I0212 00:14:51.754296  123634 backoff_utils.go:79] Backing off 2s
I0212 00:14:51.755429  123634 wrap.go:47] PATCH /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/events/ppod-4.15827592013bdb4b: (4.037866ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39540]
I0212 00:14:51.755797  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-4: (1.247665ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39536]
I0212 00:14:51.755894  123634 wrap.go:47] POST /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods: (2.158886ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39538]
I0212 00:14:51.756145  123634 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 00:14:51.756336  123634 scheduling_queue.go:868] About to try and schedule pod preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-18
I0212 00:14:51.756359  123634 scheduler.go:453] Attempting to schedule pod: preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-18
I0212 00:14:51.756449  123634 factory.go:647] Unable to schedule preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-18: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 00:14:51.756754  123634 factory.go:742] Updating pod condition for preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-18 to (PodScheduled==False, Reason=Unschedulable)
I0212 00:14:51.758075  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-18: (1.310988ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39528]
I0212 00:14:51.758987  123634 wrap.go:47] POST /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods: (2.570906ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39540]
I0212 00:14:51.759086  123634 wrap.go:47] POST /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/events: (1.354179ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39544]
I0212 00:14:51.759467  123634 wrap.go:47] PUT /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-18/status: (2.140066ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39542]
I0212 00:14:51.761033  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-18: (1.141246ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39542]
I0212 00:14:51.761234  123634 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 00:14:51.761390  123634 scheduling_queue.go:868] About to try and schedule pod preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-20
I0212 00:14:51.761411  123634 scheduler.go:453] Attempting to schedule pod: preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-20
I0212 00:14:51.761584  123634 factory.go:647] Unable to schedule preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-20: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 00:14:51.761652  123634 factory.go:742] Updating pod condition for preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-20 to (PodScheduled==False, Reason=Unschedulable)
I0212 00:14:51.761902  123634 wrap.go:47] POST /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods: (2.436424ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39540]
I0212 00:14:51.762943  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-20: (1.121958ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39542]
I0212 00:14:51.763974  123634 wrap.go:47] PUT /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-20/status: (2.084434ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39528]
I0212 00:14:51.764248  123634 wrap.go:47] POST /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods: (1.493628ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39546]
I0212 00:14:51.765628  123634 wrap.go:47] POST /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/events: (3.21481ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39540]
I0212 00:14:51.766189  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-20: (1.791794ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39542]
I0212 00:14:51.766200  123634 wrap.go:47] POST /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods: (1.541375ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39546]
I0212 00:14:51.766496  123634 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 00:14:51.766665  123634 scheduling_queue.go:868] About to try and schedule pod preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-21
I0212 00:14:51.766702  123634 scheduler.go:453] Attempting to schedule pod: preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-21
I0212 00:14:51.766822  123634 factory.go:647] Unable to schedule preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-21: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 00:14:51.766882  123634 factory.go:742] Updating pod condition for preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-21 to (PodScheduled==False, Reason=Unschedulable)
I0212 00:14:51.768359  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-21: (1.060108ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39550]
I0212 00:14:51.768826  123634 wrap.go:47] PUT /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-21/status: (1.687712ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39548]
I0212 00:14:51.768881  123634 wrap.go:47] POST /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods: (1.691262ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39540]
I0212 00:14:51.770308  123634 wrap.go:47] POST /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/events: (2.98054ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39552]
I0212 00:14:51.771021  123634 wrap.go:47] POST /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods: (1.725138ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39548]
I0212 00:14:51.771060  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-21: (1.665407ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39550]
I0212 00:14:51.771429  123634 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 00:14:51.771609  123634 reflector.go:248] k8s.io/client-go/informers/factory.go:132: forcing resync
I0212 00:14:51.771609  123634 scheduling_queue.go:868] About to try and schedule pod preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-24
I0212 00:14:51.771692  123634 scheduler.go:453] Attempting to schedule pod: preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-24
I0212 00:14:51.771775  123634 factory.go:647] Unable to schedule preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-24: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 00:14:51.771819  123634 factory.go:742] Updating pod condition for preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-24 to (PodScheduled==False, Reason=Unschedulable)
I0212 00:14:51.772439  123634 reflector.go:248] k8s.io/client-go/informers/factory.go:132: forcing resync
I0212 00:14:51.773011  123634 wrap.go:47] POST /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods: (1.487363ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39548]
I0212 00:14:51.774235  123634 wrap.go:47] POST /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/events: (1.594933ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39556]
I0212 00:14:51.774308  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-24: (1.829694ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39554]
I0212 00:14:51.774899  123634 wrap.go:47] POST /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods: (1.480979ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39548]
I0212 00:14:51.774975  123634 wrap.go:47] PUT /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-24/status: (2.887883ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39552]
I0212 00:14:51.775598  123634 reflector.go:248] k8s.io/client-go/informers/factory.go:132: forcing resync
I0212 00:14:51.776305  123634 reflector.go:248] k8s.io/client-go/informers/factory.go:132: forcing resync
I0212 00:14:51.776385  123634 reflector.go:248] k8s.io/client-go/informers/factory.go:132: forcing resync
I0212 00:14:51.776782  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-24: (1.401585ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39554]
I0212 00:14:51.776788  123634 wrap.go:47] POST /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods: (1.441311ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39556]
I0212 00:14:51.777157  123634 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 00:14:51.777287  123634 scheduling_queue.go:868] About to try and schedule pod preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-26
I0212 00:14:51.777306  123634 scheduler.go:453] Attempting to schedule pod: preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-26
I0212 00:14:51.777395  123634 factory.go:647] Unable to schedule preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-26: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 00:14:51.777462  123634 factory.go:742] Updating pod condition for preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-26 to (PodScheduled==False, Reason=Unschedulable)
I0212 00:14:51.778829  123634 wrap.go:47] POST /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods: (1.468017ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39556]
I0212 00:14:51.779778  123634 wrap.go:47] PUT /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-26/status: (2.044722ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39554]
I0212 00:14:51.780323  123634 wrap.go:47] POST /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/events: (2.249624ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39558]
I0212 00:14:51.781689  123634 wrap.go:47] POST /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods: (1.86475ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39560]
I0212 00:14:51.781697  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-26: (1.535374ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39554]
I0212 00:14:51.782224  123634 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 00:14:51.782442  123634 scheduling_queue.go:868] About to try and schedule pod preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-29
I0212 00:14:51.782455  123634 scheduler.go:453] Attempting to schedule pod: preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-29
I0212 00:14:51.782590  123634 factory.go:647] Unable to schedule preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-29: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 00:14:51.782632  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-26: (3.277431ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39556]
I0212 00:14:51.782638  123634 factory.go:742] Updating pod condition for preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-29 to (PodScheduled==False, Reason=Unschedulable)
I0212 00:14:51.784010  123634 wrap.go:47] POST /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods: (1.566797ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39554]
I0212 00:14:51.784443  123634 wrap.go:47] POST /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/events: (1.264385ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39564]
I0212 00:14:51.784661  123634 wrap.go:47] PUT /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-29/status: (1.792246ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39558]
I0212 00:14:51.785884  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-29: (3.042772ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39556]
I0212 00:14:51.786412  123634 wrap.go:47] POST /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods: (1.416208ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39554]
I0212 00:14:51.786537  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-29: (1.395344ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39564]
I0212 00:14:51.786913  123634 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 00:14:51.787139  123634 scheduling_queue.go:868] About to try and schedule pod preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-31
I0212 00:14:51.787160  123634 scheduler.go:453] Attempting to schedule pod: preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-31
I0212 00:14:51.787237  123634 factory.go:647] Unable to schedule preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-31: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 00:14:51.787283  123634 factory.go:742] Updating pod condition for preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-31 to (PodScheduled==False, Reason=Unschedulable)
I0212 00:14:51.788402  123634 wrap.go:47] POST /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods: (1.395303ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39556]
I0212 00:14:51.789277  123634 wrap.go:47] POST /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/events: (1.285504ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39566]
I0212 00:14:51.789431  123634 wrap.go:47] PUT /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-31/status: (1.894049ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39562]
I0212 00:14:51.790049  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-31: (1.276921ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39556]
I0212 00:14:51.791218  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-31: (1.255576ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39562]
I0212 00:14:51.791218  123634 wrap.go:47] POST /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods: (2.055771ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39568]
I0212 00:14:51.791630  123634 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 00:14:51.791802  123634 scheduling_queue.go:868] About to try and schedule pod preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-33
I0212 00:14:51.791835  123634 scheduler.go:453] Attempting to schedule pod: preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-33
I0212 00:14:51.791944  123634 factory.go:647] Unable to schedule preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-33: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 00:14:51.791994  123634 factory.go:742] Updating pod condition for preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-33 to (PodScheduled==False, Reason=Unschedulable)
I0212 00:14:51.794014  123634 wrap.go:47] PUT /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-33/status: (1.6356ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39566]
I0212 00:14:51.794076  123634 wrap.go:47] POST /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods: (2.168508ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39556]
I0212 00:14:51.794384  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-33: (1.952057ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39572]
I0212 00:14:51.794468  123634 wrap.go:47] POST /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/events: (1.959867ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39574]
I0212 00:14:51.795664  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-33: (1.179164ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39556]
I0212 00:14:51.795852  123634 wrap.go:47] POST /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods: (1.26401ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39566]
I0212 00:14:51.795944  123634 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 00:14:51.796534  123634 scheduling_queue.go:868] About to try and schedule pod preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-35
I0212 00:14:51.796631  123634 scheduler.go:453] Attempting to schedule pod: preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-35
I0212 00:14:51.796768  123634 factory.go:647] Unable to schedule preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-35: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 00:14:51.796839  123634 factory.go:742] Updating pod condition for preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-35 to (PodScheduled==False, Reason=Unschedulable)
I0212 00:14:51.797811  123634 wrap.go:47] POST /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods: (1.530304ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39572]
I0212 00:14:51.799030  123634 wrap.go:47] POST /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/events: (1.579684ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39576]
I0212 00:14:51.799167  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-35: (1.904382ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39574]
I0212 00:14:51.800613  123634 wrap.go:47] POST /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods: (1.885038ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39580]
I0212 00:14:51.800627  123634 wrap.go:47] PUT /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-35/status: (2.097015ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39572]
I0212 00:14:51.802440  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-35: (1.267614ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39578]
I0212 00:14:51.802456  123634 wrap.go:47] POST /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods: (1.409523ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39576]
I0212 00:14:51.802716  123634 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 00:14:51.802856  123634 scheduling_queue.go:868] About to try and schedule pod preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-36
I0212 00:14:51.802872  123634 scheduler.go:453] Attempting to schedule pod: preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-36
I0212 00:14:51.802937  123634 factory.go:647] Unable to schedule preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-36: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 00:14:51.802981  123634 factory.go:742] Updating pod condition for preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-36 to (PodScheduled==False, Reason=Unschedulable)
I0212 00:14:51.804774  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-36: (1.434213ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39582]
I0212 00:14:51.805150  123634 wrap.go:47] POST /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/events: (1.543679ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39584]
I0212 00:14:51.805400  123634 wrap.go:47] PUT /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-36/status: (2.096234ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39578]
I0212 00:14:51.805662  123634 wrap.go:47] POST /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods: (2.577509ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39576]
I0212 00:14:51.806862  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-36: (1.039303ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39584]
I0212 00:14:51.807301  123634 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 00:14:51.807467  123634 wrap.go:47] POST /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods: (1.349993ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39576]
I0212 00:14:51.807468  123634 scheduling_queue.go:868] About to try and schedule pod preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-40
I0212 00:14:51.807671  123634 scheduler.go:453] Attempting to schedule pod: preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-40
I0212 00:14:51.807790  123634 factory.go:647] Unable to schedule preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-40: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 00:14:51.807840  123634 factory.go:742] Updating pod condition for preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-40 to (PodScheduled==False, Reason=Unschedulable)
I0212 00:14:51.812148  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-40: (4.078547ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39582]
I0212 00:14:51.812319  123634 wrap.go:47] POST /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods: (4.3429ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39584]
I0212 00:14:51.812843  123634 wrap.go:47] POST /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/events: (4.613263ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39586]
I0212 00:14:51.812968  123634 wrap.go:47] PUT /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-40/status: (4.669113ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39588]
I0212 00:14:51.814735  123634 wrap.go:47] POST /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods: (1.646177ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39584]
I0212 00:14:51.814971  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-40: (1.660129ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39588]
I0212 00:14:51.815134  123634 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 00:14:51.815352  123634 scheduling_queue.go:868] About to try and schedule pod preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-36
I0212 00:14:51.815376  123634 scheduler.go:453] Attempting to schedule pod: preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-36
I0212 00:14:51.815470  123634 factory.go:647] Unable to schedule preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-36: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 00:14:51.815603  123634 factory.go:742] Updating pod condition for preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-36 to (PodScheduled==False, Reason=Unschedulable)
I0212 00:14:51.816747  123634 wrap.go:47] POST /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods: (1.681211ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39584]
I0212 00:14:51.817691  123634 wrap.go:47] PUT /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-36/status: (1.714075ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39582]
I0212 00:14:51.818151  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-36: (2.073891ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39588]
I0212 00:14:51.819811  123634 wrap.go:47] POST /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods: (2.002571ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39592]
I0212 00:14:51.820183  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-36: (1.891171ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39582]
I0212 00:14:51.820440  123634 wrap.go:47] PATCH /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/events/ppod-36.1582759206766cd4: (3.047271ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39584]
I0212 00:14:51.820591  123634 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 00:14:51.820810  123634 scheduling_queue.go:868] About to try and schedule pod preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-44
I0212 00:14:51.820849  123634 scheduler.go:453] Attempting to schedule pod: preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-44
I0212 00:14:51.820938  123634 factory.go:647] Unable to schedule preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-44: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 00:14:51.821002  123634 factory.go:742] Updating pod condition for preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-44 to (PodScheduled==False, Reason=Unschedulable)
I0212 00:14:51.822011  123634 wrap.go:47] POST /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods: (1.54749ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39588]
I0212 00:14:51.823357  123634 wrap.go:47] POST /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/events: (1.588655ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39594]
I0212 00:14:51.823792  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-44: (2.136161ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39590]
I0212 00:14:51.824063  123634 wrap.go:47] POST /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods: (1.671438ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39588]
I0212 00:14:51.824265  123634 wrap.go:47] PUT /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-44/status: (2.562039ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39582]
I0212 00:14:51.824616  123634 cacher.go:633] cacher (*core.Pod): 1 objects queued in incoming channel.
I0212 00:14:51.824649  123634 cacher.go:633] cacher (*core.Pod): 2 objects queued in incoming channel.
I0212 00:14:51.826404  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-44: (1.672891ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39594]
I0212 00:14:51.826419  123634 wrap.go:47] POST /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods: (1.615897ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39590]
I0212 00:14:51.826707  123634 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 00:14:51.826931  123634 scheduling_queue.go:868] About to try and schedule pod preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-43
I0212 00:14:51.826952  123634 scheduler.go:453] Attempting to schedule pod: preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-43
I0212 00:14:51.827062  123634 factory.go:647] Unable to schedule preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-43: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 00:14:51.827123  123634 factory.go:742] Updating pod condition for preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-43 to (PodScheduled==False, Reason=Unschedulable)
I0212 00:14:51.829051  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-43: (1.68665ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39590]
I0212 00:14:51.829380  123634 wrap.go:47] PUT /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-43/status: (2.030714ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39594]
I0212 00:14:51.829657  123634 wrap.go:47] POST /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/events: (1.859707ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39596]
I0212 00:14:51.831604  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-43: (1.208749ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39594]
I0212 00:14:51.831978  123634 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 00:14:51.832164  123634 scheduling_queue.go:868] About to try and schedule pod preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-49
I0212 00:14:51.832184  123634 scheduler.go:453] Attempting to schedule pod: preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-49
I0212 00:14:51.832252  123634 factory.go:647] Unable to schedule preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-49: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 00:14:51.832297  123634 factory.go:742] Updating pod condition for preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-49 to (PodScheduled==False, Reason=Unschedulable)
I0212 00:14:51.834238  123634 wrap.go:47] PUT /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-49/status: (1.479165ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39596]
I0212 00:14:51.834695  123634 wrap.go:47] POST /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/events: (1.675583ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39598]
I0212 00:14:51.835170  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-49: (2.470135ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39590]
I0212 00:14:51.836853  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-49: (1.119271ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39596]
I0212 00:14:51.837078  123634 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 00:14:51.837262  123634 scheduling_queue.go:868] About to try and schedule pod preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-43
I0212 00:14:51.837285  123634 scheduler.go:453] Attempting to schedule pod: preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-43
I0212 00:14:51.837377  123634 factory.go:647] Unable to schedule preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-43: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 00:14:51.837425  123634 factory.go:742] Updating pod condition for preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-43 to (PodScheduled==False, Reason=Unschedulable)
I0212 00:14:51.839355  123634 wrap.go:47] PUT /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-43/status: (1.651756ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39596]
I0212 00:14:51.839810  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-43: (1.676988ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39598]
I0212 00:14:51.840986  123634 wrap.go:47] PATCH /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/events/ppod-43.1582759207e6bf6e: (2.565207ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39600]
I0212 00:14:51.841271  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-43: (1.007648ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39596]
I0212 00:14:51.841582  123634 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 00:14:51.841744  123634 scheduling_queue.go:868] About to try and schedule pod preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-49
I0212 00:14:51.841779  123634 scheduler.go:453] Attempting to schedule pod: preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-49
I0212 00:14:51.841859  123634 factory.go:647] Unable to schedule preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-49: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 00:14:51.841923  123634 factory.go:742] Updating pod condition for preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-49 to (PodScheduled==False, Reason=Unschedulable)
I0212 00:14:51.844574  123634 wrap.go:47] PUT /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-49/status: (1.896579ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39600]
I0212 00:14:51.844688  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-49: (1.332279ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39598]
I0212 00:14:51.846020  123634 wrap.go:47] PATCH /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/events/ppod-49.158275920835b88a: (3.302779ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39602]
I0212 00:14:51.846395  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-49: (1.26287ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39600]
I0212 00:14:51.846774  123634 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 00:14:51.846935  123634 scheduling_queue.go:868] About to try and schedule pod preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-48
I0212 00:14:51.846956  123634 scheduler.go:453] Attempting to schedule pod: preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-48
I0212 00:14:51.847030  123634 factory.go:647] Unable to schedule preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-48: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 00:14:51.847120  123634 factory.go:742] Updating pod condition for preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-48 to (PodScheduled==False, Reason=Unschedulable)
I0212 00:14:51.849229  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-48: (1.848092ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39598]
I0212 00:14:51.849478  123634 wrap.go:47] PUT /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-48/status: (2.113233ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39602]
I0212 00:14:51.850085  123634 wrap.go:47] POST /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/events: (2.350102ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39604]
I0212 00:14:51.851302  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-48: (1.234656ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39602]
I0212 00:14:51.851620  123634 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 00:14:51.851752  123634 scheduling_queue.go:868] About to try and schedule pod preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-47
I0212 00:14:51.851771  123634 scheduler.go:453] Attempting to schedule pod: preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-47
I0212 00:14:51.851847  123634 factory.go:647] Unable to schedule preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-47: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 00:14:51.851894  123634 factory.go:742] Updating pod condition for preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-47 to (PodScheduled==False, Reason=Unschedulable)
I0212 00:14:51.853499  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-47: (1.043673ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39598]
I0212 00:14:51.854096  123634 wrap.go:47] POST /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/events: (1.488284ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39606]
I0212 00:14:51.871728  123634 wrap.go:47] PUT /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-47/status: (19.175832ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39604]
I0212 00:14:51.881105  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-47: (2.03525ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39606]
I0212 00:14:51.882447  123634 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 00:14:51.882686  123634 scheduling_queue.go:868] About to try and schedule pod preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-46
I0212 00:14:51.882710  123634 scheduler.go:453] Attempting to schedule pod: preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-46
I0212 00:14:51.882811  123634 factory.go:647] Unable to schedule preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-46: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 00:14:51.882876  123634 factory.go:742] Updating pod condition for preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-46 to (PodScheduled==False, Reason=Unschedulable)
I0212 00:14:51.886324  123634 wrap.go:47] POST /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/events: (2.17546ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39608]
I0212 00:14:51.894364  123634 wrap.go:47] PUT /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-46/status: (10.900977ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39606]
I0212 00:14:51.894365  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-46: (10.986552ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39598]
I0212 00:14:51.897946  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-46: (2.191833ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39598]
I0212 00:14:51.898257  123634 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 00:14:51.898459  123634 scheduling_queue.go:868] About to try and schedule pod preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-47
I0212 00:14:51.898478  123634 scheduler.go:453] Attempting to schedule pod: preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-47
I0212 00:14:51.898640  123634 factory.go:647] Unable to schedule preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-47: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 00:14:51.898693  123634 factory.go:742] Updating pod condition for preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-47 to (PodScheduled==False, Reason=Unschedulable)
I0212 00:14:51.903165  123634 wrap.go:47] PATCH /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/events/ppod-47.158275920960bce4: (3.338301ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39610]
I0212 00:14:51.904264  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-47: (4.415274ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39608]
I0212 00:14:51.904385  123634 wrap.go:47] PUT /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-47/status: (5.411809ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39598]
I0212 00:14:51.906256  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-47: (1.357932ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39608]
I0212 00:14:51.906540  123634 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 00:14:51.906771  123634 scheduling_queue.go:868] About to try and schedule pod preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-46
I0212 00:14:51.906884  123634 scheduler.go:453] Attempting to schedule pod: preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-46
I0212 00:14:51.907040  123634 factory.go:647] Unable to schedule preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-46: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 00:14:51.907179  123634 factory.go:742] Updating pod condition for preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-46 to (PodScheduled==False, Reason=Unschedulable)
I0212 00:14:51.909796  123634 wrap.go:47] PUT /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-46/status: (1.953492ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39610]
I0212 00:14:51.910725  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-46: (3.335056ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39608]
I0212 00:14:51.911455  123634 wrap.go:47] PATCH /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/events/ppod-46.158275920b3962ec: (3.363891ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39612]
I0212 00:14:51.912575  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-46: (2.284045ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39610]
I0212 00:14:51.912908  123634 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 00:14:51.913136  123634 scheduling_queue.go:868] About to try and schedule pod preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-44
I0212 00:14:51.913148  123634 scheduler.go:453] Attempting to schedule pod: preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-44
I0212 00:14:51.913273  123634 factory.go:647] Unable to schedule preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-44: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 00:14:51.913320  123634 factory.go:742] Updating pod condition for preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-44 to (PodScheduled==False, Reason=Unschedulable)
I0212 00:14:51.916276  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-44: (2.652434ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39608]
I0212 00:14:51.916801  123634 wrap.go:47] PATCH /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/events/ppod-44.1582759207895804: (2.692562ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39614]
I0212 00:14:51.917335  123634 wrap.go:47] PUT /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-44/status: (3.38697ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39612]
I0212 00:14:51.919574  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-44: (1.755711ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39614]
I0212 00:14:51.919868  123634 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 00:14:51.920027  123634 scheduling_queue.go:868] About to try and schedule pod preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-45
I0212 00:14:51.920058  123634 scheduler.go:453] Attempting to schedule pod: preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-45
I0212 00:14:51.920140  123634 factory.go:647] Unable to schedule preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-45: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 00:14:51.920208  123634 factory.go:742] Updating pod condition for preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-45 to (PodScheduled==False, Reason=Unschedulable)
I0212 00:14:51.922739  123634 wrap.go:47] POST /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/events: (2.019793ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39608]
I0212 00:14:51.924477  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-45: (2.634136ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39614]
I0212 00:14:51.925084  123634 wrap.go:47] PUT /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-45/status: (4.099941ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39616]
I0212 00:14:51.926937  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-45: (1.387572ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39614]
I0212 00:14:51.927199  123634 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 00:14:51.927381  123634 scheduling_queue.go:868] About to try and schedule pod preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-40
I0212 00:14:51.927416  123634 scheduler.go:453] Attempting to schedule pod: preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-40
I0212 00:14:51.927757  123634 factory.go:647] Unable to schedule preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-40: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 00:14:51.927888  123634 factory.go:742] Updating pod condition for preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-40 to (PodScheduled==False, Reason=Unschedulable)
I0212 00:14:51.928342  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/preemptor-pod: (1.199945ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39614]
I0212 00:14:51.930823  123634 wrap.go:47] PUT /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-40/status: (1.969695ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39608]
I0212 00:14:51.932059  123634 wrap.go:47] PATCH /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/events/ppod-40.1582759206c082aa: (2.806853ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39618]
I0212 00:14:51.932963  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-40: (1.506651ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39608]
I0212 00:14:51.933224  123634 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 00:14:51.933478  123634 scheduling_queue.go:868] About to try and schedule pod preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-45
I0212 00:14:51.933524  123634 scheduler.go:453] Attempting to schedule pod: preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-45
I0212 00:14:51.933697  123634 factory.go:647] Unable to schedule preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-45: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 00:14:51.933745  123634 factory.go:742] Updating pod condition for preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-45 to (PodScheduled==False, Reason=Unschedulable)
I0212 00:14:51.934834  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-40: (2.544524ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39614]
I0212 00:14:51.936446  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-45: (2.258555ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39618]
I0212 00:14:51.937691  123634 wrap.go:47] PATCH /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/events/ppod-45.158275920d731c57: (2.828588ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39620]
I0212 00:14:51.937900  123634 wrap.go:47] PUT /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-45/status: (3.700433ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39608]
I0212 00:14:51.939738  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-45: (1.273509ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39618]
I0212 00:14:51.939980  123634 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 00:14:51.940469  123634 scheduling_queue.go:868] About to try and schedule pod preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-42
I0212 00:14:51.940579  123634 scheduler.go:453] Attempting to schedule pod: preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-42
I0212 00:14:51.940751  123634 factory.go:647] Unable to schedule preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-42: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 00:14:51.940826  123634 factory.go:742] Updating pod condition for preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-42 to (PodScheduled==False, Reason=Unschedulable)
I0212 00:14:51.943264  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-42: (2.163017ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39618]
I0212 00:14:51.944714  123634 wrap.go:47] POST /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/events: (1.820658ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39622]
I0212 00:14:51.945370  123634 wrap.go:47] PUT /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-42/status: (3.8964ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39614]
I0212 00:14:51.947108  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-42: (1.314566ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39614]
I0212 00:14:51.947408  123634 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 00:14:51.947979  123634 scheduling_queue.go:868] About to try and schedule pod preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-41
I0212 00:14:51.948001  123634 scheduler.go:453] Attempting to schedule pod: preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-41
I0212 00:14:51.948356  123634 factory.go:647] Unable to schedule preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-41: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 00:14:51.948465  123634 factory.go:742] Updating pod condition for preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-41 to (PodScheduled==False, Reason=Unschedulable)
I0212 00:14:51.950615  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-41: (1.28566ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39618]
I0212 00:14:51.951137  123634 wrap.go:47] POST /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/events: (1.649352ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39624]
I0212 00:14:51.952745  123634 wrap.go:47] PUT /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-41/status: (3.404161ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39614]
I0212 00:14:51.955965  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-41: (2.337746ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39624]
I0212 00:14:51.956287  123634 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 00:14:51.956619  123634 scheduling_queue.go:868] About to try and schedule pod preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-42
I0212 00:14:51.956671  123634 scheduler.go:453] Attempting to schedule pod: preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-42
I0212 00:14:51.956835  123634 factory.go:647] Unable to schedule preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-42: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 00:14:51.956920  123634 factory.go:742] Updating pod condition for preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-42 to (PodScheduled==False, Reason=Unschedulable)
I0212 00:14:51.959129  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-42: (1.937459ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39624]
I0212 00:14:51.959676  123634 wrap.go:47] PUT /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-42/status: (1.996939ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39618]
I0212 00:14:51.961653  123634 wrap.go:47] PATCH /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/events/ppod-42.158275920eadb2de: (3.130169ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39626]
I0212 00:14:51.961757  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-42: (1.688045ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39618]
I0212 00:14:51.961979  123634 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 00:14:51.962183  123634 scheduling_queue.go:868] About to try and schedule pod preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-41
I0212 00:14:51.962225  123634 scheduler.go:453] Attempting to schedule pod: preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-41
I0212 00:14:51.962413  123634 factory.go:647] Unable to schedule preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-41: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 00:14:51.962511  123634 factory.go:742] Updating pod condition for preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-41 to (PodScheduled==False, Reason=Unschedulable)
I0212 00:14:51.964107  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-41: (1.277651ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39626]
I0212 00:14:51.965684  123634 wrap.go:47] PUT /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-41/status: (2.828005ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39624]
I0212 00:14:51.966186  123634 wrap.go:47] PATCH /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/events/ppod-41.158275920f22410e: (2.868791ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39628]
I0212 00:14:51.967409  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-41: (1.338985ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39624]
I0212 00:14:51.967773  123634 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 00:14:51.967977  123634 scheduling_queue.go:868] About to try and schedule pod preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-39
I0212 00:14:51.967997  123634 scheduler.go:453] Attempting to schedule pod: preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-39
I0212 00:14:51.968085  123634 factory.go:647] Unable to schedule preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-39: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 00:14:51.968176  123634 factory.go:742] Updating pod condition for preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-39 to (PodScheduled==False, Reason=Unschedulable)
I0212 00:14:51.969748  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-39: (1.220053ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39626]
I0212 00:14:51.970143  123634 wrap.go:47] POST /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/events: (1.337889ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39630]
I0212 00:14:51.971608  123634 wrap.go:47] PUT /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-39/status: (2.996987ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39628]
I0212 00:14:51.973316  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-39: (1.242314ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39630]
I0212 00:14:51.973613  123634 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 00:14:51.973798  123634 scheduling_queue.go:868] About to try and schedule pod preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-35
I0212 00:14:51.973818  123634 scheduler.go:453] Attempting to schedule pod: preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-35
I0212 00:14:51.973894  123634 factory.go:647] Unable to schedule preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-35: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 00:14:51.973942  123634 factory.go:742] Updating pod condition for preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-35 to (PodScheduled==False, Reason=Unschedulable)
I0212 00:14:51.976753  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-35: (1.61538ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39626]
I0212 00:14:51.977181  123634 wrap.go:47] PUT /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-35/status: (2.933818ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39630]
I0212 00:14:51.979342  123634 wrap.go:47] PATCH /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/events/ppod-35.1582759206189e14: (4.514244ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39632]
I0212 00:14:51.980277  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-35: (1.489968ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39630]
I0212 00:14:51.980649  123634 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 00:14:51.980878  123634 scheduling_queue.go:868] About to try and schedule pod preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-39
I0212 00:14:51.980919  123634 scheduler.go:453] Attempting to schedule pod: preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-39
I0212 00:14:51.981085  123634 factory.go:647] Unable to schedule preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-39: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 00:14:51.981171  123634 factory.go:742] Updating pod condition for preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-39 to (PodScheduled==False, Reason=Unschedulable)
I0212 00:14:51.984078  123634 wrap.go:47] PUT /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-39/status: (2.283289ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39632]
I0212 00:14:51.984441  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-39: (3.01973ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39626]
I0212 00:14:51.984981  123634 wrap.go:47] PATCH /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/events/ppod-39.15827592104f0cc7: (2.455932ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39634]
I0212 00:14:51.986407  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-39: (1.082146ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39632]
I0212 00:14:51.986838  123634 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 00:14:51.987044  123634 scheduling_queue.go:868] About to try and schedule pod preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-38
I0212 00:14:51.987083  123634 scheduler.go:453] Attempting to schedule pod: preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-38
I0212 00:14:51.987236  123634 factory.go:647] Unable to schedule preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-38: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 00:14:51.987318  123634 factory.go:742] Updating pod condition for preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-38 to (PodScheduled==False, Reason=Unschedulable)
I0212 00:14:51.989807  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-38: (2.164924ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39626]
I0212 00:14:51.989958  123634 wrap.go:47] POST /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/events: (1.626746ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39636]
I0212 00:14:51.990392  123634 wrap.go:47] PUT /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-38/status: (2.588653ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39632]
I0212 00:14:51.992460  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-38: (1.25262ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39636]
I0212 00:14:51.992750  123634 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 00:14:51.992901  123634 scheduling_queue.go:868] About to try and schedule pod preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-37
I0212 00:14:51.992920  123634 scheduler.go:453] Attempting to schedule pod: preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-37
I0212 00:14:51.992992  123634 factory.go:647] Unable to schedule preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-37: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 00:14:51.993040  123634 factory.go:742] Updating pod condition for preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-37 to (PodScheduled==False, Reason=Unschedulable)
I0212 00:14:51.994707  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-37: (1.338851ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39626]
I0212 00:14:51.995140  123634 wrap.go:47] POST /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/events: (1.398063ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39638]
I0212 00:14:51.997699  123634 wrap.go:47] PUT /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-37/status: (2.334794ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39636]
I0212 00:14:51.999674  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-37: (1.511835ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39638]
I0212 00:14:52.000088  123634 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 00:14:52.000358  123634 scheduling_queue.go:868] About to try and schedule pod preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-38
I0212 00:14:52.000402  123634 scheduler.go:453] Attempting to schedule pod: preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-38
I0212 00:14:52.000592  123634 factory.go:647] Unable to schedule preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-38: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 00:14:52.000685  123634 factory.go:742] Updating pod condition for preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-38 to (PodScheduled==False, Reason=Unschedulable)
I0212 00:14:52.004467  123634 wrap.go:47] PATCH /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/events/ppod-38.1582759211731de9: (2.475888ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39640]
I0212 00:14:52.004860  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-38: (3.498173ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39638]
I0212 00:14:52.006726  123634 wrap.go:47] PUT /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-38/status: (5.718334ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39626]
I0212 00:14:52.009290  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-38: (1.426817ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39626]
I0212 00:14:52.009587  123634 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 00:14:52.009917  123634 scheduling_queue.go:868] About to try and schedule pod preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-37
I0212 00:14:52.010057  123634 scheduler.go:453] Attempting to schedule pod: preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-37
I0212 00:14:52.010219  123634 factory.go:647] Unable to schedule preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-37: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 00:14:52.010295  123634 factory.go:742] Updating pod condition for preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-37 to (PodScheduled==False, Reason=Unschedulable)
I0212 00:14:52.012800  123634 wrap.go:47] PUT /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-37/status: (2.246808ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39626]
I0212 00:14:52.013350  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-37: (1.758881ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39640]
I0212 00:14:52.015413  123634 wrap.go:47] PATCH /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/events/ppod-37.1582759211ca7180: (2.955209ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39642]
I0212 00:14:52.016514  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-37: (2.336128ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39626]
I0212 00:14:52.017137  123634 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 00:14:52.017437  123634 scheduling_queue.go:868] About to try and schedule pod preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-0
I0212 00:14:52.017462  123634 scheduler.go:453] Attempting to schedule pod: preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-0
I0212 00:14:52.017624  123634 factory.go:647] Unable to schedule preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-0: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 00:14:52.017686  123634 factory.go:742] Updating pod condition for preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-0 to (PodScheduled==False, Reason=Unschedulable)
I0212 00:14:52.019312  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-0: (1.312499ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39640]
I0212 00:14:52.021436  123634 wrap.go:47] PUT /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-0/status: (2.920027ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39642]
I0212 00:14:52.023158  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-0: (1.238708ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39642]
I0212 00:14:52.023192  123634 wrap.go:47] POST /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/events: (1.818765ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39640]
I0212 00:14:52.023376  123634 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 00:14:52.023626  123634 scheduling_queue.go:868] About to try and schedule pod preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-2
I0212 00:14:52.023648  123634 scheduler.go:453] Attempting to schedule pod: preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-2
I0212 00:14:52.023728  123634 factory.go:647] Unable to schedule preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-2: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 00:14:52.023777  123634 factory.go:742] Updating pod condition for preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-2 to (PodScheduled==False, Reason=Unschedulable)
I0212 00:14:52.025635  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-2: (1.037184ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39642]
I0212 00:14:52.026166  123634 wrap.go:47] POST /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/events: (1.656099ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39644]
I0212 00:14:52.027507  123634 wrap.go:47] PUT /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-2/status: (3.490915ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39640]
I0212 00:14:52.029375  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-2: (1.410234ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39644]
I0212 00:14:52.029659  123634 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 00:14:52.029825  123634 scheduling_queue.go:868] About to try and schedule pod preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-9
I0212 00:14:52.029861  123634 scheduler.go:453] Attempting to schedule pod: preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-9
I0212 00:14:52.029963  123634 factory.go:647] Unable to schedule preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-9: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 00:14:52.030037  123634 factory.go:742] Updating pod condition for preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-9 to (PodScheduled==False, Reason=Unschedulable)
I0212 00:14:52.030257  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/preemptor-pod: (1.138931ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39642]
I0212 00:14:52.030606  123634 preemption_test.go:583] Check unschedulable pods still exists and were never scheduled...
I0212 00:14:52.033157  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-0: (1.426653ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39648]
I0212 00:14:52.033294  123634 wrap.go:47] PUT /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-9/status: (2.606648ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39644]
I0212 00:14:52.034612  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-9: (3.544112ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39642]
I0212 00:14:52.034954  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-1: (1.319734ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39644]
I0212 00:14:52.035241  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-9: (1.58596ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39648]
I0212 00:14:52.036219  123634 wrap.go:47] POST /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/events: (4.631827ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39646]
I0212 00:14:52.036353  123634 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 00:14:52.036565  123634 scheduling_queue.go:868] About to try and schedule pod preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-17
I0212 00:14:52.036581  123634 scheduler.go:453] Attempting to schedule pod: preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-17
I0212 00:14:52.036660  123634 factory.go:647] Unable to schedule preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-17: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 00:14:52.036711  123634 factory.go:742] Updating pod condition for preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-17 to (PodScheduled==False, Reason=Unschedulable)
I0212 00:14:52.037783  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-2: (1.062094ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39642]
I0212 00:14:52.040263  123634 wrap.go:47] POST /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/events: (2.949619ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39654]
I0212 00:14:52.040967  123634 wrap.go:47] PUT /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-17/status: (3.451219ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39652]
I0212 00:14:52.040999  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-17: (3.783738ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39650]
I0212 00:14:52.042113  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-3: (4.025752ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39642]
I0212 00:14:52.042522  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-17: (1.241434ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39652]
I0212 00:14:52.042810  123634 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 00:14:52.043228  123634 scheduling_queue.go:868] About to try and schedule pod preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-31
I0212 00:14:52.043392  123634 scheduler.go:453] Attempting to schedule pod: preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-31
I0212 00:14:52.043596  123634 factory.go:647] Unable to schedule preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-31: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 00:14:52.043683  123634 factory.go:742] Updating pod condition for preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-31 to (PodScheduled==False, Reason=Unschedulable)
I0212 00:14:52.043868  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-4: (1.140613ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39642]
I0212 00:14:52.047365  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-31: (3.396822ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39654]
I0212 00:14:52.047615  123634 wrap.go:47] PATCH /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/events/ppod-31.158275920586d75b: (3.228848ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39642]
I0212 00:14:52.048071  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-5: (3.304949ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39656]
I0212 00:14:52.049325  123634 wrap.go:47] PUT /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-31/status: (5.336393ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39652]
I0212 00:14:52.050122  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-6: (1.633729ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39642]
I0212 00:14:52.050824  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-31: (1.044627ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39652]
I0212 00:14:52.051057  123634 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 00:14:52.051208  123634 scheduling_queue.go:868] About to try and schedule pod preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-33
I0212 00:14:52.051227  123634 scheduler.go:453] Attempting to schedule pod: preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-33
I0212 00:14:52.051302  123634 factory.go:647] Unable to schedule preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-33: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 00:14:52.051351  123634 factory.go:742] Updating pod condition for preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-33 to (PodScheduled==False, Reason=Unschedulable)
I0212 00:14:52.051921  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-7: (1.036503ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39642]
I0212 00:14:52.054514  123634 wrap.go:47] PUT /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-33/status: (2.850725ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39652]
I0212 00:14:52.054721  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-33: (2.599522ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39654]
I0212 00:14:52.054811  123634 wrap.go:47] PATCH /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/events/ppod-33.1582759205ceb6ae: (2.504211ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39642]
I0212 00:14:52.056167  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-33: (1.172ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39654]
I0212 00:14:52.056451  123634 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 00:14:52.056885  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-8: (2.443282ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39658]
I0212 00:14:52.057069  123634 scheduling_queue.go:868] About to try and schedule pod preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-19
I0212 00:14:52.057099  123634 scheduler.go:453] Attempting to schedule pod: preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-19
I0212 00:14:52.057203  123634 factory.go:647] Unable to schedule preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-19: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 00:14:52.057260  123634 factory.go:742] Updating pod condition for preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-19 to (PodScheduled==False, Reason=Unschedulable)
I0212 00:14:52.059824  123634 wrap.go:47] POST /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/events: (1.934023ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39662]
I0212 00:14:52.059951  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-19: (2.536208ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39652]
I0212 00:14:52.060171  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-9: (2.833123ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39642]
I0212 00:14:52.060343  123634 wrap.go:47] PUT /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-19/status: (2.592098ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39660]
I0212 00:14:52.061951  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-10: (1.183779ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39652]
I0212 00:14:52.061980  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-19: (1.188987ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39662]
I0212 00:14:52.062237  123634 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 00:14:52.062418  123634 scheduling_queue.go:868] About to try and schedule pod preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-10
I0212 00:14:52.062443  123634 scheduler.go:453] Attempting to schedule pod: preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-10
I0212 00:14:52.062607  123634 factory.go:647] Unable to schedule preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-10: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 00:14:52.062686  123634 factory.go:742] Updating pod condition for preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-10 to (PodScheduled==False, Reason=Unschedulable)
I0212 00:14:52.063797  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-11: (1.136929ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39652]
I0212 00:14:52.066095  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-10: (3.206178ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39662]
I0212 00:14:52.066378  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-12: (1.857511ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39668]
I0212 00:14:52.066767  123634 wrap.go:47] POST /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/events: (2.446327ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39652]
I0212 00:14:52.066894  123634 wrap.go:47] PUT /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-10/status: (3.013402ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39664]
I0212 00:14:52.068674  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-13: (1.148682ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39652]
I0212 00:14:52.069513  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-10: (1.24136ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39664]
I0212 00:14:52.069768  123634 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 00:14:52.070041  123634 scheduling_queue.go:868] About to try and schedule pod preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-34
I0212 00:14:52.070057  123634 scheduler.go:453] Attempting to schedule pod: preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-34
I0212 00:14:52.070155  123634 factory.go:647] Unable to schedule preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-34: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 00:14:52.070207  123634 factory.go:742] Updating pod condition for preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-34 to (PodScheduled==False, Reason=Unschedulable)
I0212 00:14:52.070376  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-14: (1.120316ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39652]
I0212 00:14:52.071737  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-15: (981.317µs) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39652]
I0212 00:14:52.072306  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-34: (1.118987ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39666]
I0212 00:14:52.074859  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-16: (2.838043ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39652]
I0212 00:14:52.075047  123634 wrap.go:47] PUT /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-34/status: (4.259485ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39664]
I0212 00:14:52.076333  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-17: (1.037962ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39652]
I0212 00:14:52.076910  123634 wrap.go:47] POST /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/events: (2.296073ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39666]
I0212 00:14:52.077174  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-34: (1.1437ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39664]
I0212 00:14:52.077418  123634 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 00:14:52.077745  123634 scheduling_queue.go:868] About to try and schedule pod preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-18
I0212 00:14:52.077769  123634 scheduler.go:453] Attempting to schedule pod: preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-18
I0212 00:14:52.077867  123634 factory.go:647] Unable to schedule preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-18: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 00:14:52.077918  123634 factory.go:742] Updating pod condition for preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-18 to (PodScheduled==False, Reason=Unschedulable)
I0212 00:14:52.078044  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-18: (1.166044ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39652]
I0212 00:14:52.080379  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-18: (1.668138ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39666]
I0212 00:14:52.080702  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-19: (1.354993ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39652]
I0212 00:14:52.082269  123634 wrap.go:47] PATCH /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/events/ppod-18.1582759203b4e67d: (3.523627ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39670]
I0212 00:14:52.082586  123634 wrap.go:47] PUT /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-18/status: (3.829366ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39664]
I0212 00:14:52.083172  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-20: (1.113702ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39652]
I0212 00:14:52.084263  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-18: (1.224522ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39670]
I0212 00:14:52.084520  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-21: (979.423µs) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39652]
I0212 00:14:52.084569  123634 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 00:14:52.084709  123634 scheduling_queue.go:868] About to try and schedule pod preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-22
I0212 00:14:52.084740  123634 scheduler.go:453] Attempting to schedule pod: preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-22
I0212 00:14:52.084834  123634 factory.go:647] Unable to schedule preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-22: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 00:14:52.084886  123634 factory.go:742] Updating pod condition for preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-22 to (PodScheduled==False, Reason=Unschedulable)
I0212 00:14:52.086383  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-22: (1.240086ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39670]
I0212 00:14:52.088531  123634 wrap.go:47] POST /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/events: (2.313177ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39674]
I0212 00:14:52.088840  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-22: (3.400617ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39672]
I0212 00:14:52.089174  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-23: (1.939131ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39670]
I0212 00:14:52.089397  123634 wrap.go:47] PUT /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-22/status: (4.132615ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39666]
I0212 00:14:52.090874  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-22: (997.474µs) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39674]
I0212 00:14:52.091000  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-24: (1.226238ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39672]
I0212 00:14:52.091376  123634 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 00:14:52.091630  123634 scheduling_queue.go:868] About to try and schedule pod preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-29
I0212 00:14:52.091655  123634 scheduler.go:453] Attempting to schedule pod: preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-29
I0212 00:14:52.091751  123634 factory.go:647] Unable to schedule preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-29: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 00:14:52.091802  123634 factory.go:742] Updating pod condition for preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-29 to (PodScheduled==False, Reason=Unschedulable)
I0212 00:14:52.092772  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-25: (1.30236ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39674]
I0212 00:14:52.093531  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-29: (1.117672ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39676]
I0212 00:14:52.094896  123634 wrap.go:47] PATCH /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/events/ppod-29.15827592053ff8a7: (2.23524ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39678]
I0212 00:14:52.095199  123634 wrap.go:47] PUT /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-29/status: (2.840178ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39672]
I0212 00:14:52.095460  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-26: (2.07748ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39674]
I0212 00:14:52.097205  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-29: (1.352033ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39678]
I0212 00:14:52.097207  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-27: (1.362529ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39674]
I0212 00:14:52.098108  123634 generic_scheduler.go:1116] Node node1 is a potential node for preemption.
I0212 00:14:52.098299  123634 scheduling_queue.go:868] About to try and schedule pod preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-5
I0212 00:14:52.098338  123634 scheduler.go:453] Attempting to schedule pod: preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-5
I0212 00:14:52.098518  123634 factory.go:647] Unable to schedule preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-5: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0212 00:14:52.098634  123634 factory.go:742] Updating pod condition for preemption-race3458f067-2e5b-11e9-8330-0242ac110002/ppod-5 to (PodScheduled==False, Reason=Unschedulable)
I0212 00:14:52.099934  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-28: (1.495285ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39678]
I0212 00:14:52.101323  123634 wrap.go:47] PUT /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-5/status: (2.102297ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39676]
I0212 00:14:52.101323  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-5: (2.047259ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39680]
I0212 00:14:52.102684  123634 wrap.go:47] POST /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/events: (1.784765ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39682]
I0212 00:14:52.102762  123634 wrap.go:47] GET /api/v1/namespaces/preemption-race3458f067-2e5b-11e9-8330-0242ac110002/pods/ppod-29: (1.56723ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39678]
I0212 00:14:52.103161  123634 wrap.go:47] GET /api/v1/namespaces/preemptio