PR: dims: [WIP] Log http request and count number of traces before logging
Result: FAILURE
Tests: 1 failed / 606 succeeded
Started: 2019-01-11 23:22
Elapsed: 27m31s
Revision:
Builder: gke-prow-containerd-pool-99179761-nfzr
Refs: master:08bee2cc, 72820:6c49badf
pod: a106d76c-15f7-11e9-b9b3-0a580a6c0361
infra-commit: 2a90eab87
repo: k8s.io/kubernetes
repo-commit: 7165f86f0568a4935f70358c16f5fe18c3e72a75
repos: {u'k8s.io/kubernetes': u'master:08bee2cc8453c50c6d632634e9ceffe05bf8d4ba,72820:6c49badf3e973d713bb3082e88b23dcfa6d2b2d5'}

Test Failures


k8s.io/kubernetes/test/integration/scheduler TestPreemptionRaces 15s

go test -v k8s.io/kubernetes/test/integration/scheduler -run TestPreemptionRaces$
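A minimal local-reproduction sketch, under two assumptions: a kubernetes/kubernetes checkout at the commits above, and etcd available on PATH (the storagebackend.Config lines below show the test expects etcd at http://127.0.0.1:2379). The exact make targets may differ between branches; this follows the repository's standard integration-test flow rather than anything stated in this report.

# Fetch the pinned etcd the repo's scripts expect, then run just this test.
hack/install-etcd.sh
export PATH="$(pwd)/third_party/etcd:${PATH}"
make test-integration WHAT=./test/integration/scheduler \
    GOFLAGS="-v" KUBE_TEST_ARGS="-run TestPreemptionRaces$"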
I0111 23:42:15.067643  120899 services.go:33] Network range for service cluster IPs is unspecified. Defaulting to {10.0.0.0 ffffff00}.
I0111 23:42:15.067668  120899 services.go:45] Setting service IP to "10.0.0.1" (read-write).
I0111 23:42:15.067677  120899 master.go:273] Node port range unspecified. Defaulting to 30000-32767.
I0111 23:42:15.067687  120899 master.go:229] Using reconciler: 
I0111 23:42:15.070131  120899 storage_factory.go:285] storing podtemplates in v1, reading as __internal from storagebackend.Config{Type:"", Prefix:"d246db8b-6af8-44a6-8f81-916ee996e115", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Quorum:false, Paging:true, Codec:runtime.Codec(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0111 23:42:15.070336  120899 clientconn.go:551] parsed scheme: ""
I0111 23:42:15.070404  120899 clientconn.go:557] scheme "" not registered, fallback to default scheme
I0111 23:42:15.070485  120899 resolver_conn_wrapper.go:116] ccResolverWrapper: sending new addresses to cc: [{127.0.0.1:2379 0  <nil>}]
I0111 23:42:15.070686  120899 balancer_v1_wrapper.go:125] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0111 23:42:15.071625  120899 balancer_v1_wrapper.go:245] clientv3/balancer: pin "127.0.0.1:2379"
I0111 23:42:15.071929  120899 store.go:1414] Monitoring podtemplates count at <storage-prefix>//podtemplates
I0111 23:42:15.072023  120899 storage_factory.go:285] storing events in v1, reading as __internal from storagebackend.Config{Type:"", Prefix:"d246db8b-6af8-44a6-8f81-916ee996e115", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Quorum:false, Paging:true, Codec:runtime.Codec(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0111 23:42:15.072393  120899 clientconn.go:551] parsed scheme: ""
I0111 23:42:15.072612  120899 clientconn.go:557] scheme "" not registered, fallback to default scheme
I0111 23:42:15.072703  120899 resolver_conn_wrapper.go:116] ccResolverWrapper: sending new addresses to cc: [{127.0.0.1:2379 0  <nil>}]
I0111 23:42:15.072799  120899 balancer_v1_wrapper.go:125] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0111 23:42:15.072886  120899 reflector.go:169] Listing and watching *core.PodTemplate from storage/cacher.go:/podtemplates
I0111 23:42:15.073262  120899 balancer_v1_wrapper.go:125] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0111 23:42:15.073648  120899 balancer_v1_wrapper.go:245] clientv3/balancer: pin "127.0.0.1:2379"
I0111 23:42:15.073726  120899 store.go:1414] Monitoring events count at <storage-prefix>//events
I0111 23:42:15.073825  120899 storage_factory.go:285] storing limitranges in v1, reading as __internal from storagebackend.Config{Type:"", Prefix:"d246db8b-6af8-44a6-8f81-916ee996e115", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Quorum:false, Paging:true, Codec:runtime.Codec(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0111 23:42:15.073990  120899 clientconn.go:551] parsed scheme: ""
I0111 23:42:15.074017  120899 clientconn.go:557] scheme "" not registered, fallback to default scheme
I0111 23:42:15.074083  120899 resolver_conn_wrapper.go:116] ccResolverWrapper: sending new addresses to cc: [{127.0.0.1:2379 0  <nil>}]
I0111 23:42:15.074205  120899 balancer_v1_wrapper.go:125] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0111 23:42:15.074441  120899 balancer_v1_wrapper.go:125] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0111 23:42:15.074800  120899 balancer_v1_wrapper.go:245] clientv3/balancer: pin "127.0.0.1:2379"
I0111 23:42:15.074857  120899 balancer_v1_wrapper.go:125] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0111 23:42:15.074986  120899 store.go:1414] Monitoring limitranges count at <storage-prefix>//limitranges
I0111 23:42:15.075029  120899 storage_factory.go:285] storing resourcequotas in v1, reading as __internal from storagebackend.Config{Type:"", Prefix:"d246db8b-6af8-44a6-8f81-916ee996e115", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Quorum:false, Paging:true, Codec:runtime.Codec(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0111 23:42:15.075137  120899 clientconn.go:551] parsed scheme: ""
I0111 23:42:15.075164  120899 clientconn.go:557] scheme "" not registered, fallback to default scheme
I0111 23:42:15.075216  120899 resolver_conn_wrapper.go:116] ccResolverWrapper: sending new addresses to cc: [{127.0.0.1:2379 0  <nil>}]
I0111 23:42:15.075272  120899 reflector.go:169] Listing and watching *core.LimitRange from storage/cacher.go:/limitranges
I0111 23:42:15.075545  120899 balancer_v1_wrapper.go:125] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0111 23:42:15.077531  120899 balancer_v1_wrapper.go:245] clientv3/balancer: pin "127.0.0.1:2379"
I0111 23:42:15.077613  120899 balancer_v1_wrapper.go:125] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0111 23:42:15.077665  120899 store.go:1414] Monitoring resourcequotas count at <storage-prefix>//resourcequotas
I0111 23:42:15.077735  120899 reflector.go:169] Listing and watching *core.ResourceQuota from storage/cacher.go:/resourcequotas
I0111 23:42:15.077910  120899 storage_factory.go:285] storing secrets in v1, reading as __internal from storagebackend.Config{Type:"", Prefix:"d246db8b-6af8-44a6-8f81-916ee996e115", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Quorum:false, Paging:true, Codec:runtime.Codec(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0111 23:42:15.078006  120899 clientconn.go:551] parsed scheme: ""
I0111 23:42:15.078032  120899 clientconn.go:557] scheme "" not registered, fallback to default scheme
I0111 23:42:15.078093  120899 resolver_conn_wrapper.go:116] ccResolverWrapper: sending new addresses to cc: [{127.0.0.1:2379 0  <nil>}]
I0111 23:42:15.078146  120899 balancer_v1_wrapper.go:125] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0111 23:42:15.080622  120899 balancer_v1_wrapper.go:245] clientv3/balancer: pin "127.0.0.1:2379"
I0111 23:42:15.080795  120899 store.go:1414] Monitoring secrets count at <storage-prefix>//secrets
I0111 23:42:15.080987  120899 storage_factory.go:285] storing persistentvolumes in v1, reading as __internal from storagebackend.Config{Type:"", Prefix:"d246db8b-6af8-44a6-8f81-916ee996e115", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Quorum:false, Paging:true, Codec:runtime.Codec(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0111 23:42:15.081080  120899 clientconn.go:551] parsed scheme: ""
I0111 23:42:15.081106  120899 clientconn.go:557] scheme "" not registered, fallback to default scheme
I0111 23:42:15.081148  120899 resolver_conn_wrapper.go:116] ccResolverWrapper: sending new addresses to cc: [{127.0.0.1:2379 0  <nil>}]
I0111 23:42:15.081235  120899 balancer_v1_wrapper.go:125] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0111 23:42:15.081371  120899 reflector.go:169] Listing and watching *core.Secret from storage/cacher.go:/secrets
I0111 23:42:15.081644  120899 balancer_v1_wrapper.go:125] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0111 23:42:15.083446  120899 balancer_v1_wrapper.go:245] clientv3/balancer: pin "127.0.0.1:2379"
I0111 23:42:15.083566  120899 store.go:1414] Monitoring persistentvolumes count at <storage-prefix>//persistentvolumes
I0111 23:42:15.083670  120899 balancer_v1_wrapper.go:125] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0111 23:42:15.083821  120899 reflector.go:169] Listing and watching *core.PersistentVolume from storage/cacher.go:/persistentvolumes
I0111 23:42:15.083926  120899 storage_factory.go:285] storing persistentvolumeclaims in v1, reading as __internal from storagebackend.Config{Type:"", Prefix:"d246db8b-6af8-44a6-8f81-916ee996e115", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Quorum:false, Paging:true, Codec:runtime.Codec(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0111 23:42:15.084019  120899 clientconn.go:551] parsed scheme: ""
I0111 23:42:15.084044  120899 clientconn.go:557] scheme "" not registered, fallback to default scheme
I0111 23:42:15.084084  120899 resolver_conn_wrapper.go:116] ccResolverWrapper: sending new addresses to cc: [{127.0.0.1:2379 0  <nil>}]
I0111 23:42:15.084198  120899 balancer_v1_wrapper.go:125] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0111 23:42:15.085443  120899 balancer_v1_wrapper.go:245] clientv3/balancer: pin "127.0.0.1:2379"
I0111 23:42:15.085566  120899 store.go:1414] Monitoring persistentvolumeclaims count at <storage-prefix>//persistentvolumeclaims
I0111 23:42:15.085696  120899 storage_factory.go:285] storing configmaps in v1, reading as __internal from storagebackend.Config{Type:"", Prefix:"d246db8b-6af8-44a6-8f81-916ee996e115", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Quorum:false, Paging:true, Codec:runtime.Codec(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0111 23:42:15.085773  120899 clientconn.go:551] parsed scheme: ""
I0111 23:42:15.085805  120899 clientconn.go:557] scheme "" not registered, fallback to default scheme
I0111 23:42:15.085833  120899 resolver_conn_wrapper.go:116] ccResolverWrapper: sending new addresses to cc: [{127.0.0.1:2379 0  <nil>}]
I0111 23:42:15.085880  120899 balancer_v1_wrapper.go:125] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0111 23:42:15.085908  120899 reflector.go:169] Listing and watching *core.PersistentVolumeClaim from storage/cacher.go:/persistentvolumeclaims
I0111 23:42:15.086067  120899 balancer_v1_wrapper.go:125] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0111 23:42:15.086242  120899 balancer_v1_wrapper.go:245] clientv3/balancer: pin "127.0.0.1:2379"
I0111 23:42:15.086356  120899 store.go:1414] Monitoring configmaps count at <storage-prefix>//configmaps
I0111 23:42:15.086519  120899 storage_factory.go:285] storing namespaces in v1, reading as __internal from storagebackend.Config{Type:"", Prefix:"d246db8b-6af8-44a6-8f81-916ee996e115", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Quorum:false, Paging:true, Codec:runtime.Codec(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0111 23:42:15.086587  120899 clientconn.go:551] parsed scheme: ""
I0111 23:42:15.086596  120899 clientconn.go:557] scheme "" not registered, fallback to default scheme
I0111 23:42:15.086618  120899 resolver_conn_wrapper.go:116] ccResolverWrapper: sending new addresses to cc: [{127.0.0.1:2379 0  <nil>}]
I0111 23:42:15.086673  120899 balancer_v1_wrapper.go:125] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0111 23:42:15.086694  120899 reflector.go:169] Listing and watching *core.ConfigMap from storage/cacher.go:/configmaps
I0111 23:42:15.086919  120899 balancer_v1_wrapper.go:125] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0111 23:42:15.087073  120899 balancer_v1_wrapper.go:245] clientv3/balancer: pin "127.0.0.1:2379"
I0111 23:42:15.087143  120899 store.go:1414] Monitoring namespaces count at <storage-prefix>//namespaces
I0111 23:42:15.087261  120899 storage_factory.go:285] storing endpoints in v1, reading as __internal from storagebackend.Config{Type:"", Prefix:"d246db8b-6af8-44a6-8f81-916ee996e115", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Quorum:false, Paging:true, Codec:runtime.Codec(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0111 23:42:15.087355  120899 clientconn.go:551] parsed scheme: ""
I0111 23:42:15.087364  120899 clientconn.go:557] scheme "" not registered, fallback to default scheme
I0111 23:42:15.087385  120899 resolver_conn_wrapper.go:116] ccResolverWrapper: sending new addresses to cc: [{127.0.0.1:2379 0  <nil>}]
I0111 23:42:15.087438  120899 balancer_v1_wrapper.go:125] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0111 23:42:15.087459  120899 reflector.go:169] Listing and watching *core.Namespace from storage/cacher.go:/namespaces
I0111 23:42:15.087589  120899 balancer_v1_wrapper.go:125] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0111 23:42:15.087815  120899 balancer_v1_wrapper.go:245] clientv3/balancer: pin "127.0.0.1:2379"
I0111 23:42:15.087881  120899 store.go:1414] Monitoring endpoints count at <storage-prefix>//endpoints
I0111 23:42:15.087997  120899 storage_factory.go:285] storing nodes in v1, reading as __internal from storagebackend.Config{Type:"", Prefix:"d246db8b-6af8-44a6-8f81-916ee996e115", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Quorum:false, Paging:true, Codec:runtime.Codec(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0111 23:42:15.088067  120899 clientconn.go:551] parsed scheme: ""
I0111 23:42:15.088079  120899 clientconn.go:557] scheme "" not registered, fallback to default scheme
I0111 23:42:15.088113  120899 resolver_conn_wrapper.go:116] ccResolverWrapper: sending new addresses to cc: [{127.0.0.1:2379 0  <nil>}]
I0111 23:42:15.088171  120899 balancer_v1_wrapper.go:125] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0111 23:42:15.088190  120899 reflector.go:169] Listing and watching *core.Endpoints from storage/cacher.go:/endpoints
I0111 23:42:15.090232  120899 balancer_v1_wrapper.go:125] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0111 23:42:15.091636  120899 balancer_v1_wrapper.go:245] clientv3/balancer: pin "127.0.0.1:2379"
I0111 23:42:15.091692  120899 balancer_v1_wrapper.go:125] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0111 23:42:15.091850  120899 store.go:1414] Monitoring nodes count at <storage-prefix>//nodes
I0111 23:42:15.092077  120899 storage_factory.go:285] storing pods in v1, reading as __internal from storagebackend.Config{Type:"", Prefix:"d246db8b-6af8-44a6-8f81-916ee996e115", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Quorum:false, Paging:true, Codec:runtime.Codec(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0111 23:42:15.092207  120899 clientconn.go:551] parsed scheme: ""
I0111 23:42:15.092270  120899 clientconn.go:557] scheme "" not registered, fallback to default scheme
I0111 23:42:15.092381  120899 resolver_conn_wrapper.go:116] ccResolverWrapper: sending new addresses to cc: [{127.0.0.1:2379 0  <nil>}]
I0111 23:42:15.092472  120899 reflector.go:169] Listing and watching *core.Node from storage/cacher.go:/nodes
I0111 23:42:15.092818  120899 balancer_v1_wrapper.go:125] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0111 23:42:15.093188  120899 balancer_v1_wrapper.go:245] clientv3/balancer: pin "127.0.0.1:2379"
I0111 23:42:15.093366  120899 store.go:1414] Monitoring pods count at <storage-prefix>//pods
I0111 23:42:15.094946  120899 reflector.go:169] Listing and watching *core.Pod from storage/cacher.go:/pods
I0111 23:42:15.095095  120899 balancer_v1_wrapper.go:125] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0111 23:42:15.095333  120899 storage_factory.go:285] storing serviceaccounts in v1, reading as __internal from storagebackend.Config{Type:"", Prefix:"d246db8b-6af8-44a6-8f81-916ee996e115", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Quorum:false, Paging:true, Codec:runtime.Codec(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0111 23:42:15.095492  120899 clientconn.go:551] parsed scheme: ""
I0111 23:42:15.095517  120899 clientconn.go:557] scheme "" not registered, fallback to default scheme
I0111 23:42:15.095548  120899 resolver_conn_wrapper.go:116] ccResolverWrapper: sending new addresses to cc: [{127.0.0.1:2379 0  <nil>}]
I0111 23:42:15.095670  120899 balancer_v1_wrapper.go:125] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0111 23:42:15.097666  120899 balancer_v1_wrapper.go:245] clientv3/balancer: pin "127.0.0.1:2379"
I0111 23:42:15.098081  120899 balancer_v1_wrapper.go:125] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0111 23:42:15.098270  120899 store.go:1414] Monitoring serviceaccounts count at <storage-prefix>//serviceaccounts
I0111 23:42:15.098539  120899 storage_factory.go:285] storing services in v1, reading as __internal from storagebackend.Config{Type:"", Prefix:"d246db8b-6af8-44a6-8f81-916ee996e115", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Quorum:false, Paging:true, Codec:runtime.Codec(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0111 23:42:15.098679  120899 clientconn.go:551] parsed scheme: ""
I0111 23:42:15.098702  120899 clientconn.go:557] scheme "" not registered, fallback to default scheme
I0111 23:42:15.098793  120899 resolver_conn_wrapper.go:116] ccResolverWrapper: sending new addresses to cc: [{127.0.0.1:2379 0  <nil>}]
I0111 23:42:15.098946  120899 reflector.go:169] Listing and watching *core.ServiceAccount from storage/cacher.go:/serviceaccounts
I0111 23:42:15.099097  120899 balancer_v1_wrapper.go:125] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0111 23:42:15.100870  120899 balancer_v1_wrapper.go:245] clientv3/balancer: pin "127.0.0.1:2379"
I0111 23:42:15.101130  120899 store.go:1414] Monitoring services count at <storage-prefix>//services
I0111 23:42:15.101173  120899 balancer_v1_wrapper.go:125] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0111 23:42:15.101251  120899 reflector.go:169] Listing and watching *core.Service from storage/cacher.go:/services
I0111 23:42:15.101211  120899 storage_factory.go:285] storing services in v1, reading as __internal from storagebackend.Config{Type:"", Prefix:"d246db8b-6af8-44a6-8f81-916ee996e115", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Quorum:false, Paging:true, Codec:runtime.Codec(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0111 23:42:15.101501  120899 clientconn.go:551] parsed scheme: ""
I0111 23:42:15.101544  120899 clientconn.go:557] scheme "" not registered, fallback to default scheme
I0111 23:42:15.103104  120899 resolver_conn_wrapper.go:116] ccResolverWrapper: sending new addresses to cc: [{127.0.0.1:2379 0  <nil>}]
I0111 23:42:15.103373  120899 balancer_v1_wrapper.go:125] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0111 23:42:15.103821  120899 balancer_v1_wrapper.go:245] clientv3/balancer: pin "127.0.0.1:2379"
I0111 23:42:15.103993  120899 clientconn.go:551] parsed scheme: ""
I0111 23:42:15.104035  120899 clientconn.go:557] scheme "" not registered, fallback to default scheme
I0111 23:42:15.104165  120899 resolver_conn_wrapper.go:116] ccResolverWrapper: sending new addresses to cc: [{127.0.0.1:2379 0  <nil>}]
I0111 23:42:15.104350  120899 balancer_v1_wrapper.go:125] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0111 23:42:15.104397  120899 balancer_v1_wrapper.go:125] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0111 23:42:15.104714  120899 balancer_v1_wrapper.go:245] clientv3/balancer: pin "127.0.0.1:2379"
I0111 23:42:15.105072  120899 storage_factory.go:285] storing replicationcontrollers in v1, reading as __internal from storagebackend.Config{Type:"", Prefix:"d246db8b-6af8-44a6-8f81-916ee996e115", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Quorum:false, Paging:true, Codec:runtime.Codec(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0111 23:42:15.105173  120899 clientconn.go:551] parsed scheme: ""
I0111 23:42:15.105198  120899 clientconn.go:557] scheme "" not registered, fallback to default scheme
I0111 23:42:15.105241  120899 resolver_conn_wrapper.go:116] ccResolverWrapper: sending new addresses to cc: [{127.0.0.1:2379 0  <nil>}]
I0111 23:42:15.105320  120899 balancer_v1_wrapper.go:125] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0111 23:42:15.105564  120899 balancer_v1_wrapper.go:125] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0111 23:42:15.106129  120899 balancer_v1_wrapper.go:245] clientv3/balancer: pin "127.0.0.1:2379"
I0111 23:42:15.106350  120899 balancer_v1_wrapper.go:125] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0111 23:42:15.106524  120899 store.go:1414] Monitoring replicationcontrollers count at <storage-prefix>//replicationcontrollers
I0111 23:42:15.106616  120899 reflector.go:169] Listing and watching *core.ReplicationController from storage/cacher.go:/replicationcontrollers
I0111 23:42:15.124498  120899 master.go:408] Skipping disabled API group "auditregistration.k8s.io".
I0111 23:42:15.124539  120899 master.go:416] Enabling API group "authentication.k8s.io".
I0111 23:42:15.124561  120899 master.go:416] Enabling API group "authorization.k8s.io".
I0111 23:42:15.124758  120899 storage_factory.go:285] storing horizontalpodautoscalers.autoscaling in autoscaling/v1, reading as autoscaling/__internal from storagebackend.Config{Type:"", Prefix:"d246db8b-6af8-44a6-8f81-916ee996e115", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Quorum:false, Paging:true, Codec:runtime.Codec(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0111 23:42:15.124907  120899 clientconn.go:551] parsed scheme: ""
I0111 23:42:15.124931  120899 clientconn.go:557] scheme "" not registered, fallback to default scheme
I0111 23:42:15.124989  120899 resolver_conn_wrapper.go:116] ccResolverWrapper: sending new addresses to cc: [{127.0.0.1:2379 0  <nil>}]
I0111 23:42:15.125140  120899 balancer_v1_wrapper.go:125] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0111 23:42:15.125870  120899 balancer_v1_wrapper.go:245] clientv3/balancer: pin "127.0.0.1:2379"
I0111 23:42:15.126142  120899 store.go:1414] Monitoring horizontalpodautoscalers.autoscaling count at <storage-prefix>//horizontalpodautoscalers
I0111 23:42:15.126392  120899 storage_factory.go:285] storing horizontalpodautoscalers.autoscaling in autoscaling/v1, reading as autoscaling/__internal from storagebackend.Config{Type:"", Prefix:"d246db8b-6af8-44a6-8f81-916ee996e115", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Quorum:false, Paging:true, Codec:runtime.Codec(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0111 23:42:15.126577  120899 clientconn.go:551] parsed scheme: ""
I0111 23:42:15.126668  120899 clientconn.go:557] scheme "" not registered, fallback to default scheme
I0111 23:42:15.126773  120899 resolver_conn_wrapper.go:116] ccResolverWrapper: sending new addresses to cc: [{127.0.0.1:2379 0  <nil>}]
I0111 23:42:15.126938  120899 balancer_v1_wrapper.go:125] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0111 23:42:15.127324  120899 balancer_v1_wrapper.go:125] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0111 23:42:15.128233  120899 balancer_v1_wrapper.go:245] clientv3/balancer: pin "127.0.0.1:2379"
I0111 23:42:15.128426  120899 balancer_v1_wrapper.go:125] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0111 23:42:15.128715  120899 store.go:1414] Monitoring horizontalpodautoscalers.autoscaling count at <storage-prefix>//horizontalpodautoscalers
I0111 23:42:15.128799  120899 reflector.go:169] Listing and watching *autoscaling.HorizontalPodAutoscaler from storage/cacher.go:/horizontalpodautoscalers
I0111 23:42:15.129238  120899 storage_factory.go:285] storing horizontalpodautoscalers.autoscaling in autoscaling/v1, reading as autoscaling/__internal from storagebackend.Config{Type:"", Prefix:"d246db8b-6af8-44a6-8f81-916ee996e115", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Quorum:false, Paging:true, Codec:runtime.Codec(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0111 23:42:15.129345  120899 clientconn.go:551] parsed scheme: ""
I0111 23:42:15.129361  120899 clientconn.go:557] scheme "" not registered, fallback to default scheme
I0111 23:42:15.129391  120899 resolver_conn_wrapper.go:116] ccResolverWrapper: sending new addresses to cc: [{127.0.0.1:2379 0  <nil>}]
I0111 23:42:15.129470  120899 balancer_v1_wrapper.go:125] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0111 23:42:15.127029  120899 reflector.go:169] Listing and watching *autoscaling.HorizontalPodAutoscaler from storage/cacher.go:/horizontalpodautoscalers
I0111 23:42:15.131136  120899 balancer_v1_wrapper.go:245] clientv3/balancer: pin "127.0.0.1:2379"
I0111 23:42:15.131248  120899 store.go:1414] Monitoring horizontalpodautoscalers.autoscaling count at <storage-prefix>//horizontalpodautoscalers
I0111 23:42:15.131262  120899 master.go:416] Enabling API group "autoscaling".
I0111 23:42:15.131454  120899 storage_factory.go:285] storing jobs.batch in batch/v1, reading as batch/__internal from storagebackend.Config{Type:"", Prefix:"d246db8b-6af8-44a6-8f81-916ee996e115", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Quorum:false, Paging:true, Codec:runtime.Codec(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0111 23:42:15.131540  120899 clientconn.go:551] parsed scheme: ""
I0111 23:42:15.131552  120899 clientconn.go:557] scheme "" not registered, fallback to default scheme
I0111 23:42:15.131580  120899 resolver_conn_wrapper.go:116] ccResolverWrapper: sending new addresses to cc: [{127.0.0.1:2379 0  <nil>}]
I0111 23:42:15.131648  120899 balancer_v1_wrapper.go:125] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0111 23:42:15.131675  120899 reflector.go:169] Listing and watching *autoscaling.HorizontalPodAutoscaler from storage/cacher.go:/horizontalpodautoscalers
I0111 23:42:15.131868  120899 balancer_v1_wrapper.go:125] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0111 23:42:15.132049  120899 balancer_v1_wrapper.go:245] clientv3/balancer: pin "127.0.0.1:2379"
I0111 23:42:15.132177  120899 store.go:1414] Monitoring jobs.batch count at <storage-prefix>//jobs
I0111 23:42:15.132389  120899 storage_factory.go:285] storing cronjobs.batch in batch/v1beta1, reading as batch/__internal from storagebackend.Config{Type:"", Prefix:"d246db8b-6af8-44a6-8f81-916ee996e115", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Quorum:false, Paging:true, Codec:runtime.Codec(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0111 23:42:15.132469  120899 clientconn.go:551] parsed scheme: ""
I0111 23:42:15.132482  120899 clientconn.go:557] scheme "" not registered, fallback to default scheme
I0111 23:42:15.132514  120899 resolver_conn_wrapper.go:116] ccResolverWrapper: sending new addresses to cc: [{127.0.0.1:2379 0  <nil>}]
I0111 23:42:15.132575  120899 balancer_v1_wrapper.go:125] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0111 23:42:15.132602  120899 reflector.go:169] Listing and watching *batch.Job from storage/cacher.go:/jobs
I0111 23:42:15.132876  120899 balancer_v1_wrapper.go:125] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0111 23:42:15.133091  120899 balancer_v1_wrapper.go:245] clientv3/balancer: pin "127.0.0.1:2379"
I0111 23:42:15.133197  120899 store.go:1414] Monitoring cronjobs.batch count at <storage-prefix>//cronjobs
I0111 23:42:15.133216  120899 master.go:416] Enabling API group "batch".
I0111 23:42:15.133372  120899 storage_factory.go:285] storing certificatesigningrequests.certificates.k8s.io in certificates.k8s.io/v1beta1, reading as certificates.k8s.io/__internal from storagebackend.Config{Type:"", Prefix:"d246db8b-6af8-44a6-8f81-916ee996e115", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Quorum:false, Paging:true, Codec:runtime.Codec(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0111 23:42:15.133428  120899 clientconn.go:551] parsed scheme: ""
I0111 23:42:15.133443  120899 clientconn.go:557] scheme "" not registered, fallback to default scheme
I0111 23:42:15.133467  120899 resolver_conn_wrapper.go:116] ccResolverWrapper: sending new addresses to cc: [{127.0.0.1:2379 0  <nil>}]
I0111 23:42:15.133528  120899 balancer_v1_wrapper.go:125] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0111 23:42:15.133559  120899 reflector.go:169] Listing and watching *batch.CronJob from storage/cacher.go:/cronjobs
I0111 23:42:15.133886  120899 balancer_v1_wrapper.go:125] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0111 23:42:15.134205  120899 balancer_v1_wrapper.go:245] clientv3/balancer: pin "127.0.0.1:2379"
I0111 23:42:15.134265  120899 balancer_v1_wrapper.go:125] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0111 23:42:15.134360  120899 store.go:1414] Monitoring certificatesigningrequests.certificates.k8s.io count at <storage-prefix>//certificatesigningrequests
I0111 23:42:15.134419  120899 master.go:416] Enabling API group "certificates.k8s.io".
I0111 23:42:15.134807  120899 storage_factory.go:285] storing leases.coordination.k8s.io in coordination.k8s.io/v1beta1, reading as coordination.k8s.io/__internal from storagebackend.Config{Type:"", Prefix:"d246db8b-6af8-44a6-8f81-916ee996e115", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Quorum:false, Paging:true, Codec:runtime.Codec(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0111 23:42:15.134985  120899 clientconn.go:551] parsed scheme: ""
I0111 23:42:15.135045  120899 clientconn.go:557] scheme "" not registered, fallback to default scheme
I0111 23:42:15.135100  120899 resolver_conn_wrapper.go:116] ccResolverWrapper: sending new addresses to cc: [{127.0.0.1:2379 0  <nil>}]
I0111 23:42:15.134539  120899 reflector.go:169] Listing and watching *certificates.CertificateSigningRequest from storage/cacher.go:/certificatesigningrequests
I0111 23:42:15.135190  120899 balancer_v1_wrapper.go:125] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0111 23:42:15.136176  120899 balancer_v1_wrapper.go:245] clientv3/balancer: pin "127.0.0.1:2379"
I0111 23:42:15.136323  120899 store.go:1414] Monitoring leases.coordination.k8s.io count at <storage-prefix>//leases
I0111 23:42:15.136562  120899 storage_factory.go:285] storing leases.coordination.k8s.io in coordination.k8s.io/v1beta1, reading as coordination.k8s.io/__internal from storagebackend.Config{Type:"", Prefix:"d246db8b-6af8-44a6-8f81-916ee996e115", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Quorum:false, Paging:true, Codec:runtime.Codec(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0111 23:42:15.136657  120899 clientconn.go:551] parsed scheme: ""
I0111 23:42:15.136682  120899 clientconn.go:557] scheme "" not registered, fallback to default scheme
I0111 23:42:15.136730  120899 resolver_conn_wrapper.go:116] ccResolverWrapper: sending new addresses to cc: [{127.0.0.1:2379 0  <nil>}]
I0111 23:42:15.136872  120899 balancer_v1_wrapper.go:125] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0111 23:42:15.136935  120899 reflector.go:169] Listing and watching *coordination.Lease from storage/cacher.go:/leases
I0111 23:42:15.137171  120899 balancer_v1_wrapper.go:125] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0111 23:42:15.137524  120899 balancer_v1_wrapper.go:245] clientv3/balancer: pin "127.0.0.1:2379"
I0111 23:42:15.139774  120899 balancer_v1_wrapper.go:125] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0111 23:42:15.140045  120899 store.go:1414] Monitoring leases.coordination.k8s.io count at <storage-prefix>//leases
I0111 23:42:15.140083  120899 master.go:416] Enabling API group "coordination.k8s.io".
I0111 23:42:15.140088  120899 reflector.go:169] Listing and watching *coordination.Lease from storage/cacher.go:/leases
I0111 23:42:15.140303  120899 storage_factory.go:285] storing replicationcontrollers in v1, reading as __internal from storagebackend.Config{Type:"", Prefix:"d246db8b-6af8-44a6-8f81-916ee996e115", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Quorum:false, Paging:true, Codec:runtime.Codec(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0111 23:42:15.140372  120899 clientconn.go:551] parsed scheme: ""
I0111 23:42:15.140383  120899 clientconn.go:557] scheme "" not registered, fallback to default scheme
I0111 23:42:15.140412  120899 resolver_conn_wrapper.go:116] ccResolverWrapper: sending new addresses to cc: [{127.0.0.1:2379 0  <nil>}]
I0111 23:42:15.140552  120899 balancer_v1_wrapper.go:125] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0111 23:42:15.140783  120899 balancer_v1_wrapper.go:245] clientv3/balancer: pin "127.0.0.1:2379"
I0111 23:42:15.140904  120899 store.go:1414] Monitoring replicationcontrollers count at <storage-prefix>//replicationcontrollers
I0111 23:42:15.140946  120899 balancer_v1_wrapper.go:125] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0111 23:42:15.141019  120899 reflector.go:169] Listing and watching *core.ReplicationController from storage/cacher.go:/replicationcontrollers
I0111 23:42:15.141063  120899 storage_factory.go:285] storing daemonsets.apps in apps/v1, reading as apps/__internal from storagebackend.Config{Type:"", Prefix:"d246db8b-6af8-44a6-8f81-916ee996e115", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Quorum:false, Paging:true, Codec:runtime.Codec(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0111 23:42:15.141130  120899 clientconn.go:551] parsed scheme: ""
I0111 23:42:15.141141  120899 clientconn.go:557] scheme "" not registered, fallback to default scheme
I0111 23:42:15.141212  120899 resolver_conn_wrapper.go:116] ccResolverWrapper: sending new addresses to cc: [{127.0.0.1:2379 0  <nil>}]
I0111 23:42:15.141255  120899 balancer_v1_wrapper.go:125] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0111 23:42:15.141508  120899 balancer_v1_wrapper.go:245] clientv3/balancer: pin "127.0.0.1:2379"
I0111 23:42:15.141760  120899 store.go:1414] Monitoring daemonsets.apps count at <storage-prefix>//daemonsets
I0111 23:42:15.141903  120899 storage_factory.go:285] storing deployments.apps in apps/v1, reading as apps/__internal from storagebackend.Config{Type:"", Prefix:"d246db8b-6af8-44a6-8f81-916ee996e115", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Quorum:false, Paging:true, Codec:runtime.Codec(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0111 23:42:15.141971  120899 clientconn.go:551] parsed scheme: ""
I0111 23:42:15.141983  120899 clientconn.go:557] scheme "" not registered, fallback to default scheme
I0111 23:42:15.142009  120899 resolver_conn_wrapper.go:116] ccResolverWrapper: sending new addresses to cc: [{127.0.0.1:2379 0  <nil>}]
I0111 23:42:15.142108  120899 balancer_v1_wrapper.go:125] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0111 23:42:15.142134  120899 reflector.go:169] Listing and watching *apps.DaemonSet from storage/cacher.go:/daemonsets
I0111 23:42:15.142267  120899 balancer_v1_wrapper.go:125] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0111 23:42:15.143389  120899 balancer_v1_wrapper.go:245] clientv3/balancer: pin "127.0.0.1:2379"
I0111 23:42:15.143539  120899 store.go:1414] Monitoring deployments.apps count at <storage-prefix>//deployments
I0111 23:42:15.143682  120899 storage_factory.go:285] storing ingresses.extensions in extensions/v1beta1, reading as extensions/__internal from storagebackend.Config{Type:"", Prefix:"d246db8b-6af8-44a6-8f81-916ee996e115", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Quorum:false, Paging:true, Codec:runtime.Codec(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0111 23:42:15.143777  120899 clientconn.go:551] parsed scheme: ""
I0111 23:42:15.143793  120899 clientconn.go:557] scheme "" not registered, fallback to default scheme
I0111 23:42:15.143828  120899 resolver_conn_wrapper.go:116] ccResolverWrapper: sending new addresses to cc: [{127.0.0.1:2379 0  <nil>}]
I0111 23:42:15.143907  120899 balancer_v1_wrapper.go:125] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0111 23:42:15.143933  120899 reflector.go:169] Listing and watching *apps.Deployment from storage/cacher.go:/deployments
I0111 23:42:15.144142  120899 balancer_v1_wrapper.go:125] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0111 23:42:15.164731  120899 balancer_v1_wrapper.go:245] clientv3/balancer: pin "127.0.0.1:2379"
I0111 23:42:15.165616  120899 store.go:1414] Monitoring ingresses.extensions count at <storage-prefix>//ingresses
I0111 23:42:15.166875  120899 reflector.go:169] Listing and watching *extensions.Ingress from storage/cacher.go:/ingresses
I0111 23:42:15.166898  120899 balancer_v1_wrapper.go:125] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0111 23:42:15.168410  120899 storage_factory.go:285] storing podsecuritypolicies.policy in policy/v1beta1, reading as policy/__internal from storagebackend.Config{Type:"", Prefix:"d246db8b-6af8-44a6-8f81-916ee996e115", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Quorum:false, Paging:true, Codec:runtime.Codec(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0111 23:42:15.168763  120899 clientconn.go:551] parsed scheme: ""
I0111 23:42:15.168791  120899 clientconn.go:557] scheme "" not registered, fallback to default scheme
I0111 23:42:15.168963  120899 resolver_conn_wrapper.go:116] ccResolverWrapper: sending new addresses to cc: [{127.0.0.1:2379 0  <nil>}]
I0111 23:42:15.170425  120899 balancer_v1_wrapper.go:125] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0111 23:42:15.175044  120899 balancer_v1_wrapper.go:245] clientv3/balancer: pin "127.0.0.1:2379"
I0111 23:42:15.176075  120899 store.go:1414] Monitoring podsecuritypolicies.policy count at <storage-prefix>//podsecuritypolicies
I0111 23:42:15.177138  120899 balancer_v1_wrapper.go:125] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0111 23:42:15.177788  120899 reflector.go:169] Listing and watching *policy.PodSecurityPolicy from storage/cacher.go:/podsecuritypolicies
I0111 23:42:15.178201  120899 storage_factory.go:285] storing replicasets.apps in apps/v1, reading as apps/__internal from storagebackend.Config{Type:"", Prefix:"d246db8b-6af8-44a6-8f81-916ee996e115", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Quorum:false, Paging:true, Codec:runtime.Codec(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0111 23:42:15.179045  120899 clientconn.go:551] parsed scheme: ""
I0111 23:42:15.180073  120899 clientconn.go:557] scheme "" not registered, fallback to default scheme
I0111 23:42:15.180331  120899 resolver_conn_wrapper.go:116] ccResolverWrapper: sending new addresses to cc: [{127.0.0.1:2379 0  <nil>}]
I0111 23:42:15.182983  120899 balancer_v1_wrapper.go:125] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0111 23:42:15.186558  120899 balancer_v1_wrapper.go:245] clientv3/balancer: pin "127.0.0.1:2379"
I0111 23:42:15.189455  120899 balancer_v1_wrapper.go:125] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0111 23:42:15.193053  120899 store.go:1414] Monitoring replicasets.apps count at <storage-prefix>//replicasets
I0111 23:42:15.193338  120899 reflector.go:169] Listing and watching *apps.ReplicaSet from storage/cacher.go:/replicasets
I0111 23:42:15.196231  120899 storage_factory.go:285] storing networkpolicies.networking.k8s.io in networking.k8s.io/v1, reading as networking.k8s.io/__internal from storagebackend.Config{Type:"", Prefix:"d246db8b-6af8-44a6-8f81-916ee996e115", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Quorum:false, Paging:true, Codec:runtime.Codec(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0111 23:42:15.199994  120899 clientconn.go:551] parsed scheme: ""
I0111 23:42:15.201102  120899 clientconn.go:557] scheme "" not registered, fallback to default scheme
I0111 23:42:15.201502  120899 resolver_conn_wrapper.go:116] ccResolverWrapper: sending new addresses to cc: [{127.0.0.1:2379 0  <nil>}]
I0111 23:42:15.202520  120899 balancer_v1_wrapper.go:125] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0111 23:42:15.240492  120899 balancer_v1_wrapper.go:245] clientv3/balancer: pin "127.0.0.1:2379"
I0111 23:42:15.240725  120899 balancer_v1_wrapper.go:125] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0111 23:42:15.240920  120899 store.go:1414] Monitoring networkpolicies.networking.k8s.io count at <storage-prefix>//networkpolicies
I0111 23:42:15.240976  120899 master.go:416] Enabling API group "extensions".
I0111 23:42:15.241368  120899 reflector.go:169] Listing and watching *networking.NetworkPolicy from storage/cacher.go:/networkpolicies
I0111 23:42:15.241348  120899 storage_factory.go:285] storing networkpolicies.networking.k8s.io in networking.k8s.io/v1, reading as networking.k8s.io/__internal from storagebackend.Config{Type:"", Prefix:"d246db8b-6af8-44a6-8f81-916ee996e115", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Quorum:false, Paging:true, Codec:runtime.Codec(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0111 23:42:15.242614  120899 clientconn.go:551] parsed scheme: ""
I0111 23:42:15.242654  120899 clientconn.go:557] scheme "" not registered, fallback to default scheme
I0111 23:42:15.242795  120899 resolver_conn_wrapper.go:116] ccResolverWrapper: sending new addresses to cc: [{127.0.0.1:2379 0  <nil>}]
I0111 23:42:15.242938  120899 balancer_v1_wrapper.go:125] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0111 23:42:15.244187  120899 balancer_v1_wrapper.go:245] clientv3/balancer: pin "127.0.0.1:2379"
I0111 23:42:15.244316  120899 balancer_v1_wrapper.go:125] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0111 23:42:15.244388  120899 store.go:1414] Monitoring networkpolicies.networking.k8s.io count at <storage-prefix>//networkpolicies
I0111 23:42:15.244421  120899 master.go:416] Enabling API group "networking.k8s.io".
I0111 23:42:15.244425  120899 reflector.go:169] Listing and watching *networking.NetworkPolicy from storage/cacher.go:/networkpolicies
I0111 23:42:15.244827  120899 storage_factory.go:285] storing poddisruptionbudgets.policy in policy/v1beta1, reading as policy/__internal from storagebackend.Config{Type:"", Prefix:"d246db8b-6af8-44a6-8f81-916ee996e115", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Quorum:false, Paging:true, Codec:runtime.Codec(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0111 23:42:15.244970  120899 clientconn.go:551] parsed scheme: ""
I0111 23:42:15.245008  120899 clientconn.go:557] scheme "" not registered, fallback to default scheme
I0111 23:42:15.245080  120899 resolver_conn_wrapper.go:116] ccResolverWrapper: sending new addresses to cc: [{127.0.0.1:2379 0  <nil>}]
I0111 23:42:15.245265  120899 balancer_v1_wrapper.go:125] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0111 23:42:15.246808  120899 balancer_v1_wrapper.go:245] clientv3/balancer: pin "127.0.0.1:2379"
I0111 23:42:15.246974  120899 store.go:1414] Monitoring poddisruptionbudgets.policy count at <storage-prefix>//poddisruptionbudgets
I0111 23:42:15.247355  120899 storage_factory.go:285] storing podsecuritypolicies.policy in policy/v1beta1, reading as policy/__internal from storagebackend.Config{Type:"", Prefix:"d246db8b-6af8-44a6-8f81-916ee996e115", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Quorum:false, Paging:true, Codec:runtime.Codec(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0111 23:42:15.247497  120899 clientconn.go:551] parsed scheme: ""
I0111 23:42:15.247527  120899 clientconn.go:557] scheme "" not registered, fallback to default scheme
I0111 23:42:15.247577  120899 resolver_conn_wrapper.go:116] ccResolverWrapper: sending new addresses to cc: [{127.0.0.1:2379 0  <nil>}]
I0111 23:42:15.247730  120899 balancer_v1_wrapper.go:125] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0111 23:42:15.247815  120899 reflector.go:169] Listing and watching *policy.PodDisruptionBudget from storage/cacher.go:/poddisruptionbudgets
I0111 23:42:15.248137  120899 balancer_v1_wrapper.go:125] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0111 23:42:15.248725  120899 balancer_v1_wrapper.go:245] clientv3/balancer: pin "127.0.0.1:2379"
I0111 23:42:15.249075  120899 store.go:1414] Monitoring podsecuritypolicies.policy count at <storage-prefix>//podsecuritypolicies
I0111 23:42:15.249105  120899 master.go:416] Enabling API group "policy".
I0111 23:42:15.249149  120899 balancer_v1_wrapper.go:125] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0111 23:42:15.249196  120899 storage_factory.go:285] storing roles.rbac.authorization.k8s.io in rbac.authorization.k8s.io/v1, reading as rbac.authorization.k8s.io/__internal from storagebackend.Config{Type:"", Prefix:"d246db8b-6af8-44a6-8f81-916ee996e115", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Quorum:false, Paging:true, Codec:runtime.Codec(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0111 23:42:15.249365  120899 reflector.go:169] Listing and watching *policy.PodSecurityPolicy from storage/cacher.go:/podsecuritypolicies
I0111 23:42:15.249414  120899 clientconn.go:551] parsed scheme: ""
I0111 23:42:15.249428  120899 clientconn.go:557] scheme "" not registered, fallback to default scheme
I0111 23:42:15.249494  120899 resolver_conn_wrapper.go:116] ccResolverWrapper: sending new addresses to cc: [{127.0.0.1:2379 0  <nil>}]
I0111 23:42:15.249666  120899 balancer_v1_wrapper.go:125] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0111 23:42:15.249969  120899 balancer_v1_wrapper.go:245] clientv3/balancer: pin "127.0.0.1:2379"
I0111 23:42:15.250012  120899 balancer_v1_wrapper.go:125] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0111 23:42:15.250084  120899 store.go:1414] Monitoring roles.rbac.authorization.k8s.io count at <storage-prefix>//roles
I0111 23:42:15.250111  120899 reflector.go:169] Listing and watching *rbac.Role from storage/cacher.go:/roles
I0111 23:42:15.250472  120899 storage_factory.go:285] storing rolebindings.rbac.authorization.k8s.io in rbac.authorization.k8s.io/v1, reading as rbac.authorization.k8s.io/__internal from storagebackend.Config{Type:"", Prefix:"d246db8b-6af8-44a6-8f81-916ee996e115", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Quorum:false, Paging:true, Codec:runtime.Codec(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0111 23:42:15.250597  120899 clientconn.go:551] parsed scheme: ""
I0111 23:42:15.250612  120899 clientconn.go:557] scheme "" not registered, fallback to default scheme
I0111 23:42:15.250656  120899 resolver_conn_wrapper.go:116] ccResolverWrapper: sending new addresses to cc: [{127.0.0.1:2379 0  <nil>}]
I0111 23:42:15.250889  120899 balancer_v1_wrapper.go:125] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0111 23:42:15.251130  120899 balancer_v1_wrapper.go:245] clientv3/balancer: pin "127.0.0.1:2379"
I0111 23:42:15.251238  120899 store.go:1414] Monitoring rolebindings.rbac.authorization.k8s.io count at <storage-prefix>//rolebindings
I0111 23:42:15.251320  120899 storage_factory.go:285] storing clusterroles.rbac.authorization.k8s.io in rbac.authorization.k8s.io/v1, reading as rbac.authorization.k8s.io/__internal from storagebackend.Config{Type:"", Prefix:"d246db8b-6af8-44a6-8f81-916ee996e115", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Quorum:false, Paging:true, Codec:runtime.Codec(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0111 23:42:15.251418  120899 clientconn.go:551] parsed scheme: ""
I0111 23:42:15.251423  120899 balancer_v1_wrapper.go:125] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0111 23:42:15.251438  120899 clientconn.go:557] scheme "" not registered, fallback to default scheme
I0111 23:42:15.251612  120899 resolver_conn_wrapper.go:116] ccResolverWrapper: sending new addresses to cc: [{127.0.0.1:2379 0  <nil>}]
I0111 23:42:15.251684  120899 reflector.go:169] Listing and watching *rbac.RoleBinding from storage/cacher.go:/rolebindings
I0111 23:42:15.251892  120899 balancer_v1_wrapper.go:125] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0111 23:42:15.252140  120899 balancer_v1_wrapper.go:245] clientv3/balancer: pin "127.0.0.1:2379"
I0111 23:42:15.252264  120899 store.go:1414] Monitoring clusterroles.rbac.authorization.k8s.io count at <storage-prefix>//clusterroles
I0111 23:42:15.252967  120899 balancer_v1_wrapper.go:125] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0111 23:42:15.253058  120899 reflector.go:169] Listing and watching *rbac.ClusterRole from storage/cacher.go:/clusterroles
I0111 23:42:15.255424  120899 storage_factory.go:285] storing clusterrolebindings.rbac.authorization.k8s.io in rbac.authorization.k8s.io/v1, reading as rbac.authorization.k8s.io/__internal from storagebackend.Config{Type:"", Prefix:"d246db8b-6af8-44a6-8f81-916ee996e115", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Quorum:false, Paging:true, Codec:runtime.Codec(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0111 23:42:15.255610  120899 clientconn.go:551] parsed scheme: ""
I0111 23:42:15.255664  120899 clientconn.go:557] scheme "" not registered, fallback to default scheme
I0111 23:42:15.255779  120899 resolver_conn_wrapper.go:116] ccResolverWrapper: sending new addresses to cc: [{127.0.0.1:2379 0  <nil>}]
I0111 23:42:15.255960  120899 balancer_v1_wrapper.go:125] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0111 23:42:15.256302  120899 balancer_v1_wrapper.go:245] clientv3/balancer: pin "127.0.0.1:2379"
I0111 23:42:15.256504  120899 store.go:1414] Monitoring clusterrolebindings.rbac.authorization.k8s.io count at <storage-prefix>//clusterrolebindings
I0111 23:42:15.256586  120899 storage_factory.go:285] storing roles.rbac.authorization.k8s.io in rbac.authorization.k8s.io/v1, reading as rbac.authorization.k8s.io/__internal from storagebackend.Config{Type:"", Prefix:"d246db8b-6af8-44a6-8f81-916ee996e115", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Quorum:false, Paging:true, Codec:runtime.Codec(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0111 23:42:15.256824  120899 clientconn.go:551] parsed scheme: ""
I0111 23:42:15.256869  120899 clientconn.go:557] scheme "" not registered, fallback to default scheme
I0111 23:42:15.256948  120899 resolver_conn_wrapper.go:116] ccResolverWrapper: sending new addresses to cc: [{127.0.0.1:2379 0  <nil>}]
I0111 23:42:15.257115  120899 balancer_v1_wrapper.go:125] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0111 23:42:15.257184  120899 reflector.go:169] Listing and watching *rbac.ClusterRoleBinding from storage/cacher.go:/clusterrolebindings
I0111 23:42:15.257684  120899 balancer_v1_wrapper.go:125] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0111 23:42:15.257988  120899 balancer_v1_wrapper.go:245] clientv3/balancer: pin "127.0.0.1:2379"
I0111 23:42:15.258100  120899 store.go:1414] Monitoring roles.rbac.authorization.k8s.io count at <storage-prefix>//roles
I0111 23:42:15.258617  120899 reflector.go:169] Listing and watching *rbac.Role from storage/cacher.go:/roles
I0111 23:42:15.258694  120899 balancer_v1_wrapper.go:125] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0111 23:42:15.260488  120899 storage_factory.go:285] storing rolebindings.rbac.authorization.k8s.io in rbac.authorization.k8s.io/v1, reading as rbac.authorization.k8s.io/__internal from storagebackend.Config{Type:"", Prefix:"d246db8b-6af8-44a6-8f81-916ee996e115", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Quorum:false, Paging:true, Codec:runtime.Codec(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0111 23:42:15.275195  120899 clientconn.go:551] parsed scheme: ""
I0111 23:42:15.275299  120899 clientconn.go:557] scheme "" not registered, fallback to default scheme
I0111 23:42:15.275418  120899 resolver_conn_wrapper.go:116] ccResolverWrapper: sending new addresses to cc: [{127.0.0.1:2379 0  <nil>}]
I0111 23:42:15.275769  120899 balancer_v1_wrapper.go:125] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0111 23:42:15.276801  120899 balancer_v1_wrapper.go:245] clientv3/balancer: pin "127.0.0.1:2379"
I0111 23:42:15.276976  120899 balancer_v1_wrapper.go:125] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0111 23:42:15.277400  120899 store.go:1414] Monitoring rolebindings.rbac.authorization.k8s.io count at <storage-prefix>//rolebindings
I0111 23:42:15.277507  120899 storage_factory.go:285] storing clusterroles.rbac.authorization.k8s.io in rbac.authorization.k8s.io/v1, reading as rbac.authorization.k8s.io/__internal from storagebackend.Config{Type:"", Prefix:"d246db8b-6af8-44a6-8f81-916ee996e115", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Quorum:false, Paging:true, Codec:runtime.Codec(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0111 23:42:15.279049  120899 clientconn.go:551] parsed scheme: ""
I0111 23:42:15.279122  120899 clientconn.go:557] scheme "" not registered, fallback to default scheme
I0111 23:42:15.279208  120899 resolver_conn_wrapper.go:116] ccResolverWrapper: sending new addresses to cc: [{127.0.0.1:2379 0  <nil>}]
I0111 23:42:15.277516  120899 reflector.go:169] Listing and watching *rbac.RoleBinding from storage/cacher.go:/rolebindings
I0111 23:42:15.279445  120899 balancer_v1_wrapper.go:125] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0111 23:42:15.290117  120899 balancer_v1_wrapper.go:245] clientv3/balancer: pin "127.0.0.1:2379"
I0111 23:42:15.290420  120899 store.go:1414] Monitoring clusterroles.rbac.authorization.k8s.io count at <storage-prefix>//clusterroles
I0111 23:42:15.290514  120899 balancer_v1_wrapper.go:125] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0111 23:42:15.290586  120899 reflector.go:169] Listing and watching *rbac.ClusterRole from storage/cacher.go:/clusterroles
I0111 23:42:15.290783  120899 storage_factory.go:285] storing clusterrolebindings.rbac.authorization.k8s.io in rbac.authorization.k8s.io/v1, reading as rbac.authorization.k8s.io/__internal from storagebackend.Config{Type:"", Prefix:"d246db8b-6af8-44a6-8f81-916ee996e115", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Quorum:false, Paging:true, Codec:runtime.Codec(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0111 23:42:15.290941  120899 clientconn.go:551] parsed scheme: ""
I0111 23:42:15.290959  120899 clientconn.go:557] scheme "" not registered, fallback to default scheme
I0111 23:42:15.291031  120899 resolver_conn_wrapper.go:116] ccResolverWrapper: sending new addresses to cc: [{127.0.0.1:2379 0  <nil>}]
I0111 23:42:15.291119  120899 balancer_v1_wrapper.go:125] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0111 23:42:15.294120  120899 balancer_v1_wrapper.go:245] clientv3/balancer: pin "127.0.0.1:2379"
I0111 23:42:15.294156  120899 balancer_v1_wrapper.go:125] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0111 23:42:15.294318  120899 store.go:1414] Monitoring clusterrolebindings.rbac.authorization.k8s.io count at <storage-prefix>//clusterrolebindings
I0111 23:42:15.294348  120899 master.go:416] Enabling API group "rbac.authorization.k8s.io".
I0111 23:42:15.294422  120899 reflector.go:169] Listing and watching *rbac.ClusterRoleBinding from storage/cacher.go:/clusterrolebindings
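
The lines above repeat the per-resource bootstrap pattern that dominates this log: storage_factory builds an etcd-backed storage config, a gRPC client connection is dialed to 127.0.0.1:2379 (the empty scheme falls back to the default resolver, then the balancer pins the address), the store begins monitoring the resource count, and a reflector starts its list+watch loop. Below is a minimal stdlib-only Go sketch of that sequence; storageConfig and setupResource are illustrative names, not the real k8s.io/apiserver types.

package main

import (
	"fmt"
	"time"
)

// storageConfig mirrors the fields printed by storage_factory.go:285
// (prefix, backend server list, quorum/paging flags, compaction interval).
type storageConfig struct {
	Prefix             string
	ServerList         []string
	Quorum, Paging     bool
	CompactionInterval time.Duration
}

// setupResource sketches the repeated sequence: configure storage, dial
// the backend, monitor the count, then list+watch in the background.
func setupResource(resource string, cfg storageConfig) {
	fmt.Printf("storing %s with prefix %s\n", resource, cfg.Prefix)
	fmt.Printf("dialing %v (empty scheme, falling back to default)\n", cfg.ServerList)
	fmt.Printf("monitoring %s count\n", resource)
	go func() {
		// Reflector: list the current state once, then watch for changes.
		fmt.Printf("listing and watching %s\n", resource)
	}()
}

func main() {
	cfg := storageConfig{
		Prefix:             "d246db8b-6af8-44a6-8f81-916ee996e115",
		ServerList:         []string{"http://127.0.0.1:2379"},
		Quorum:             false,
		Paging:             true,
		CompactionInterval: 5 * time.Minute, // the log's 300000000000ns
	}
	for _, r := range []string{"clusterroles", "clusterrolebindings", "roles", "rolebindings"} {
		setupResource(r, cfg)
	}
	time.Sleep(10 * time.Millisecond) // let the reflector goroutines print
}
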
I0111 23:42:15.297577  120899 storage_factory.go:285] storing priorityclasses.scheduling.k8s.io in scheduling.k8s.io/v1beta1, reading as scheduling.k8s.io/__internal from storagebackend.Config{Type:"", Prefix:"d246db8b-6af8-44a6-8f81-916ee996e115", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Quorum:false, Paging:true, Codec:runtime.Codec(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0111 23:42:15.297715  120899 clientconn.go:551] parsed scheme: ""
I0111 23:42:15.297758  120899 clientconn.go:557] scheme "" not registered, fallback to default scheme
I0111 23:42:15.297849  120899 resolver_conn_wrapper.go:116] ccResolverWrapper: sending new addresses to cc: [{127.0.0.1:2379 0  <nil>}]
I0111 23:42:15.297910  120899 balancer_v1_wrapper.go:125] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0111 23:42:15.299534  120899 balancer_v1_wrapper.go:245] clientv3/balancer: pin "127.0.0.1:2379"
I0111 23:42:15.299672  120899 balancer_v1_wrapper.go:125] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0111 23:42:15.299888  120899 store.go:1414] Monitoring priorityclasses.scheduling.k8s.io count at <storage-prefix>//priorityclasses
I0111 23:42:15.300004  120899 master.go:416] Enabling API group "scheduling.k8s.io".
I0111 23:42:15.300366  120899 master.go:408] Skipping disabled API group "settings.k8s.io".
I0111 23:42:15.300572  120899 storage_factory.go:285] storing storageclasses.storage.k8s.io in storage.k8s.io/v1beta1, reading as storage.k8s.io/__internal from storagebackend.Config{Type:"", Prefix:"d246db8b-6af8-44a6-8f81-916ee996e115", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Quorum:false, Paging:true, Codec:runtime.Codec(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0111 23:42:15.299966  120899 reflector.go:169] Listing and watching *scheduling.PriorityClass from storage/cacher.go:/priorityclasses
I0111 23:42:15.300694  120899 clientconn.go:551] parsed scheme: ""
I0111 23:42:15.300813  120899 clientconn.go:557] scheme "" not registered, fallback to default scheme
I0111 23:42:15.300849  120899 resolver_conn_wrapper.go:116] ccResolverWrapper: sending new addresses to cc: [{127.0.0.1:2379 0  <nil>}]
I0111 23:42:15.300886  120899 balancer_v1_wrapper.go:125] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0111 23:42:15.301191  120899 balancer_v1_wrapper.go:245] clientv3/balancer: pin "127.0.0.1:2379"
I0111 23:42:15.301325  120899 balancer_v1_wrapper.go:125] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0111 23:42:15.301460  120899 store.go:1414] Monitoring storageclasses.storage.k8s.io count at <storage-prefix>//storageclasses
I0111 23:42:15.301512  120899 storage_factory.go:285] storing volumeattachments.storage.k8s.io in storage.k8s.io/v1beta1, reading as storage.k8s.io/__internal from storagebackend.Config{Type:"", Prefix:"d246db8b-6af8-44a6-8f81-916ee996e115", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Quorum:false, Paging:true, Codec:runtime.Codec(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0111 23:42:15.301607  120899 clientconn.go:551] parsed scheme: ""
I0111 23:42:15.301632  120899 clientconn.go:557] scheme "" not registered, fallback to default scheme
I0111 23:42:15.301679  120899 resolver_conn_wrapper.go:116] ccResolverWrapper: sending new addresses to cc: [{127.0.0.1:2379 0  <nil>}]
I0111 23:42:15.301761  120899 reflector.go:169] Listing and watching *storage.StorageClass from storage/cacher.go:/storageclasses
I0111 23:42:15.301982  120899 balancer_v1_wrapper.go:125] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0111 23:42:15.302770  120899 balancer_v1_wrapper.go:245] clientv3/balancer: pin "127.0.0.1:2379"
I0111 23:42:15.302860  120899 balancer_v1_wrapper.go:125] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0111 23:42:15.302991  120899 store.go:1414] Monitoring volumeattachments.storage.k8s.io count at <storage-prefix>//volumeattachments
I0111 23:42:15.303064  120899 reflector.go:169] Listing and watching *storage.VolumeAttachment from storage/cacher.go:/volumeattachments
I0111 23:42:15.303782  120899 storage_factory.go:285] storing storageclasses.storage.k8s.io in storage.k8s.io/v1beta1, reading as storage.k8s.io/__internal from storagebackend.Config{Type:"", Prefix:"d246db8b-6af8-44a6-8f81-916ee996e115", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Quorum:false, Paging:true, Codec:runtime.Codec(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0111 23:42:15.303915  120899 clientconn.go:551] parsed scheme: ""
I0111 23:42:15.303968  120899 clientconn.go:557] scheme "" not registered, fallback to default scheme
I0111 23:42:15.304013  120899 resolver_conn_wrapper.go:116] ccResolverWrapper: sending new addresses to cc: [{127.0.0.1:2379 0  <nil>}]
I0111 23:42:15.304183  120899 balancer_v1_wrapper.go:125] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0111 23:42:15.305261  120899 balancer_v1_wrapper.go:245] clientv3/balancer: pin "127.0.0.1:2379"
I0111 23:42:15.305693  120899 balancer_v1_wrapper.go:125] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0111 23:42:15.305855  120899 store.go:1414] Monitoring storageclasses.storage.k8s.io count at <storage-prefix>//storageclasses
I0111 23:42:15.305934  120899 storage_factory.go:285] storing volumeattachments.storage.k8s.io in storage.k8s.io/v1beta1, reading as storage.k8s.io/__internal from storagebackend.Config{Type:"", Prefix:"d246db8b-6af8-44a6-8f81-916ee996e115", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Quorum:false, Paging:true, Codec:runtime.Codec(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0111 23:42:15.306055  120899 clientconn.go:551] parsed scheme: ""
I0111 23:42:15.306118  120899 clientconn.go:557] scheme "" not registered, fallback to default scheme
I0111 23:42:15.306167  120899 resolver_conn_wrapper.go:116] ccResolverWrapper: sending new addresses to cc: [{127.0.0.1:2379 0  <nil>}]
I0111 23:42:15.306066  120899 reflector.go:169] Listing and watching *storage.StorageClass from storage/cacher.go:/storageclasses
I0111 23:42:15.306352  120899 balancer_v1_wrapper.go:125] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0111 23:42:15.306692  120899 balancer_v1_wrapper.go:245] clientv3/balancer: pin "127.0.0.1:2379"
I0111 23:42:15.306838  120899 balancer_v1_wrapper.go:125] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0111 23:42:15.306855  120899 store.go:1414] Monitoring volumeattachments.storage.k8s.io count at <storage-prefix>//volumeattachments
I0111 23:42:15.306872  120899 reflector.go:169] Listing and watching *storage.VolumeAttachment from storage/cacher.go:/volumeattachments
I0111 23:42:15.307014  120899 master.go:416] Enabling API group "storage.k8s.io".
I0111 23:42:15.307651  120899 storage_factory.go:285] storing deployments.apps in apps/v1, reading as apps/__internal from storagebackend.Config{Type:"", Prefix:"d246db8b-6af8-44a6-8f81-916ee996e115", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Quorum:false, Paging:true, Codec:runtime.Codec(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0111 23:42:15.307833  120899 clientconn.go:551] parsed scheme: ""
I0111 23:42:15.307951  120899 clientconn.go:557] scheme "" not registered, fallback to default scheme
I0111 23:42:15.308048  120899 resolver_conn_wrapper.go:116] ccResolverWrapper: sending new addresses to cc: [{127.0.0.1:2379 0  <nil>}]
I0111 23:42:15.308143  120899 balancer_v1_wrapper.go:125] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0111 23:42:15.309325  120899 balancer_v1_wrapper.go:245] clientv3/balancer: pin "127.0.0.1:2379"
I0111 23:42:15.309471  120899 store.go:1414] Monitoring deployments.apps count at <storage-prefix>//deployments
I0111 23:42:15.309481  120899 balancer_v1_wrapper.go:125] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0111 23:42:15.309509  120899 reflector.go:169] Listing and watching *apps.Deployment from storage/cacher.go:/deployments
I0111 23:42:15.309615  120899 storage_factory.go:285] storing statefulsets.apps in apps/v1, reading as apps/__internal from storagebackend.Config{Type:"", Prefix:"d246db8b-6af8-44a6-8f81-916ee996e115", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Quorum:false, Paging:true, Codec:runtime.Codec(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0111 23:42:15.309680  120899 clientconn.go:551] parsed scheme: ""
I0111 23:42:15.309709  120899 clientconn.go:557] scheme "" not registered, fallback to default scheme
I0111 23:42:15.309758  120899 resolver_conn_wrapper.go:116] ccResolverWrapper: sending new addresses to cc: [{127.0.0.1:2379 0  <nil>}]
I0111 23:42:15.309901  120899 balancer_v1_wrapper.go:125] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0111 23:42:15.310171  120899 balancer_v1_wrapper.go:245] clientv3/balancer: pin "127.0.0.1:2379"
I0111 23:42:15.310353  120899 store.go:1414] Monitoring statefulsets.apps count at <storage-prefix>//statefulsets
I0111 23:42:15.310519  120899 storage_factory.go:285] storing controllerrevisions.apps in apps/v1, reading as apps/__internal from storagebackend.Config{Type:"", Prefix:"d246db8b-6af8-44a6-8f81-916ee996e115", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Quorum:false, Paging:true, Codec:runtime.Codec(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0111 23:42:15.310627  120899 clientconn.go:551] parsed scheme: ""
I0111 23:42:15.310651  120899 clientconn.go:557] scheme "" not registered, fallback to default scheme
I0111 23:42:15.310692  120899 resolver_conn_wrapper.go:116] ccResolverWrapper: sending new addresses to cc: [{127.0.0.1:2379 0  <nil>}]
I0111 23:42:15.310720  120899 balancer_v1_wrapper.go:125] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0111 23:42:15.310768  120899 reflector.go:169] Listing and watching *apps.StatefulSet from storage/cacher.go:/statefulsets
I0111 23:42:15.310852  120899 balancer_v1_wrapper.go:125] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0111 23:42:15.311137  120899 balancer_v1_wrapper.go:245] clientv3/balancer: pin "127.0.0.1:2379"
I0111 23:42:15.311244  120899 store.go:1414] Monitoring controllerrevisions.apps count at <storage-prefix>//controllerrevisions
I0111 23:42:15.311405  120899 storage_factory.go:285] storing deployments.apps in apps/v1, reading as apps/__internal from storagebackend.Config{Type:"", Prefix:"d246db8b-6af8-44a6-8f81-916ee996e115", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Quorum:false, Paging:true, Codec:runtime.Codec(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0111 23:42:15.311486  120899 clientconn.go:551] parsed scheme: ""
I0111 23:42:15.311500  120899 clientconn.go:557] scheme "" not registered, fallback to default scheme
I0111 23:42:15.311528  120899 resolver_conn_wrapper.go:116] ccResolverWrapper: sending new addresses to cc: [{127.0.0.1:2379 0  <nil>}]
I0111 23:42:15.311612  120899 balancer_v1_wrapper.go:125] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0111 23:42:15.311648  120899 reflector.go:169] Listing and watching *apps.ControllerRevision from storage/cacher.go:/controllerrevisions
I0111 23:42:15.311896  120899 balancer_v1_wrapper.go:125] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0111 23:42:15.312456  120899 balancer_v1_wrapper.go:245] clientv3/balancer: pin "127.0.0.1:2379"
I0111 23:42:15.312599  120899 store.go:1414] Monitoring deployments.apps count at <storage-prefix>//deployments
I0111 23:42:15.312730  120899 storage_factory.go:285] storing statefulsets.apps in apps/v1, reading as apps/__internal from storagebackend.Config{Type:"", Prefix:"d246db8b-6af8-44a6-8f81-916ee996e115", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Quorum:false, Paging:true, Codec:runtime.Codec(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0111 23:42:15.312829  120899 clientconn.go:551] parsed scheme: ""
I0111 23:42:15.312841  120899 clientconn.go:557] scheme "" not registered, fallback to default scheme
I0111 23:42:15.312871  120899 resolver_conn_wrapper.go:116] ccResolverWrapper: sending new addresses to cc: [{127.0.0.1:2379 0  <nil>}]
I0111 23:42:15.312938  120899 balancer_v1_wrapper.go:125] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0111 23:42:15.312962  120899 reflector.go:169] Listing and watching *apps.Deployment from storage/cacher.go:/deployments
I0111 23:42:15.313094  120899 balancer_v1_wrapper.go:125] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0111 23:42:15.313428  120899 balancer_v1_wrapper.go:245] clientv3/balancer: pin "127.0.0.1:2379"
I0111 23:42:15.313541  120899 store.go:1414] Monitoring statefulsets.apps count at <storage-prefix>//statefulsets
I0111 23:42:15.313701  120899 storage_factory.go:285] storing daemonsets.apps in apps/v1, reading as apps/__internal from storagebackend.Config{Type:"", Prefix:"d246db8b-6af8-44a6-8f81-916ee996e115", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Quorum:false, Paging:true, Codec:runtime.Codec(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0111 23:42:15.313823  120899 clientconn.go:551] parsed scheme: ""
I0111 23:42:15.313892  120899 clientconn.go:557] scheme "" not registered, fallback to default scheme
I0111 23:42:15.313928  120899 resolver_conn_wrapper.go:116] ccResolverWrapper: sending new addresses to cc: [{127.0.0.1:2379 0  <nil>}]
I0111 23:42:15.314002  120899 balancer_v1_wrapper.go:125] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0111 23:42:15.314030  120899 reflector.go:169] Listing and watching *apps.StatefulSet from storage/cacher.go:/statefulsets
I0111 23:42:15.314164  120899 balancer_v1_wrapper.go:125] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0111 23:42:15.315087  120899 balancer_v1_wrapper.go:245] clientv3/balancer: pin "127.0.0.1:2379"
I0111 23:42:15.315607  120899 balancer_v1_wrapper.go:125] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0111 23:42:15.315949  120899 store.go:1414] Monitoring daemonsets.apps count at <storage-prefix>//daemonsets
I0111 23:42:15.315985  120899 reflector.go:169] Listing and watching *apps.DaemonSet from storage/cacher.go:/daemonsets
I0111 23:42:15.316161  120899 storage_factory.go:285] storing replicasets.apps in apps/v1, reading as apps/__internal from storagebackend.Config{Type:"", Prefix:"d246db8b-6af8-44a6-8f81-916ee996e115", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Quorum:false, Paging:true, Codec:runtime.Codec(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0111 23:42:15.316258  120899 clientconn.go:551] parsed scheme: ""
I0111 23:42:15.316317  120899 clientconn.go:557] scheme "" not registered, fallback to default scheme
I0111 23:42:15.316365  120899 resolver_conn_wrapper.go:116] ccResolverWrapper: sending new addresses to cc: [{127.0.0.1:2379 0  <nil>}]
I0111 23:42:15.316432  120899 balancer_v1_wrapper.go:125] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0111 23:42:15.318811  120899 balancer_v1_wrapper.go:245] clientv3/balancer: pin "127.0.0.1:2379"
I0111 23:42:15.319017  120899 store.go:1414] Monitoring replicasets.apps count at <storage-prefix>//replicasets
I0111 23:42:15.319321  120899 storage_factory.go:285] storing controllerrevisions.apps in apps/v1, reading as apps/__internal from storagebackend.Config{Type:"", Prefix:"d246db8b-6af8-44a6-8f81-916ee996e115", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Quorum:false, Paging:true, Codec:runtime.Codec(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0111 23:42:15.319447  120899 clientconn.go:551] parsed scheme: ""
I0111 23:42:15.319499  120899 clientconn.go:557] scheme "" not registered, fallback to default scheme
I0111 23:42:15.319576  120899 resolver_conn_wrapper.go:116] ccResolverWrapper: sending new addresses to cc: [{127.0.0.1:2379 0  <nil>}]
I0111 23:42:15.319725  120899 balancer_v1_wrapper.go:125] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0111 23:42:15.319830  120899 reflector.go:169] Listing and watching *apps.ReplicaSet from storage/cacher.go:/replicasets
I0111 23:42:15.320089  120899 balancer_v1_wrapper.go:125] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0111 23:42:15.321348  120899 balancer_v1_wrapper.go:245] clientv3/balancer: pin "127.0.0.1:2379"
I0111 23:42:15.321463  120899 store.go:1414] Monitoring controllerrevisions.apps count at <storage-prefix>//controllerrevisions
I0111 23:42:15.321609  120899 balancer_v1_wrapper.go:125] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0111 23:42:15.321627  120899 storage_factory.go:285] storing deployments.apps in apps/v1, reading as apps/__internal from storagebackend.Config{Type:"", Prefix:"d246db8b-6af8-44a6-8f81-916ee996e115", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Quorum:false, Paging:true, Codec:runtime.Codec(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0111 23:42:15.321684  120899 reflector.go:169] Listing and watching *apps.ControllerRevision from storage/cacher.go:/controllerrevisions
I0111 23:42:15.321711  120899 clientconn.go:551] parsed scheme: ""
I0111 23:42:15.321723  120899 clientconn.go:557] scheme "" not registered, fallback to default scheme
I0111 23:42:15.321787  120899 resolver_conn_wrapper.go:116] ccResolverWrapper: sending new addresses to cc: [{127.0.0.1:2379 0  <nil>}]
I0111 23:42:15.321918  120899 balancer_v1_wrapper.go:125] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0111 23:42:15.322126  120899 balancer_v1_wrapper.go:245] clientv3/balancer: pin "127.0.0.1:2379"
I0111 23:42:15.322272  120899 store.go:1414] Monitoring deployments.apps count at <storage-prefix>//deployments
I0111 23:42:15.322503  120899 storage_factory.go:285] storing statefulsets.apps in apps/v1, reading as apps/__internal from storagebackend.Config{Type:"", Prefix:"d246db8b-6af8-44a6-8f81-916ee996e115", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Quorum:false, Paging:true, Codec:runtime.Codec(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0111 23:42:15.322627  120899 clientconn.go:551] parsed scheme: ""
I0111 23:42:15.322662  120899 clientconn.go:557] scheme "" not registered, fallback to default scheme
I0111 23:42:15.322705  120899 resolver_conn_wrapper.go:116] ccResolverWrapper: sending new addresses to cc: [{127.0.0.1:2379 0  <nil>}]
I0111 23:42:15.322811  120899 balancer_v1_wrapper.go:125] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0111 23:42:15.322859  120899 reflector.go:169] Listing and watching *apps.Deployment from storage/cacher.go:/deployments
I0111 23:42:15.323044  120899 balancer_v1_wrapper.go:125] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0111 23:42:15.323378  120899 balancer_v1_wrapper.go:245] clientv3/balancer: pin "127.0.0.1:2379"
I0111 23:42:15.323880  120899 store.go:1414] Monitoring statefulsets.apps count at <storage-prefix>//statefulsets
I0111 23:42:15.324083  120899 balancer_v1_wrapper.go:125] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0111 23:42:15.324136  120899 reflector.go:169] Listing and watching *apps.StatefulSet from storage/cacher.go:/statefulsets
I0111 23:42:15.324136  120899 storage_factory.go:285] storing daemonsets.apps in apps/v1, reading as apps/__internal from storagebackend.Config{Type:"", Prefix:"d246db8b-6af8-44a6-8f81-916ee996e115", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Quorum:false, Paging:true, Codec:runtime.Codec(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0111 23:42:15.324273  120899 clientconn.go:551] parsed scheme: ""
I0111 23:42:15.324345  120899 clientconn.go:557] scheme "" not registered, fallback to default scheme
I0111 23:42:15.324394  120899 resolver_conn_wrapper.go:116] ccResolverWrapper: sending new addresses to cc: [{127.0.0.1:2379 0  <nil>}]
I0111 23:42:15.325027  120899 balancer_v1_wrapper.go:125] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0111 23:42:15.325776  120899 balancer_v1_wrapper.go:245] clientv3/balancer: pin "127.0.0.1:2379"
I0111 23:42:15.325938  120899 balancer_v1_wrapper.go:125] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0111 23:42:15.326416  120899 store.go:1414] Monitoring daemonsets.apps count at <storage-prefix>//daemonsets
I0111 23:42:15.326518  120899 reflector.go:169] Listing and watching *apps.DaemonSet from storage/cacher.go:/daemonsets
I0111 23:42:15.327034  120899 storage_factory.go:285] storing replicasets.apps in apps/v1, reading as apps/__internal from storagebackend.Config{Type:"", Prefix:"d246db8b-6af8-44a6-8f81-916ee996e115", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Quorum:false, Paging:true, Codec:runtime.Codec(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0111 23:42:15.327144  120899 clientconn.go:551] parsed scheme: ""
I0111 23:42:15.327167  120899 clientconn.go:557] scheme "" not registered, fallback to default scheme
I0111 23:42:15.327206  120899 resolver_conn_wrapper.go:116] ccResolverWrapper: sending new addresses to cc: [{127.0.0.1:2379 0  <nil>}]
I0111 23:42:15.327265  120899 balancer_v1_wrapper.go:125] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0111 23:42:15.329143  120899 balancer_v1_wrapper.go:245] clientv3/balancer: pin "127.0.0.1:2379"
I0111 23:42:15.329400  120899 balancer_v1_wrapper.go:125] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0111 23:42:15.329655  120899 store.go:1414] Monitoring replicasets.apps count at <storage-prefix>//replicasets
I0111 23:42:15.329796  120899 reflector.go:169] Listing and watching *apps.ReplicaSet from storage/cacher.go:/replicasets
I0111 23:42:15.329970  120899 storage_factory.go:285] storing controllerrevisions.apps in apps/v1, reading as apps/__internal from storagebackend.Config{Type:"", Prefix:"d246db8b-6af8-44a6-8f81-916ee996e115", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Quorum:false, Paging:true, Codec:runtime.Codec(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0111 23:42:15.359185  120899 clientconn.go:551] parsed scheme: ""
I0111 23:42:15.359240  120899 clientconn.go:557] scheme "" not registered, fallback to default scheme
I0111 23:42:15.359334  120899 resolver_conn_wrapper.go:116] ccResolverWrapper: sending new addresses to cc: [{127.0.0.1:2379 0  <nil>}]
I0111 23:42:15.359433  120899 balancer_v1_wrapper.go:125] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0111 23:42:15.360628  120899 balancer_v1_wrapper.go:245] clientv3/balancer: pin "127.0.0.1:2379"
I0111 23:42:15.360809  120899 store.go:1414] Monitoring controllerrevisions.apps count at <storage-prefix>//controllerrevisions
I0111 23:42:15.360835  120899 master.go:416] Enabling API group "apps".
I0111 23:42:15.360881  120899 storage_factory.go:285] storing validatingwebhookconfigurations.admissionregistration.k8s.io in admissionregistration.k8s.io/v1beta1, reading as admissionregistration.k8s.io/__internal from storagebackend.Config{Type:"", Prefix:"d246db8b-6af8-44a6-8f81-916ee996e115", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Quorum:false, Paging:true, Codec:runtime.Codec(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0111 23:42:15.360977  120899 clientconn.go:551] parsed scheme: ""
I0111 23:42:15.360989  120899 clientconn.go:557] scheme "" not registered, fallback to default scheme
I0111 23:42:15.361026  120899 resolver_conn_wrapper.go:116] ccResolverWrapper: sending new addresses to cc: [{127.0.0.1:2379 0  <nil>}]
I0111 23:42:15.361114  120899 balancer_v1_wrapper.go:125] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0111 23:42:15.361148  120899 reflector.go:169] Listing and watching *apps.ControllerRevision from storage/cacher.go:/controllerrevisions
I0111 23:42:15.361451  120899 balancer_v1_wrapper.go:125] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0111 23:42:15.361712  120899 balancer_v1_wrapper.go:245] clientv3/balancer: pin "127.0.0.1:2379"
I0111 23:42:15.361875  120899 store.go:1414] Monitoring validatingwebhookconfigurations.admissionregistration.k8s.io count at <storage-prefix>//validatingwebhookconfigurations
I0111 23:42:15.361917  120899 storage_factory.go:285] storing mutatingwebhookconfigurations.admissionregistration.k8s.io in admissionregistration.k8s.io/v1beta1, reading as admissionregistration.k8s.io/__internal from storagebackend.Config{Type:"", Prefix:"d246db8b-6af8-44a6-8f81-916ee996e115", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Quorum:false, Paging:true, Codec:runtime.Codec(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0111 23:42:15.361989  120899 clientconn.go:551] parsed scheme: ""
I0111 23:42:15.362001  120899 clientconn.go:557] scheme "" not registered, fallback to default scheme
I0111 23:42:15.362034  120899 resolver_conn_wrapper.go:116] ccResolverWrapper: sending new addresses to cc: [{127.0.0.1:2379 0  <nil>}]
I0111 23:42:15.362111  120899 balancer_v1_wrapper.go:125] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0111 23:42:15.362139  120899 reflector.go:169] Listing and watching *admissionregistration.ValidatingWebhookConfiguration from storage/cacher.go:/validatingwebhookconfigurations
I0111 23:42:15.362382  120899 balancer_v1_wrapper.go:125] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0111 23:42:15.362612  120899 balancer_v1_wrapper.go:245] clientv3/balancer: pin "127.0.0.1:2379"
I0111 23:42:15.362695  120899 store.go:1414] Monitoring mutatingwebhookconfigurations.admissionregistration.k8s.io count at <storage-prefix>//mutatingwebhookconfigurations
I0111 23:42:15.362708  120899 master.go:416] Enabling API group "admissionregistration.k8s.io".
I0111 23:42:15.362759  120899 storage_factory.go:285] storing events in v1, reading as __internal from storagebackend.Config{Type:"", Prefix:"d246db8b-6af8-44a6-8f81-916ee996e115", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Quorum:false, Paging:true, Codec:runtime.Codec(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0111 23:42:15.363009  120899 clientconn.go:551] parsed scheme: ""
I0111 23:42:15.363028  120899 clientconn.go:557] scheme "" not registered, fallback to default scheme
I0111 23:42:15.363065  120899 resolver_conn_wrapper.go:116] ccResolverWrapper: sending new addresses to cc: [{127.0.0.1:2379 0  <nil>}]
I0111 23:42:15.363143  120899 balancer_v1_wrapper.go:125] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0111 23:42:15.363172  120899 reflector.go:169] Listing and watching *admissionregistration.MutatingWebhookConfiguration from storage/cacher.go:/mutatingwebhookconfigurations
I0111 23:42:15.363446  120899 balancer_v1_wrapper.go:125] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0111 23:42:15.363645  120899 balancer_v1_wrapper.go:245] clientv3/balancer: pin "127.0.0.1:2379"
I0111 23:42:15.363677  120899 store.go:1414] Monitoring events count at <storage-prefix>//events
I0111 23:42:15.363689  120899 master.go:416] Enabling API group "events.k8s.io".
I0111 23:42:15.367245  120899 balancer_v1_wrapper.go:125] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
W0111 23:42:15.374191  120899 genericapiserver.go:334] Skipping API batch/v2alpha1 because it has no resources.
W0111 23:42:15.400986  120899 genericapiserver.go:334] Skipping API rbac.authorization.k8s.io/v1alpha1 because it has no resources.
W0111 23:42:15.404920  120899 genericapiserver.go:334] Skipping API scheduling.k8s.io/v1alpha1 because it has no resources.
W0111 23:42:15.410026  120899 genericapiserver.go:334] Skipping API storage.k8s.io/v1alpha1 because it has no resources.
W0111 23:42:15.446932  120899 genericapiserver.go:334] Skipping API admissionregistration.k8s.io/v1alpha1 because it has no resources.
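
The W-level lines above show group/version filtering at install time: a group/version with no registered resources is skipped with a warning rather than served. A hedged sketch of that filtering logic follows; the map contents are assumptions for illustration, not the apiserver's actual registry.

package main

import "log"

func main() {
	// Hypothetical registry: which resources each group/version exposes.
	resourcesByGroupVersion := map[string][]string{
		"rbac.authorization.k8s.io/v1":       {"roles", "rolebindings", "clusterroles", "clusterrolebindings"},
		"rbac.authorization.k8s.io/v1alpha1": {}, // nothing registered in this build
	}
	for gv, resources := range resourcesByGroupVersion {
		if len(resources) == 0 {
			// Matches the genericapiserver.go:334 warnings in the log.
			log.Printf("W Skipping API %s because it has no resources.", gv)
			continue
		}
		log.Printf("I Installing API %s with %d resources.", gv, len(resources))
	}
}
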
I0111 23:42:15.449962  120899 healthz.go:170] healthz check etcd failed: etcd client connection not yet established
I0111 23:42:15.449987  120899 healthz.go:170] healthz check poststarthook/bootstrap-controller failed: not finished
I0111 23:42:15.449995  120899 healthz.go:170] healthz check poststarthook/rbac/bootstrap-roles failed: not finished
I0111 23:42:15.450003  120899 healthz.go:170] healthz check poststarthook/scheduling/bootstrap-system-priority-classes failed: not finished
I0111 23:42:15.450009  120899 healthz.go:170] healthz check poststarthook/ca-registration failed: not finished
I0111 23:42:15.450140  120899 wrap.go:47] GET /healthz: (274.143µs) 500
goroutine 27609 [running]:
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/httplog.(*respLogger).recordStatus(0xc00c725f10, 0x1f4)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/httplog.(*respLogger).WriteHeader(0xc00c725f10, 0x1f4)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.(*baseTimeoutWriter).WriteHeader(0xc0098ddea0, 0x1f4)
net/http.Error(0x7eff88166590, 0xc0029eed28, 0xc002e441a0, 0x18a, 0x1f4)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/healthz.handleRootHealthz.func1(0x7eff88166590, 0xc0029eed28, 0xc00c773e00)
net/http.HandlerFunc.ServeHTTP(0xc008c6fee0, 0x7eff88166590, 0xc0029eed28, 0xc00c773e00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/mux.(*pathHandler).ServeHTTP(0xc009597440, 0x7eff88166590, 0xc0029eed28, 0xc00c773e00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/mux.(*PathRecorderMux).ServeHTTP(0xc01030dc70, 0x7eff88166590, 0xc0029eed28, 0xc00c773e00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server.director.ServeHTTP(0x40e951a, 0xe, 0xc0102faa20, 0xc01030dc70, 0x7eff88166590, 0xc0029eed28, 0xc00c773e00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/filters.WithAuthorization.func1(0x7eff88166590, 0xc0029eed28, 0xc00c773e00)
net/http.HandlerFunc.ServeHTTP(0xc01030bfc0, 0x7eff88166590, 0xc0029eed28, 0xc00c773e00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.WithMaxInFlightLimit.func1(0x7eff88166590, 0xc0029eed28, 0xc00c773e00)
net/http.HandlerFunc.ServeHTTP(0xc00f7db620, 0x7eff88166590, 0xc0029eed28, 0xc00c773e00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/filters.WithImpersonation.func1(0x7eff88166590, 0xc0029eed28, 0xc00c773e00)
net/http.HandlerFunc.ServeHTTP(0xc00dc48000, 0x7eff88166590, 0xc0029eed28, 0xc00c773e00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/filters.WithAuthentication.func1(0x7eff88166590, 0xc0029eed28, 0xc00c773d00)
net/http.HandlerFunc.ServeHTTP(0xc00e43af00, 0x7eff88166590, 0xc0029eed28, 0xc00c773d00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.(*timeoutHandler).ServeHTTP.func1(0xc00c31cf60, 0xc00dc44ea0, 0x604db80, 0xc0029eed28, 0xc00c773d00)
created by k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.(*timeoutHandler).ServeHTTP

logging error output: "[+]ping ok\n[+]log ok\n[-]etcd failed: reason withheld\n[+]poststarthook/generic-apiserver-start-informers ok\n[-]poststarthook/bootstrap-controller failed: reason withheld\n[-]poststarthook/rbac/bootstrap-roles failed: reason withheld\n[-]poststarthook/scheduling/bootstrap-system-priority-classes failed: reason withheld\n[-]poststarthook/ca-registration failed: reason withheld\nhealthz check failed\n"
 [Go-http-client/1.1 127.0.0.1:38224]
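
This 500 is the aggregated /healthz handler reporting one line per named check, "[+]" for passing and "[-]" for failing, and failing the whole endpoint until every check passes. A minimal stand-in that reproduces the same output format (the real implementation is k8s.io/apiserver/pkg/server/healthz; this sketch only mirrors its observable behavior):

package main

import (
	"fmt"
	"net/http"
	"net/http/httptest"
)

type check struct {
	name string
	fn   func() error
}

// healthz aggregates named checks into the "[+]name ok" / "[-]name failed"
// report seen in the log, returning 500 while any check still fails.
func healthz(checks []check) http.HandlerFunc {
	return func(w http.ResponseWriter, r *http.Request) {
		body, failed := "", false
		for _, c := range checks {
			if err := c.fn(); err != nil {
				failed = true
				body += fmt.Sprintf("[-]%s failed: reason withheld\n", c.name)
			} else {
				body += fmt.Sprintf("[+]%s ok\n", c.name)
			}
		}
		if failed {
			// Matches the 500 plus "healthz check failed" trailer above.
			http.Error(w, body+"healthz check failed", http.StatusInternalServerError)
			return
		}
		fmt.Fprint(w, body+"healthz check passed\n")
	}
}

func main() {
	checks := []check{
		{name: "ping", fn: func() error { return nil }},
		{name: "etcd", fn: func() error { return fmt.Errorf("client connection not yet established") }},
	}
	// Exercise the handler in-process instead of binding a port.
	rec := httptest.NewRecorder()
	healthz(checks)(rec, httptest.NewRequest("GET", "/healthz", nil))
	fmt.Printf("%d\n%s", rec.Code, rec.Body.String())
}
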
I0111 23:42:15.451723  120899 wrap.go:47] GET /api/v1/services: (1.11536ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38224]
I0111 23:42:15.455394  120899 wrap.go:47] GET /api/v1/services: (978.591µs) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38224]
I0111 23:42:15.458008  120899 get.go:70] Unable to get resource : &errors.StatusError{ErrStatus:v1.Status{TypeMeta:v1.TypeMeta{Kind:"", APIVersion:""}, ListMeta:v1.ListMeta{SelfLink:"", ResourceVersion:"", Continue:""}, Status:"Failure", Message:"namespaces \"default\" not found", Reason:"NotFound", Details:(*v1.StatusDetails)(0xc005532360), Code:404}}
I0111 23:42:15.458223  120899 wrap.go:47] GET /api/v1/namespaces/default: (1.142922ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38224]
I0111 23:42:15.461307  120899 wrap.go:47] POST /api/v1/namespaces: (2.697187ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38224]
I0111 23:42:15.462735  120899 get.go:70] Unable to get resource : &errors.StatusError{ErrStatus:v1.Status{TypeMeta:v1.TypeMeta{Kind:"", APIVersion:""}, ListMeta:v1.ListMeta{SelfLink:"", ResourceVersion:"", Continue:""}, Status:"Failure", Message:"services \"kubernetes\" not found", Reason:"NotFound", Details:(*v1.StatusDetails)(0xc0056a2120), Code:404}}
I0111 23:42:15.462923  120899 wrap.go:47] GET /api/v1/namespaces/default/services/kubernetes: (1.101091ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38224]
I0111 23:42:15.466478  120899 wrap.go:47] POST /api/v1/namespaces/default/services: (3.130847ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38224]
I0111 23:42:15.468403  120899 get.go:70] Unable to get resource : &errors.StatusError{ErrStatus:v1.Status{TypeMeta:v1.TypeMeta{Kind:"", APIVersion:""}, ListMeta:v1.ListMeta{SelfLink:"", ResourceVersion:"", Continue:""}, Status:"Failure", Message:"endpoints \"kubernetes\" not found", Reason:"NotFound", Details:(*v1.StatusDetails)(0xc0053f56e0), Code:404}}
I0111 23:42:15.468622  120899 wrap.go:47] GET /api/v1/namespaces/default/endpoints/kubernetes: (1.283414ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38224]
I0111 23:42:15.471303  120899 wrap.go:47] POST /api/v1/namespaces/default/endpoints: (1.942494ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38224]
I0111 23:42:15.474330  120899 get.go:70] Unable to get resource : &errors.StatusError{ErrStatus:v1.Status{TypeMeta:v1.TypeMeta{Kind:"", APIVersion:""}, ListMeta:v1.ListMeta{SelfLink:"", ResourceVersion:"", Continue:""}, Status:"Failure", Message:"namespaces \"kube-system\" not found", Reason:"NotFound", Details:(*v1.StatusDetails)(0xc0056b7260), Code:404}}
I0111 23:42:15.474473  120899 wrap.go:47] GET /api/v1/namespaces/kube-system: (2.252171ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38226]
I0111 23:42:15.474732  120899 wrap.go:47] GET /api/v1/namespaces/default: (2.887757ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38224]
I0111 23:42:15.476946  120899 wrap.go:47] GET /api/v1/services: (2.091495ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38230]
I0111 23:42:15.477233  120899 wrap.go:47] GET /api/v1/namespaces/default/services/kubernetes: (1.747995ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38224]
I0111 23:42:15.477486  120899 wrap.go:47] GET /api/v1/services: (2.297729ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38228]
I0111 23:42:15.477614  120899 wrap.go:47] POST /api/v1/namespaces: (2.345523ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38226]
I0111 23:42:15.479329  120899 get.go:70] Unable to get resource : &errors.StatusError{ErrStatus:v1.Status{TypeMeta:v1.TypeMeta{Kind:"", APIVersion:""}, ListMeta:v1.ListMeta{SelfLink:"", ResourceVersion:"", Continue:""}, Status:"Failure", Message:"namespaces \"kube-public\" not found", Reason:"NotFound", Details:(*v1.StatusDetails)(0xc005ece000), Code:404}}
I0111 23:42:15.479452  120899 wrap.go:47] GET /api/v1/namespaces/kube-public: (1.202278ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38226]
I0111 23:42:15.480496  120899 wrap.go:47] GET /api/v1/namespaces/default/endpoints/kubernetes: (2.480039ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38224]
I0111 23:42:15.481723  120899 wrap.go:47] POST /api/v1/namespaces: (1.976552ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38226]
I0111 23:42:15.482854  120899 get.go:70] Unable to get resource : &errors.StatusError{ErrStatus:v1.Status{TypeMeta:v1.TypeMeta{Kind:"", APIVersion:""}, ListMeta:v1.ListMeta{SelfLink:"", ResourceVersion:"", Continue:""}, Status:"Failure", Message:"namespaces \"kube-node-lease\" not found", Reason:"NotFound", Details:(*v1.StatusDetails)(0xc0056a2f00), Code:404}}
I0111 23:42:15.482981  120899 wrap.go:47] GET /api/v1/namespaces/kube-node-lease: (898.38µs) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38224]
I0111 23:42:15.484802  120899 wrap.go:47] POST /api/v1/namespaces: (1.532314ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38224]
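
The GET 404 / POST 201 pairs above are a get-or-create bootstrap: the server probes for "default", "kube-system", "kube-public", and "kube-node-lease" (plus the kubernetes service and endpoints) and creates whatever is missing. Sketched below against an in-memory store; ensureNamespace is a hypothetical helper for illustration, not the bootstrap-controller's code.

package main

import (
	"errors"
	"fmt"
)

var errNotFound = errors.New("not found")

// store stands in for the API backend.
var store = map[string]bool{}

func get(name string) error {
	if !store[name] {
		return errNotFound // surfaces as "GET ... 404" in the log
	}
	return nil
}

func create(name string) { store[name] = true } // "POST ... 201"

// ensureNamespace probes first, then creates only on NotFound,
// mirroring the request pairs in the log.
func ensureNamespace(name string) {
	if err := get(name); errors.Is(err, errNotFound) {
		create(name)
		fmt.Printf("created namespace %q\n", name)
		return
	}
	fmt.Printf("namespace %q already exists\n", name)
}

func main() {
	for _, ns := range []string{"default", "kube-system", "kube-public", "kube-node-lease"} {
		ensureNamespace(ns)
	}
}
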
I0111 23:42:15.550981  120899 healthz.go:170] healthz check etcd failed: etcd client connection not yet established
I0111 23:42:15.551013  120899 healthz.go:170] healthz check poststarthook/rbac/bootstrap-roles failed: not finished
I0111 23:42:15.551024  120899 healthz.go:170] healthz check poststarthook/scheduling/bootstrap-system-priority-classes failed: not finished
I0111 23:42:15.551031  120899 healthz.go:170] healthz check poststarthook/ca-registration failed: not finished
I0111 23:42:15.551170  120899 wrap.go:47] GET /healthz: (329.217µs) 500
goroutine 27615 [running]:
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/httplog.(*respLogger).recordStatus(0xc007d1a700, 0x1f4)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/httplog.(*respLogger).WriteHeader(0xc007d1a700, 0x1f4)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.(*baseTimeoutWriter).WriteHeader(0xc008bd5f00, 0x1f4)
net/http.Error(0x7eff88166590, 0xc0029eede0, 0xc000cfa480, 0x175, 0x1f4)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/healthz.handleRootHealthz.func1(0x7eff88166590, 0xc0029eede0, 0xc005ef9c00)
net/http.HandlerFunc.ServeHTTP(0xc008c6fee0, 0x7eff88166590, 0xc0029eede0, 0xc005ef9c00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/mux.(*pathHandler).ServeHTTP(0xc009597440, 0x7eff88166590, 0xc0029eede0, 0xc005ef9c00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/mux.(*PathRecorderMux).ServeHTTP(0xc01030dc70, 0x7eff88166590, 0xc0029eede0, 0xc005ef9c00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server.director.ServeHTTP(0x40e951a, 0xe, 0xc0102faa20, 0xc01030dc70, 0x7eff88166590, 0xc0029eede0, 0xc005ef9c00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/filters.WithAuthorization.func1(0x7eff88166590, 0xc0029eede0, 0xc005ef9c00)
net/http.HandlerFunc.ServeHTTP(0xc01030bfc0, 0x7eff88166590, 0xc0029eede0, 0xc005ef9c00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.WithMaxInFlightLimit.func1(0x7eff88166590, 0xc0029eede0, 0xc005ef9c00)
net/http.HandlerFunc.ServeHTTP(0xc00f7db620, 0x7eff88166590, 0xc0029eede0, 0xc005ef9c00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/filters.WithImpersonation.func1(0x7eff88166590, 0xc0029eede0, 0xc005ef9c00)
net/http.HandlerFunc.ServeHTTP(0xc00dc48000, 0x7eff88166590, 0xc0029eede0, 0xc005ef9c00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/filters.WithAuthentication.func1(0x7eff88166590, 0xc0029eede0, 0xc005ef9b00)
net/http.HandlerFunc.ServeHTTP(0xc00e43af00, 0x7eff88166590, 0xc0029eede0, 0xc005ef9b00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.(*timeoutHandler).ServeHTTP.func1(0xc00ad9e480, 0xc00dc44ea0, 0x604db80, 0xc0029eede0, 0xc005ef9b00)
created by k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.(*timeoutHandler).ServeHTTP

logging error output: "[+]ping ok\n[+]log ok\n[-]etcd failed: reason withheld\n[+]poststarthook/generic-apiserver-start-informers ok\n[+]poststarthook/bootstrap-controller ok\n[-]poststarthook/rbac/bootstrap-roles failed: reason withheld\n[-]poststarthook/scheduling/bootstrap-system-priority-classes failed: reason withheld\n[-]poststarthook/ca-registration failed: reason withheld\nhealthz check failed\n"
 [Go-http-client/1.1 127.0.0.1:38224]
I0111 23:42:15.650977  120899 healthz.go:170] healthz check etcd failed: etcd client connection not yet established
I0111 23:42:15.651011  120899 healthz.go:170] healthz check poststarthook/rbac/bootstrap-roles failed: not finished
I0111 23:42:15.651023  120899 healthz.go:170] healthz check poststarthook/scheduling/bootstrap-system-priority-classes failed: not finished
I0111 23:42:15.651030  120899 healthz.go:170] healthz check poststarthook/ca-registration failed: not finished
I0111 23:42:15.651175  120899 wrap.go:47] GET /healthz: (329.574µs) 500
goroutine 27691 [running]:
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/httplog.(*respLogger).recordStatus(0xc007d38690, 0x1f4)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/httplog.(*respLogger).WriteHeader(0xc007d38690, 0x1f4)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.(*baseTimeoutWriter).WriteHeader(0xc003003880, 0x1f4)
net/http.Error(0x7eff88166590, 0xc002208500, 0xc002c96900, 0x175, 0x1f4)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/healthz.handleRootHealthz.func1(0x7eff88166590, 0xc002208500, 0xc00240b300)
net/http.HandlerFunc.ServeHTTP(0xc008c6fee0, 0x7eff88166590, 0xc002208500, 0xc00240b300)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/mux.(*pathHandler).ServeHTTP(0xc009597440, 0x7eff88166590, 0xc002208500, 0xc00240b300)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/mux.(*PathRecorderMux).ServeHTTP(0xc01030dc70, 0x7eff88166590, 0xc002208500, 0xc00240b300)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server.director.ServeHTTP(0x40e951a, 0xe, 0xc0102faa20, 0xc01030dc70, 0x7eff88166590, 0xc002208500, 0xc00240b300)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/filters.WithAuthorization.func1(0x7eff88166590, 0xc002208500, 0xc00240b300)
net/http.HandlerFunc.ServeHTTP(0xc01030bfc0, 0x7eff88166590, 0xc002208500, 0xc00240b300)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.WithMaxInFlightLimit.func1(0x7eff88166590, 0xc002208500, 0xc00240b300)
net/http.HandlerFunc.ServeHTTP(0xc00f7db620, 0x7eff88166590, 0xc002208500, 0xc00240b300)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/filters.WithImpersonation.func1(0x7eff88166590, 0xc002208500, 0xc00240b300)
net/http.HandlerFunc.ServeHTTP(0xc00dc48000, 0x7eff88166590, 0xc002208500, 0xc00240b300)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/filters.WithAuthentication.func1(0x7eff88166590, 0xc002208500, 0xc00240b100)
net/http.HandlerFunc.ServeHTTP(0xc00e43af00, 0x7eff88166590, 0xc002208500, 0xc00240b100)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.(*timeoutHandler).ServeHTTP.func1(0xc004054840, 0xc00dc44ea0, 0x604db80, 0xc002208500, 0xc00240b100)
created by k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.(*timeoutHandler).ServeHTTP

logging error output: "[+]ping ok\n[+]log ok\n[-]etcd failed: reason withheld\n[+]poststarthook/generic-apiserver-start-informers ok\n[+]poststarthook/bootstrap-controller ok\n[-]poststarthook/rbac/bootstrap-roles failed: reason withheld\n[-]poststarthook/scheduling/bootstrap-system-priority-classes failed: reason withheld\n[-]poststarthook/ca-registration failed: reason withheld\nhealthz check failed\n"
 [Go-http-client/1.1 127.0.0.1:38224]
I0111 23:42:15.750920  120899 healthz.go:170] healthz check etcd failed: etcd client connection not yet established
I0111 23:42:15.750956  120899 healthz.go:170] healthz check poststarthook/rbac/bootstrap-roles failed: not finished
I0111 23:42:15.750989  120899 healthz.go:170] healthz check poststarthook/scheduling/bootstrap-system-priority-classes failed: not finished
I0111 23:42:15.750997  120899 healthz.go:170] healthz check poststarthook/ca-registration failed: not finished
I0111 23:42:15.751156  120899 wrap.go:47] GET /healthz: (363.189µs) 500
goroutine 27714 [running]:
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/httplog.(*respLogger).recordStatus(0xc00c41ef50, 0x1f4)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/httplog.(*respLogger).WriteHeader(0xc00c41ef50, 0x1f4)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.(*baseTimeoutWriter).WriteHeader(0xc009a4bbe0, 0x1f4)
net/http.Error(0x7eff88166590, 0xc0094ae140, 0xc00e486300, 0x175, 0x1f4)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/healthz.handleRootHealthz.func1(0x7eff88166590, 0xc0094ae140, 0xc00c111500)
net/http.HandlerFunc.ServeHTTP(0xc008c6fee0, 0x7eff88166590, 0xc0094ae140, 0xc00c111500)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/mux.(*pathHandler).ServeHTTP(0xc009597440, 0x7eff88166590, 0xc0094ae140, 0xc00c111500)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/mux.(*PathRecorderMux).ServeHTTP(0xc01030dc70, 0x7eff88166590, 0xc0094ae140, 0xc00c111500)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server.director.ServeHTTP(0x40e951a, 0xe, 0xc0102faa20, 0xc01030dc70, 0x7eff88166590, 0xc0094ae140, 0xc00c111500)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/filters.WithAuthorization.func1(0x7eff88166590, 0xc0094ae140, 0xc00c111500)
net/http.HandlerFunc.ServeHTTP(0xc01030bfc0, 0x7eff88166590, 0xc0094ae140, 0xc00c111500)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.WithMaxInFlightLimit.func1(0x7eff88166590, 0xc0094ae140, 0xc00c111500)
net/http.HandlerFunc.ServeHTTP(0xc00f7db620, 0x7eff88166590, 0xc0094ae140, 0xc00c111500)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/filters.WithImpersonation.func1(0x7eff88166590, 0xc0094ae140, 0xc00c111500)
net/http.HandlerFunc.ServeHTTP(0xc00dc48000, 0x7eff88166590, 0xc0094ae140, 0xc00c111500)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/filters.WithAuthentication.func1(0x7eff88166590, 0xc0094ae140, 0xc00c111400)
net/http.HandlerFunc.ServeHTTP(0xc00e43af00, 0x7eff88166590, 0xc0094ae140, 0xc00c111400)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.(*timeoutHandler).ServeHTTP.func1(0xc0098400c0, 0xc00dc44ea0, 0x604db80, 0xc0094ae140, 0xc00c111400)
created by k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.(*timeoutHandler).ServeHTTP

logging error output: "[+]ping ok\n[+]log ok\n[-]etcd failed: reason withheld\n[+]poststarthook/generic-apiserver-start-informers ok\n[+]poststarthook/bootstrap-controller ok\n[-]poststarthook/rbac/bootstrap-roles failed: reason withheld\n[-]poststarthook/scheduling/bootstrap-system-priority-classes failed: reason withheld\n[-]poststarthook/ca-registration failed: reason withheld\nhealthz check failed\n"
 [Go-http-client/1.1 127.0.0.1:38224]
I0111 23:42:15.850920  120899 healthz.go:170] healthz check etcd failed: etcd client connection not yet established
I0111 23:42:15.850959  120899 healthz.go:170] healthz check poststarthook/rbac/bootstrap-roles failed: not finished
I0111 23:42:15.850969  120899 healthz.go:170] healthz check poststarthook/scheduling/bootstrap-system-priority-classes failed: not finished
I0111 23:42:15.850976  120899 healthz.go:170] healthz check poststarthook/ca-registration failed: not finished
I0111 23:42:15.851112  120899 wrap.go:47] GET /healthz: (327.527µs) 500
goroutine 27617 [running]:
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/httplog.(*respLogger).recordStatus(0xc007d1a850, 0x1f4)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/httplog.(*respLogger).WriteHeader(0xc007d1a850, 0x1f4)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.(*baseTimeoutWriter).WriteHeader(0xc0028e4500, 0x1f4)
net/http.Error(0x7eff88166590, 0xc0029eee28, 0xc000cfaa80, 0x175, 0x1f4)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/healthz.handleRootHealthz.func1(0x7eff88166590, 0xc0029eee28, 0xc00288c500)
net/http.HandlerFunc.ServeHTTP(0xc008c6fee0, 0x7eff88166590, 0xc0029eee28, 0xc00288c500)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/mux.(*pathHandler).ServeHTTP(0xc009597440, 0x7eff88166590, 0xc0029eee28, 0xc00288c500)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/mux.(*PathRecorderMux).ServeHTTP(0xc01030dc70, 0x7eff88166590, 0xc0029eee28, 0xc00288c500)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server.director.ServeHTTP(0x40e951a, 0xe, 0xc0102faa20, 0xc01030dc70, 0x7eff88166590, 0xc0029eee28, 0xc00288c500)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/filters.WithAuthorization.func1(0x7eff88166590, 0xc0029eee28, 0xc00288c500)
net/http.HandlerFunc.ServeHTTP(0xc01030bfc0, 0x7eff88166590, 0xc0029eee28, 0xc00288c500)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.WithMaxInFlightLimit.func1(0x7eff88166590, 0xc0029eee28, 0xc00288c500)
net/http.HandlerFunc.ServeHTTP(0xc00f7db620, 0x7eff88166590, 0xc0029eee28, 0xc00288c500)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/filters.WithImpersonation.func1(0x7eff88166590, 0xc0029eee28, 0xc00288c500)
net/http.HandlerFunc.ServeHTTP(0xc00dc48000, 0x7eff88166590, 0xc0029eee28, 0xc00288c500)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/filters.WithAuthentication.func1(0x7eff88166590, 0xc0029eee28, 0xc00288c400)
net/http.HandlerFunc.ServeHTTP(0xc00e43af00, 0x7eff88166590, 0xc0029eee28, 0xc00288c400)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.(*timeoutHandler).ServeHTTP.func1(0xc00ad9e720, 0xc00dc44ea0, 0x604db80, 0xc0029eee28, 0xc00288c400)
created by k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.(*timeoutHandler).ServeHTTP

logging error output: "[+]ping ok\n[+]log ok\n[-]etcd failed: reason withheld\n[+]poststarthook/generic-apiserver-start-informers ok\n[+]poststarthook/bootstrap-controller ok\n[-]poststarthook/rbac/bootstrap-roles failed: reason withheld\n[-]poststarthook/scheduling/bootstrap-system-priority-classes failed: reason withheld\n[-]poststarthook/ca-registration failed: reason withheld\nhealthz check failed\n"
 [Go-http-client/1.1 127.0.0.1:38224]
I0111 23:42:15.950957  120899 healthz.go:170] healthz check etcd failed: etcd client connection not yet established
I0111 23:42:15.950991  120899 healthz.go:170] healthz check poststarthook/rbac/bootstrap-roles failed: not finished
I0111 23:42:15.951000  120899 healthz.go:170] healthz check poststarthook/scheduling/bootstrap-system-priority-classes failed: not finished
I0111 23:42:15.951006  120899 healthz.go:170] healthz check poststarthook/ca-registration failed: not finished
I0111 23:42:15.951130  120899 wrap.go:47] GET /healthz: (286.545µs) 500
goroutine 27716 [running]:
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/httplog.(*respLogger).recordStatus(0xc00c41f180, 0x1f4)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/httplog.(*respLogger).WriteHeader(0xc00c41f180, 0x1f4)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.(*baseTimeoutWriter).WriteHeader(0xc009a4bce0, 0x1f4)
net/http.Error(0x7eff88166590, 0xc0094ae168, 0xc00e486780, 0x175, 0x1f4)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/healthz.handleRootHealthz.func1(0x7eff88166590, 0xc0094ae168, 0xc00281c100)
net/http.HandlerFunc.ServeHTTP(0xc008c6fee0, 0x7eff88166590, 0xc0094ae168, 0xc00281c100)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/mux.(*pathHandler).ServeHTTP(0xc009597440, 0x7eff88166590, 0xc0094ae168, 0xc00281c100)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/mux.(*PathRecorderMux).ServeHTTP(0xc01030dc70, 0x7eff88166590, 0xc0094ae168, 0xc00281c100)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server.director.ServeHTTP(0x40e951a, 0xe, 0xc0102faa20, 0xc01030dc70, 0x7eff88166590, 0xc0094ae168, 0xc00281c100)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/filters.WithAuthorization.func1(0x7eff88166590, 0xc0094ae168, 0xc00281c100)
net/http.HandlerFunc.ServeHTTP(0xc01030bfc0, 0x7eff88166590, 0xc0094ae168, 0xc00281c100)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.WithMaxInFlightLimit.func1(0x7eff88166590, 0xc0094ae168, 0xc00281c100)
net/http.HandlerFunc.ServeHTTP(0xc00f7db620, 0x7eff88166590, 0xc0094ae168, 0xc00281c100)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/filters.WithImpersonation.func1(0x7eff88166590, 0xc0094ae168, 0xc00281c100)
net/http.HandlerFunc.ServeHTTP(0xc00dc48000, 0x7eff88166590, 0xc0094ae168, 0xc00281c100)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/filters.WithAuthentication.func1(0x7eff88166590, 0xc0094ae168, 0xc00281c000)
net/http.HandlerFunc.ServeHTTP(0xc00e43af00, 0x7eff88166590, 0xc0094ae168, 0xc00281c000)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.(*timeoutHandler).ServeHTTP.func1(0xc0098403c0, 0xc00dc44ea0, 0x604db80, 0xc0094ae168, 0xc00281c000)
created by k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.(*timeoutHandler).ServeHTTP

logging error output: "[+]ping ok\n[+]log ok\n[-]etcd failed: reason withheld\n[+]poststarthook/generic-apiserver-start-informers ok\n[+]poststarthook/bootstrap-controller ok\n[-]poststarthook/rbac/bootstrap-roles failed: reason withheld\n[-]poststarthook/scheduling/bootstrap-system-priority-classes failed: reason withheld\n[-]poststarthook/ca-registration failed: reason withheld\nhealthz check failed\n"
 [Go-http-client/1.1 127.0.0.1:38224]
I0111 23:42:16.050896  120899 healthz.go:170] healthz check etcd failed: etcd client connection not yet established
I0111 23:42:16.050924  120899 healthz.go:170] healthz check poststarthook/rbac/bootstrap-roles failed: not finished
I0111 23:42:16.050935  120899 healthz.go:170] healthz check poststarthook/scheduling/bootstrap-system-priority-classes failed: not finished
I0111 23:42:16.050943  120899 healthz.go:170] healthz check poststarthook/ca-registration failed: not finished
I0111 23:42:16.051111  120899 wrap.go:47] GET /healthz: (339.268µs) 500
goroutine 27673 [running]:
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/httplog.(*respLogger).recordStatus(0xc007d2b8f0, 0x1f4)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/httplog.(*respLogger).WriteHeader(0xc007d2b8f0, 0x1f4)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.(*baseTimeoutWriter).WriteHeader(0xc0036a56a0, 0x1f4)
net/http.Error(0x7eff88166590, 0xc009086260, 0xc0024e7200, 0x175, 0x1f4)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/healthz.handleRootHealthz.func1(0x7eff88166590, 0xc009086260, 0xc004a50800)
net/http.HandlerFunc.ServeHTTP(0xc008c6fee0, 0x7eff88166590, 0xc009086260, 0xc004a50800)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/mux.(*pathHandler).ServeHTTP(0xc009597440, 0x7eff88166590, 0xc009086260, 0xc004a50800)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/mux.(*PathRecorderMux).ServeHTTP(0xc01030dc70, 0x7eff88166590, 0xc009086260, 0xc004a50800)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server.director.ServeHTTP(0x40e951a, 0xe, 0xc0102faa20, 0xc01030dc70, 0x7eff88166590, 0xc009086260, 0xc004a50800)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/filters.WithAuthorization.func1(0x7eff88166590, 0xc009086260, 0xc004a50800)
net/http.HandlerFunc.ServeHTTP(0xc01030bfc0, 0x7eff88166590, 0xc009086260, 0xc004a50800)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.WithMaxInFlightLimit.func1(0x7eff88166590, 0xc009086260, 0xc004a50800)
net/http.HandlerFunc.ServeHTTP(0xc00f7db620, 0x7eff88166590, 0xc009086260, 0xc004a50800)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/filters.WithImpersonation.func1(0x7eff88166590, 0xc009086260, 0xc004a50800)
net/http.HandlerFunc.ServeHTTP(0xc00dc48000, 0x7eff88166590, 0xc009086260, 0xc004a50800)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/filters.WithAuthentication.func1(0x7eff88166590, 0xc009086260, 0xc004a50700)
net/http.HandlerFunc.ServeHTTP(0xc00e43af00, 0x7eff88166590, 0xc009086260, 0xc004a50700)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.(*timeoutHandler).ServeHTTP.func1(0xc00b5d6ea0, 0xc00dc44ea0, 0x604db80, 0xc009086260, 0xc004a50700)
created by k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.(*timeoutHandler).ServeHTTP

logging error output: "[+]ping ok\n[+]log ok\n[-]etcd failed: reason withheld\n[+]poststarthook/generic-apiserver-start-informers ok\n[+]poststarthook/bootstrap-controller ok\n[-]poststarthook/rbac/bootstrap-roles failed: reason withheld\n[-]poststarthook/scheduling/bootstrap-system-priority-classes failed: reason withheld\n[-]poststarthook/ca-registration failed: reason withheld\nhealthz check failed\n"
 [Go-http-client/1.1 127.0.0.1:38224]
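
The repeated 500s above all come from the same place: an aggregated /healthz endpoint that runs each named check and prints one "[+]name ok" or "[-]name failed: reason withheld" line per check, returning 500 if any check fails while logging the real reason server-side (the healthz.go:170 lines). A minimal, illustrative Go sketch of that shape follows; it is not the k8s.io/apiserver implementation, and everything except the check names and messages taken from the log is an assumption.

// healthz_sketch.go — minimal aggregated health endpoint mimicking the
// [+]/[-] output and 500-on-any-failure behavior in the log above.
package main

import (
	"fmt"
	"net/http"
	"strings"
)

type check struct {
	name string
	fn   func() error
}

func handleHealthz(checks []check) http.HandlerFunc {
	return func(w http.ResponseWriter, r *http.Request) {
		var b strings.Builder
		failed := false
		for _, c := range checks {
			if err := c.fn(); err != nil {
				failed = true
				// The wire response withholds the reason; the detailed
				// error is logged server-side, as in healthz.go:170 above.
				fmt.Fprintf(&b, "[-]%s failed: reason withheld\n", c.name)
			} else {
				fmt.Fprintf(&b, "[+]%s ok\n", c.name)
			}
		}
		if failed {
			b.WriteString("healthz check failed\n")
			http.Error(w, b.String(), http.StatusInternalServerError) // the 500s above
			return
		}
		fmt.Fprint(w, "ok")
	}
}

func main() {
	etcdReady := false // in the real server this flips once the etcd client is pinned
	checks := []check{
		{"ping", func() error { return nil }},
		{"etcd", func() error {
			if !etcdReady {
				return fmt.Errorf("etcd client connection not yet established")
			}
			return nil
		}},
	}
	http.HandleFunc("/healthz", handleHealthz(checks))
	http.ListenAndServe("127.0.0.1:8080", nil)
}
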
I0111 23:42:16.072234  120899 clientconn.go:551] parsed scheme: ""
I0111 23:42:16.072307  120899 clientconn.go:557] scheme "" not registered, fallback to default scheme
I0111 23:42:16.072361  120899 resolver_conn_wrapper.go:116] ccResolverWrapper: sending new addresses to cc: [{127.0.0.1:2379 0  <nil>}]
I0111 23:42:16.072439  120899 balancer_v1_wrapper.go:125] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0111 23:42:16.072785  120899 balancer_v1_wrapper.go:245] clientv3/balancer: pin "127.0.0.1:2379"
I0111 23:42:16.072874  120899 balancer_v1_wrapper.go:125] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0111 23:42:16.151818  120899 healthz.go:170] healthz check poststarthook/rbac/bootstrap-roles failed: not finished
I0111 23:42:16.151845  120899 healthz.go:170] healthz check poststarthook/scheduling/bootstrap-system-priority-classes failed: not finished
I0111 23:42:16.151852  120899 healthz.go:170] healthz check poststarthook/ca-registration failed: not finished
I0111 23:42:16.151998  120899 wrap.go:47] GET /healthz: (1.16319ms) 500
goroutine 27718 [running]:
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/httplog.(*respLogger).recordStatus(0xc00c41f3b0, 0x1f4)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/httplog.(*respLogger).WriteHeader(0xc00c41f3b0, 0x1f4)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.(*baseTimeoutWriter).WriteHeader(0xc009a4bf80, 0x1f4)
net/http.Error(0x7eff88166590, 0xc0094ae190, 0xc0027d4420, 0x160, 0x1f4)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/healthz.handleRootHealthz.func1(0x7eff88166590, 0xc0094ae190, 0xc00281cd00)
net/http.HandlerFunc.ServeHTTP(0xc008c6fee0, 0x7eff88166590, 0xc0094ae190, 0xc00281cd00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/mux.(*pathHandler).ServeHTTP(0xc009597440, 0x7eff88166590, 0xc0094ae190, 0xc00281cd00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/mux.(*PathRecorderMux).ServeHTTP(0xc01030dc70, 0x7eff88166590, 0xc0094ae190, 0xc00281cd00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server.director.ServeHTTP(0x40e951a, 0xe, 0xc0102faa20, 0xc01030dc70, 0x7eff88166590, 0xc0094ae190, 0xc00281cd00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/filters.WithAuthorization.func1(0x7eff88166590, 0xc0094ae190, 0xc00281cd00)
net/http.HandlerFunc.ServeHTTP(0xc01030bfc0, 0x7eff88166590, 0xc0094ae190, 0xc00281cd00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.WithMaxInFlightLimit.func1(0x7eff88166590, 0xc0094ae190, 0xc00281cd00)
net/http.HandlerFunc.ServeHTTP(0xc00f7db620, 0x7eff88166590, 0xc0094ae190, 0xc00281cd00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/filters.WithImpersonation.func1(0x7eff88166590, 0xc0094ae190, 0xc00281cd00)
net/http.HandlerFunc.ServeHTTP(0xc00dc48000, 0x7eff88166590, 0xc0094ae190, 0xc00281cd00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/filters.WithAuthentication.func1(0x7eff88166590, 0xc0094ae190, 0xc00281cb00)
net/http.HandlerFunc.ServeHTTP(0xc00e43af00, 0x7eff88166590, 0xc0094ae190, 0xc00281cb00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.(*timeoutHandler).ServeHTTP.func1(0xc0098408a0, 0xc00dc44ea0, 0x604db80, 0xc0094ae190, 0xc00281cb00)
created by k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.(*timeoutHandler).ServeHTTP

logging error output: "[+]ping ok\n[+]log ok\n[+]etcd ok\n[+]poststarthook/generic-apiserver-start-informers ok\n[+]poststarthook/bootstrap-controller ok\n[-]poststarthook/rbac/bootstrap-roles failed: reason withheld\n[-]poststarthook/scheduling/bootstrap-system-priority-classes failed: reason withheld\n[-]poststarthook/ca-registration failed: reason withheld\nhealthz check failed\n"
 [Go-http-client/1.1 127.0.0.1:38224]
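
Note the timestamps: the caller hits /healthz roughly every 100ms (23:42:15.85, .95, 16.05, 16.15, ...) and keeps going until the 500s stop; by this point "[+]etcd ok" has replaced the earlier etcd failure, leaving only the poststarthooks. A sketch of such a poll loop, with the URL, interval, and timeout as assumed placeholders:

// wait_healthz_sketch.go — illustrative readiness poll matching the ~100ms
// cadence of the GET /healthz requests above.
package main

import (
	"fmt"
	"net/http"
	"time"
)

func waitForHealthy(url string, interval, timeout time.Duration) error {
	deadline := time.Now().Add(timeout)
	for time.Now().Before(deadline) {
		resp, err := http.Get(url)
		if err == nil {
			resp.Body.Close()
			if resp.StatusCode == http.StatusOK {
				return nil // every check reported [+] ok
			}
			// A 500 here corresponds to a "healthz check failed" body above.
		}
		time.Sleep(interval)
	}
	return fmt.Errorf("healthz at %s not ready within %v", url, timeout)
}

func main() {
	err := waitForHealthy("http://127.0.0.1:8080/healthz", 100*time.Millisecond, 30*time.Second)
	fmt.Println(err)
}
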
I0111 23:42:16.251840  120899 healthz.go:170] healthz check poststarthook/rbac/bootstrap-roles failed: not finished
I0111 23:42:16.251874  120899 healthz.go:170] healthz check poststarthook/scheduling/bootstrap-system-priority-classes failed: not finished
I0111 23:42:16.251882  120899 healthz.go:170] healthz check poststarthook/ca-registration failed: not finished
I0111 23:42:16.252037  120899 wrap.go:47] GET /healthz: (1.217593ms) 500
goroutine 27732 [running]:
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/httplog.(*respLogger).recordStatus(0xc007d38850, 0x1f4)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/httplog.(*respLogger).WriteHeader(0xc007d38850, 0x1f4)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.(*baseTimeoutWriter).WriteHeader(0xc003003cc0, 0x1f4)
net/http.Error(0x7eff88166590, 0xc002208530, 0xc0027d46e0, 0x160, 0x1f4)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/healthz.handleRootHealthz.func1(0x7eff88166590, 0xc002208530, 0xc00240bb00)
net/http.HandlerFunc.ServeHTTP(0xc008c6fee0, 0x7eff88166590, 0xc002208530, 0xc00240bb00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/mux.(*pathHandler).ServeHTTP(0xc009597440, 0x7eff88166590, 0xc002208530, 0xc00240bb00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/mux.(*PathRecorderMux).ServeHTTP(0xc01030dc70, 0x7eff88166590, 0xc002208530, 0xc00240bb00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server.director.ServeHTTP(0x40e951a, 0xe, 0xc0102faa20, 0xc01030dc70, 0x7eff88166590, 0xc002208530, 0xc00240bb00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/filters.WithAuthorization.func1(0x7eff88166590, 0xc002208530, 0xc00240bb00)
net/http.HandlerFunc.ServeHTTP(0xc01030bfc0, 0x7eff88166590, 0xc002208530, 0xc00240bb00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.WithMaxInFlightLimit.func1(0x7eff88166590, 0xc002208530, 0xc00240bb00)
net/http.HandlerFunc.ServeHTTP(0xc00f7db620, 0x7eff88166590, 0xc002208530, 0xc00240bb00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/filters.WithImpersonation.func1(0x7eff88166590, 0xc002208530, 0xc00240bb00)
net/http.HandlerFunc.ServeHTTP(0xc00dc48000, 0x7eff88166590, 0xc002208530, 0xc00240bb00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/filters.WithAuthentication.func1(0x7eff88166590, 0xc002208530, 0xc00240b900)
net/http.HandlerFunc.ServeHTTP(0xc00e43af00, 0x7eff88166590, 0xc002208530, 0xc00240b900)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.(*timeoutHandler).ServeHTTP.func1(0xc004055140, 0xc00dc44ea0, 0x604db80, 0xc002208530, 0xc00240b900)
created by k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.(*timeoutHandler).ServeHTTP

logging error output: "[+]ping ok\n[+]log ok\n[+]etcd ok\n[+]poststarthook/generic-apiserver-start-informers ok\n[+]poststarthook/bootstrap-controller ok\n[-]poststarthook/rbac/bootstrap-roles failed: reason withheld\n[-]poststarthook/scheduling/bootstrap-system-priority-classes failed: reason withheld\n[-]poststarthook/ca-registration failed: reason withheld\nhealthz check failed\n"
 [Go-http-client/1.1 127.0.0.1:38224]
I0111 23:42:16.351989  120899 healthz.go:170] healthz check poststarthook/rbac/bootstrap-roles failed: not finished
I0111 23:42:16.352017  120899 healthz.go:170] healthz check poststarthook/scheduling/bootstrap-system-priority-classes failed: not finished
I0111 23:42:16.352026  120899 healthz.go:170] healthz check poststarthook/ca-registration failed: not finished
I0111 23:42:16.352169  120899 wrap.go:47] GET /healthz: (1.306145ms) 500
goroutine 27679 [running]:
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/httplog.(*respLogger).recordStatus(0xc007d2bab0, 0x1f4)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/httplog.(*respLogger).WriteHeader(0xc007d2bab0, 0x1f4)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.(*baseTimeoutWriter).WriteHeader(0xc0036a5c80, 0x1f4)
net/http.Error(0x7eff88166590, 0xc0090862e0, 0xc0093e6160, 0x160, 0x1f4)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/healthz.handleRootHealthz.func1(0x7eff88166590, 0xc0090862e0, 0xc004a51000)
net/http.HandlerFunc.ServeHTTP(0xc008c6fee0, 0x7eff88166590, 0xc0090862e0, 0xc004a51000)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/mux.(*pathHandler).ServeHTTP(0xc009597440, 0x7eff88166590, 0xc0090862e0, 0xc004a51000)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/mux.(*PathRecorderMux).ServeHTTP(0xc01030dc70, 0x7eff88166590, 0xc0090862e0, 0xc004a51000)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server.director.ServeHTTP(0x40e951a, 0xe, 0xc0102faa20, 0xc01030dc70, 0x7eff88166590, 0xc0090862e0, 0xc004a51000)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/filters.WithAuthorization.func1(0x7eff88166590, 0xc0090862e0, 0xc004a51000)
net/http.HandlerFunc.ServeHTTP(0xc01030bfc0, 0x7eff88166590, 0xc0090862e0, 0xc004a51000)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.WithMaxInFlightLimit.func1(0x7eff88166590, 0xc0090862e0, 0xc004a51000)
net/http.HandlerFunc.ServeHTTP(0xc00f7db620, 0x7eff88166590, 0xc0090862e0, 0xc004a51000)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/filters.WithImpersonation.func1(0x7eff88166590, 0xc0090862e0, 0xc004a51000)
net/http.HandlerFunc.ServeHTTP(0xc00dc48000, 0x7eff88166590, 0xc0090862e0, 0xc004a51000)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/filters.WithAuthentication.func1(0x7eff88166590, 0xc0090862e0, 0xc004a50f00)
net/http.HandlerFunc.ServeHTTP(0xc00e43af00, 0x7eff88166590, 0xc0090862e0, 0xc004a50f00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.(*timeoutHandler).ServeHTTP.func1(0xc00b5d7500, 0xc00dc44ea0, 0x604db80, 0xc0090862e0, 0xc004a50f00)
created by k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.(*timeoutHandler).ServeHTTP

logging error output: "[+]ping ok\n[+]log ok\n[+]etcd ok\n[+]poststarthook/generic-apiserver-start-informers ok\n[+]poststarthook/bootstrap-controller ok\n[-]poststarthook/rbac/bootstrap-roles failed: reason withheld\n[-]poststarthook/scheduling/bootstrap-system-priority-classes failed: reason withheld\n[-]poststarthook/ca-registration failed: reason withheld\nhealthz check failed\n"
 [Go-http-client/1.1 127.0.0.1:38224]
I0111 23:42:16.451590  120899 healthz.go:170] healthz check poststarthook/rbac/bootstrap-roles failed: not finished
I0111 23:42:16.451619  120899 healthz.go:170] healthz check poststarthook/scheduling/bootstrap-system-priority-classes failed: not finished
I0111 23:42:16.451628  120899 healthz.go:170] healthz check poststarthook/ca-registration failed: not finished
I0111 23:42:16.451789  120899 wrap.go:47] GET /healthz: (963.339µs) 500
goroutine 27728 [running]:
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/httplog.(*respLogger).recordStatus(0xc00c41fb90, 0x1f4)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/httplog.(*respLogger).WriteHeader(0xc00c41fb90, 0x1f4)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.(*baseTimeoutWriter).WriteHeader(0xc0028d7d40, 0x1f4)
net/http.Error(0x7eff88166590, 0xc0094ae1f8, 0xc002f82580, 0x160, 0x1f4)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/healthz.handleRootHealthz.func1(0x7eff88166590, 0xc0094ae1f8, 0xc002720900)
net/http.HandlerFunc.ServeHTTP(0xc008c6fee0, 0x7eff88166590, 0xc0094ae1f8, 0xc002720900)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/mux.(*pathHandler).ServeHTTP(0xc009597440, 0x7eff88166590, 0xc0094ae1f8, 0xc002720900)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/mux.(*PathRecorderMux).ServeHTTP(0xc01030dc70, 0x7eff88166590, 0xc0094ae1f8, 0xc002720900)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server.director.ServeHTTP(0x40e951a, 0xe, 0xc0102faa20, 0xc01030dc70, 0x7eff88166590, 0xc0094ae1f8, 0xc002720900)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/filters.WithAuthorization.func1(0x7eff88166590, 0xc0094ae1f8, 0xc002720900)
net/http.HandlerFunc.ServeHTTP(0xc01030bfc0, 0x7eff88166590, 0xc0094ae1f8, 0xc002720900)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.WithMaxInFlightLimit.func1(0x7eff88166590, 0xc0094ae1f8, 0xc002720900)
net/http.HandlerFunc.ServeHTTP(0xc00f7db620, 0x7eff88166590, 0xc0094ae1f8, 0xc002720900)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/filters.WithImpersonation.func1(0x7eff88166590, 0xc0094ae1f8, 0xc002720900)
net/http.HandlerFunc.ServeHTTP(0xc00dc48000, 0x7eff88166590, 0xc0094ae1f8, 0xc002720900)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/filters.WithAuthentication.func1(0x7eff88166590, 0xc0094ae1f8, 0xc002720700)
net/http.HandlerFunc.ServeHTTP(0xc00e43af00, 0x7eff88166590, 0xc0094ae1f8, 0xc002720700)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.(*timeoutHandler).ServeHTTP.func1(0xc0098414a0, 0xc00dc44ea0, 0x604db80, 0xc0094ae1f8, 0xc002720700)
created by k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.(*timeoutHandler).ServeHTTP

logging error output: "[+]ping ok\n[+]log ok\n[+]etcd ok\n[+]poststarthook/generic-apiserver-start-informers ok\n[+]poststarthook/bootstrap-controller ok\n[-]poststarthook/rbac/bootstrap-roles failed: reason withheld\n[-]poststarthook/scheduling/bootstrap-system-priority-classes failed: reason withheld\n[-]poststarthook/ca-registration failed: reason withheld\nhealthz check failed\n"
 [Go-http-client/1.1 127.0.0.1:38390]
I0111 23:42:16.451922  120899 get.go:70] Unable to get resource : &errors.StatusError{ErrStatus:v1.Status{TypeMeta:v1.TypeMeta{Kind:"", APIVersion:""}, ListMeta:v1.ListMeta{SelfLink:"", ResourceVersion:"", Continue:""}, Status:"Failure", Message:"priorityclasses.scheduling.k8s.io \"system-node-critical\" not found", Reason:"NotFound", Details:(*v1.StatusDetails)(0xc006101140), Code:404}}
I0111 23:42:16.452085  120899 wrap.go:47] GET /apis/scheduling.k8s.io/v1beta1/priorityclasses/system-node-critical: (2.303499ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38228]
I0111 23:42:16.452327  120899 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/clusterroles: (2.309892ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38224]
I0111 23:42:16.452355  120899 wrap.go:47] GET /api/v1/namespaces/kube-system: (1.939903ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38388]
I0111 23:42:16.454362  120899 get.go:70] Unable to get resource : &errors.StatusError{ErrStatus:v1.Status{TypeMeta:v1.TypeMeta{Kind:"", APIVersion:""}, ListMeta:v1.ListMeta{SelfLink:"", ResourceVersion:"", Continue:""}, Status:"Failure", Message:"configmaps \"extension-apiserver-authentication\" not found", Reason:"NotFound", Details:(*v1.StatusDetails)(0xc005de70e0), Code:404}}
I0111 23:42:16.454433  120899 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/clusterrolebindings: (1.748188ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38224]
I0111 23:42:16.454488  120899 wrap.go:47] GET /api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication: (1.689781ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38390]
I0111 23:42:16.454655  120899 wrap.go:47] POST /apis/scheduling.k8s.io/v1beta1/priorityclasses: (1.651449ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38392]
I0111 23:42:16.454857  120899 storage_scheduling.go:91] created PriorityClass system-node-critical with value 2000001000
I0111 23:42:16.455895  120899 get.go:70] Unable to get resource : &errors.StatusError{ErrStatus:v1.Status{TypeMeta:v1.TypeMeta{Kind:"", APIVersion:""}, ListMeta:v1.ListMeta{SelfLink:"", ResourceVersion:"", Continue:""}, Status:"Failure", Message:"clusterroles.rbac.authorization.k8s.io \"system:aggregate-to-admin\" not found", Reason:"NotFound", Details:(*v1.StatusDetails)(0xc006297140), Code:404}}
I0111 23:42:16.456017  120899 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/clusterroles/system:aggregate-to-admin: (1.24294ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38224]
I0111 23:42:16.456134  120899 get.go:70] Unable to get resource : &errors.StatusError{ErrStatus:v1.Status{TypeMeta:v1.TypeMeta{Kind:"", APIVersion:""}, ListMeta:v1.ListMeta{SelfLink:"", ResourceVersion:"", Continue:""}, Status:"Failure", Message:"priorityclasses.scheduling.k8s.io \"system-cluster-critical\" not found", Reason:"NotFound", Details:(*v1.StatusDetails)(0xc0062974a0), Code:404}}
I0111 23:42:16.456212  120899 wrap.go:47] GET /apis/scheduling.k8s.io/v1beta1/priorityclasses/system-cluster-critical: (1.238335ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38392]
I0111 23:42:16.456525  120899 wrap.go:47] POST /api/v1/namespaces/kube-system/configmaps: (1.724179ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38390]
I0111 23:42:16.457713  120899 get.go:70] Unable to get resource : &errors.StatusError{ErrStatus:v1.Status{TypeMeta:v1.TypeMeta{Kind:"", APIVersion:""}, ListMeta:v1.ListMeta{SelfLink:"", ResourceVersion:"", Continue:""}, Status:"Failure", Message:"clusterroles.rbac.authorization.k8s.io \"admin\" not found", Reason:"NotFound", Details:(*v1.StatusDetails)(0xc005ecf320), Code:404}}
I0111 23:42:16.457871  120899 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/clusterroles/admin: (1.542344ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38224]
I0111 23:42:16.457961  120899 wrap.go:47] POST /apis/scheduling.k8s.io/v1beta1/priorityclasses: (1.398537ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38392]
I0111 23:42:16.458102  120899 storage_scheduling.go:91] created PriorityClass system-cluster-critical with value 2000000000
I0111 23:42:16.458112  120899 storage_scheduling.go:100] all system priority classes are created successfully or already exist.
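
The priority-class lines just above, and the long run of clusterrole lines that follows, all show the same idempotent bootstrap pattern: GET the object, and only on a 404 POST it (hence each "Unable to get resource ... NotFound" paired with a 201). A sketch of that pattern at the HTTP level, with the server address and JSON payload as stand-ins rather than the real test fixtures:

// bootstrap_sketch.go — illustrative "get, then create on 404" bootstrap,
// mirroring the GET 404 / POST 201 pairs in the log above.
package main

import (
	"bytes"
	"fmt"
	"net/http"
)

// ensureResource GETs the named object and POSTs it only when absent, so
// repeated bootstraps are idempotent ("created ... or already exist").
func ensureResource(base, collection, name string, body []byte) error {
	resp, err := http.Get(base + collection + "/" + name)
	if err != nil {
		return err
	}
	resp.Body.Close()
	if resp.StatusCode == http.StatusOK {
		return nil // already exists, nothing to do
	}
	if resp.StatusCode != http.StatusNotFound {
		return fmt.Errorf("unexpected GET status %d", resp.StatusCode)
	}
	post, err := http.Post(base+collection, "application/json", bytes.NewReader(body))
	if err != nil {
		return err
	}
	post.Body.Close()
	if post.StatusCode != http.StatusCreated { // the 201s above
		return fmt.Errorf("unexpected POST status %d", post.StatusCode)
	}
	return nil
}

func main() {
	// Path and name taken from the log; the payload is a placeholder.
	err := ensureResource("http://127.0.0.1:8080",
		"/apis/scheduling.k8s.io/v1beta1/priorityclasses",
		"system-node-critical",
		[]byte(`{"metadata":{"name":"system-node-critical"},"value":2000001000}`))
	fmt.Println(err)
}
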
I0111 23:42:16.459155  120899 get.go:70] Unable to get resource : &errors.StatusError{ErrStatus:v1.Status{TypeMeta:v1.TypeMeta{Kind:"", APIVersion:""}, ListMeta:v1.ListMeta{SelfLink:"", ResourceVersion:"", Continue:""}, Status:"Failure", Message:"clusterroles.rbac.authorization.k8s.io \"system:aggregate-to-edit\" not found", Reason:"NotFound", Details:(*v1.StatusDetails)(0xc0063d4480), Code:404}}
I0111 23:42:16.459300  120899 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/clusterroles/system:aggregate-to-edit: (932.259µs) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38392]
I0111 23:42:16.460294  120899 get.go:70] Unable to get resource : &errors.StatusError{ErrStatus:v1.Status{TypeMeta:v1.TypeMeta{Kind:"", APIVersion:""}, ListMeta:v1.ListMeta{SelfLink:"", ResourceVersion:"", Continue:""}, Status:"Failure", Message:"clusterroles.rbac.authorization.k8s.io \"edit\" not found", Reason:"NotFound", Details:(*v1.StatusDetails)(0xc006101c20), Code:404}}
I0111 23:42:16.460407  120899 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/clusterroles/edit: (823.487µs) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38392]
I0111 23:42:16.461443  120899 get.go:70] Unable to get resource : &errors.StatusError{ErrStatus:v1.Status{TypeMeta:v1.TypeMeta{Kind:"", APIVersion:""}, ListMeta:v1.ListMeta{SelfLink:"", ResourceVersion:"", Continue:""}, Status:"Failure", Message:"clusterroles.rbac.authorization.k8s.io \"system:aggregate-to-view\" not found", Reason:"NotFound", Details:(*v1.StatusDetails)(0xc006177080), Code:404}}
I0111 23:42:16.461558  120899 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/clusterroles/system:aggregate-to-view: (828.427µs) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38392]
I0111 23:42:16.462575  120899 get.go:70] Unable to get resource : &errors.StatusError{ErrStatus:v1.Status{TypeMeta:v1.TypeMeta{Kind:"", APIVersion:""}, ListMeta:v1.ListMeta{SelfLink:"", ResourceVersion:"", Continue:""}, Status:"Failure", Message:"clusterroles.rbac.authorization.k8s.io \"view\" not found", Reason:"NotFound", Details:(*v1.StatusDetails)(0xc0063f0480), Code:404}}
I0111 23:42:16.462676  120899 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/clusterroles/view: (816.664µs) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38392]
I0111 23:42:16.463657  120899 get.go:70] Unable to get resource : &errors.StatusError{ErrStatus:v1.Status{TypeMeta:v1.TypeMeta{Kind:"", APIVersion:""}, ListMeta:v1.ListMeta{SelfLink:"", ResourceVersion:"", Continue:""}, Status:"Failure", Message:"clusterroles.rbac.authorization.k8s.io \"cluster-admin\" not found", Reason:"NotFound", Details:(*v1.StatusDetails)(0xc0063ea1e0), Code:404}}
I0111 23:42:16.463799  120899 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/clusterroles/cluster-admin: (814.895µs) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38392]
I0111 23:42:16.466683  120899 wrap.go:47] POST /apis/rbac.authorization.k8s.io/v1/clusterroles: (2.069553ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38392]
I0111 23:42:16.467388  120899 storage_rbac.go:187] created clusterrole.rbac.authorization.k8s.io/cluster-admin
I0111 23:42:16.468695  120899 get.go:70] Unable to get resource : &errors.StatusError{ErrStatus:v1.Status{TypeMeta:v1.TypeMeta{Kind:"", APIVersion:""}, ListMeta:v1.ListMeta{SelfLink:"", ResourceVersion:"", Continue:""}, Status:"Failure", Message:"clusterroles.rbac.authorization.k8s.io \"system:discovery\" not found", Reason:"NotFound", Details:(*v1.StatusDetails)(0xc0063ead20), Code:404}}
I0111 23:42:16.468891  120899 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/clusterroles/system:discovery: (1.255672ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38392]
I0111 23:42:16.471145  120899 wrap.go:47] POST /apis/rbac.authorization.k8s.io/v1/clusterroles: (1.922865ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38392]
I0111 23:42:16.471404  120899 storage_rbac.go:187] created clusterrole.rbac.authorization.k8s.io/system:discovery
I0111 23:42:16.472583  120899 get.go:70] Unable to get resource : &errors.StatusError{ErrStatus:v1.Status{TypeMeta:v1.TypeMeta{Kind:"", APIVersion:""}, ListMeta:v1.ListMeta{SelfLink:"", ResourceVersion:"", Continue:""}, Status:"Failure", Message:"clusterroles.rbac.authorization.k8s.io \"system:basic-user\" not found", Reason:"NotFound", Details:(*v1.StatusDetails)(0xc00643ca20), Code:404}}
I0111 23:42:16.472758  120899 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/clusterroles/system:basic-user: (1.180019ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38392]
I0111 23:42:16.474799  120899 wrap.go:47] POST /apis/rbac.authorization.k8s.io/v1/clusterroles: (1.727644ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38392]
I0111 23:42:16.475110  120899 storage_rbac.go:187] created clusterrole.rbac.authorization.k8s.io/system:basic-user
I0111 23:42:16.475994  120899 get.go:70] Unable to get resource : &errors.StatusError{ErrStatus:v1.Status{TypeMeta:v1.TypeMeta{Kind:"", APIVersion:""}, ListMeta:v1.ListMeta{SelfLink:"", ResourceVersion:"", Continue:""}, Status:"Failure", Message:"clusterroles.rbac.authorization.k8s.io \"admin\" not found", Reason:"NotFound", Details:(*v1.StatusDetails)(0xc0063f1680), Code:404}}
I0111 23:42:16.476116  120899 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/clusterroles/admin: (861.584µs) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38392]
I0111 23:42:16.478246  120899 wrap.go:47] POST /apis/rbac.authorization.k8s.io/v1/clusterroles: (1.746561ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38392]
I0111 23:42:16.478679  120899 storage_rbac.go:187] created clusterrole.rbac.authorization.k8s.io/admin
I0111 23:42:16.479614  120899 get.go:70] Unable to get resource : &errors.StatusError{ErrStatus:v1.Status{TypeMeta:v1.TypeMeta{Kind:"", APIVersion:""}, ListMeta:v1.ListMeta{SelfLink:"", ResourceVersion:"", Continue:""}, Status:"Failure", Message:"clusterroles.rbac.authorization.k8s.io \"edit\" not found", Reason:"NotFound", Details:(*v1.StatusDetails)(0xc006492ea0), Code:404}}
I0111 23:42:16.479758  120899 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/clusterroles/edit: (884.91µs) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38392]
I0111 23:42:16.481816  120899 wrap.go:47] POST /apis/rbac.authorization.k8s.io/v1/clusterroles: (1.722944ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38392]
I0111 23:42:16.482029  120899 storage_rbac.go:187] created clusterrole.rbac.authorization.k8s.io/edit
I0111 23:42:16.483030  120899 get.go:70] Unable to get resource : &errors.StatusError{ErrStatus:v1.Status{TypeMeta:v1.TypeMeta{Kind:"", APIVersion:""}, ListMeta:v1.ListMeta{SelfLink:"", ResourceVersion:"", Continue:""}, Status:"Failure", Message:"clusterroles.rbac.authorization.k8s.io \"view\" not found", Reason:"NotFound", Details:(*v1.StatusDetails)(0xc00636df80), Code:404}}
I0111 23:42:16.483159  120899 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/clusterroles/view: (926.463µs) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38392]
I0111 23:42:16.485461  120899 wrap.go:47] POST /apis/rbac.authorization.k8s.io/v1/clusterroles: (1.914283ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38392]
I0111 23:42:16.485658  120899 storage_rbac.go:187] created clusterrole.rbac.authorization.k8s.io/view
I0111 23:42:16.486728  120899 get.go:70] Unable to get resource : &errors.StatusError{ErrStatus:v1.Status{TypeMeta:v1.TypeMeta{Kind:"", APIVersion:""}, ListMeta:v1.ListMeta{SelfLink:"", ResourceVersion:"", Continue:""}, Status:"Failure", Message:"clusterroles.rbac.authorization.k8s.io \"system:aggregate-to-admin\" not found", Reason:"NotFound", Details:(*v1.StatusDetails)(0xc006550f60), Code:404}}
I0111 23:42:16.486886  120899 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/clusterroles/system:aggregate-to-admin: (1.049613ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38392]
I0111 23:42:16.489348  120899 wrap.go:47] POST /apis/rbac.authorization.k8s.io/v1/clusterroles: (2.107186ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38392]
I0111 23:42:16.489609  120899 storage_rbac.go:187] created clusterrole.rbac.authorization.k8s.io/system:aggregate-to-admin
I0111 23:42:16.489702  120899 cacher.go:598] cacher (*rbac.ClusterRole): 1 objects queued in incoming channel.
I0111 23:42:16.490661  120899 get.go:70] Unable to get resource : &errors.StatusError{ErrStatus:v1.Status{TypeMeta:v1.TypeMeta{Kind:"", APIVersion:""}, ListMeta:v1.ListMeta{SelfLink:"", ResourceVersion:"", Continue:""}, Status:"Failure", Message:"clusterroles.rbac.authorization.k8s.io \"system:aggregate-to-edit\" not found", Reason:"NotFound", Details:(*v1.StatusDetails)(0xc0061cf3e0), Code:404}}
I0111 23:42:16.490823  120899 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/clusterroles/system:aggregate-to-edit: (971.739µs) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38392]
I0111 23:42:16.494078  120899 wrap.go:47] POST /apis/rbac.authorization.k8s.io/v1/clusterroles: (2.799375ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38392]
I0111 23:42:16.494389  120899 storage_rbac.go:187] created clusterrole.rbac.authorization.k8s.io/system:aggregate-to-edit
I0111 23:42:16.495960  120899 get.go:70] Unable to get resource : &errors.StatusError{ErrStatus:v1.Status{TypeMeta:v1.TypeMeta{Kind:"", APIVersion:""}, ListMeta:v1.ListMeta{SelfLink:"", ResourceVersion:"", Continue:""}, Status:"Failure", Message:"clusterroles.rbac.authorization.k8s.io \"system:aggregate-to-view\" not found", Reason:"NotFound", Details:(*v1.StatusDetails)(0xc006502720), Code:404}}
I0111 23:42:16.496087  120899 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/clusterroles/system:aggregate-to-view: (1.521825ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38392]
I0111 23:42:16.498878  120899 wrap.go:47] POST /apis/rbac.authorization.k8s.io/v1/clusterroles: (2.375738ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38392]
I0111 23:42:16.499247  120899 storage_rbac.go:187] created clusterrole.rbac.authorization.k8s.io/system:aggregate-to-view
I0111 23:42:16.500858  120899 get.go:70] Unable to get resource : &errors.StatusError{ErrStatus:v1.Status{TypeMeta:v1.TypeMeta{Kind:"", APIVersion:""}, ListMeta:v1.ListMeta{SelfLink:"", ResourceVersion:"", Continue:""}, Status:"Failure", Message:"clusterroles.rbac.authorization.k8s.io \"system:heapster\" not found", Reason:"NotFound", Details:(*v1.StatusDetails)(0xc00654cd20), Code:404}}
I0111 23:42:16.501006  120899 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/clusterroles/system:heapster: (1.433119ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38392]
I0111 23:42:16.503981  120899 wrap.go:47] POST /apis/rbac.authorization.k8s.io/v1/clusterroles: (2.543946ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38392]
I0111 23:42:16.504206  120899 storage_rbac.go:187] created clusterrole.rbac.authorization.k8s.io/system:heapster
I0111 23:42:16.505400  120899 get.go:70] Unable to get resource : &errors.StatusError{ErrStatus:v1.Status{TypeMeta:v1.TypeMeta{Kind:"", APIVersion:""}, ListMeta:v1.ListMeta{SelfLink:"", ResourceVersion:"", Continue:""}, Status:"Failure", Message:"clusterroles.rbac.authorization.k8s.io \"system:node\" not found", Reason:"NotFound", Details:(*v1.StatusDetails)(0xc005ed9860), Code:404}}
I0111 23:42:16.505530  120899 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/clusterroles/system:node: (1.10494ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38392]
I0111 23:42:16.508050  120899 wrap.go:47] POST /apis/rbac.authorization.k8s.io/v1/clusterroles: (2.107945ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38392]
I0111 23:42:16.508407  120899 storage_rbac.go:187] created clusterrole.rbac.authorization.k8s.io/system:node
I0111 23:42:16.509409  120899 get.go:70] Unable to get resource : &errors.StatusError{ErrStatus:v1.Status{TypeMeta:v1.TypeMeta{Kind:"", APIVersion:""}, ListMeta:v1.ListMeta{SelfLink:"", ResourceVersion:"", Continue:""}, Status:"Failure", Message:"clusterroles.rbac.authorization.k8s.io \"system:node-problem-detector\" not found", Reason:"NotFound", Details:(*v1.StatusDetails)(0xc006503bc0), Code:404}}
I0111 23:42:16.509536  120899 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/clusterroles/system:node-problem-detector: (933.827µs) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38392]
I0111 23:42:16.511697  120899 wrap.go:47] POST /apis/rbac.authorization.k8s.io/v1/clusterroles: (1.756326ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38392]
I0111 23:42:16.511967  120899 storage_rbac.go:187] created clusterrole.rbac.authorization.k8s.io/system:node-problem-detector
I0111 23:42:16.512833  120899 get.go:70] Unable to get resource : &errors.StatusError{ErrStatus:v1.Status{TypeMeta:v1.TypeMeta{Kind:"", APIVersion:""}, ListMeta:v1.ListMeta{SelfLink:"", ResourceVersion:"", Continue:""}, Status:"Failure", Message:"clusterroles.rbac.authorization.k8s.io \"system:node-proxier\" not found", Reason:"NotFound", Details:(*v1.StatusDetails)(0xc0068591a0), Code:404}}
I0111 23:42:16.513012  120899 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/clusterroles/system:node-proxier: (895.741µs) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38392]
I0111 23:42:16.516143  120899 wrap.go:47] POST /apis/rbac.authorization.k8s.io/v1/clusterroles: (2.75293ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38392]
I0111 23:42:16.516367  120899 storage_rbac.go:187] created clusterrole.rbac.authorization.k8s.io/system:node-proxier
I0111 23:42:16.517246  120899 get.go:70] Unable to get resource : &errors.StatusError{ErrStatus:v1.Status{TypeMeta:v1.TypeMeta{Kind:"", APIVersion:""}, ListMeta:v1.ListMeta{SelfLink:"", ResourceVersion:"", Continue:""}, Status:"Failure", Message:"clusterroles.rbac.authorization.k8s.io \"system:kubelet-api-admin\" not found", Reason:"NotFound", Details:(*v1.StatusDetails)(0xc006a4e4e0), Code:404}}
I0111 23:42:16.517399  120899 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/clusterroles/system:kubelet-api-admin: (876.332µs) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38392]
I0111 23:42:16.519377  120899 wrap.go:47] POST /apis/rbac.authorization.k8s.io/v1/clusterroles: (1.680473ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38392]
I0111 23:42:16.519725  120899 storage_rbac.go:187] created clusterrole.rbac.authorization.k8s.io/system:kubelet-api-admin
I0111 23:42:16.520614  120899 get.go:70] Unable to get resource : &errors.StatusError{ErrStatus:v1.Status{TypeMeta:v1.TypeMeta{Kind:"", APIVersion:""}, ListMeta:v1.ListMeta{SelfLink:"", ResourceVersion:"", Continue:""}, Status:"Failure", Message:"clusterroles.rbac.authorization.k8s.io \"system:node-bootstrapper\" not found", Reason:"NotFound", Details:(*v1.StatusDetails)(0xc0069c0780), Code:404}}
I0111 23:42:16.520814  120899 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/clusterroles/system:node-bootstrapper: (864.778µs) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38392]
I0111 23:42:16.522805  120899 wrap.go:47] POST /apis/rbac.authorization.k8s.io/v1/clusterroles: (1.637079ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38392]
I0111 23:42:16.523010  120899 storage_rbac.go:187] created clusterrole.rbac.authorization.k8s.io/system:node-bootstrapper
I0111 23:42:16.523865  120899 get.go:70] Unable to get resource : &errors.StatusError{ErrStatus:v1.Status{TypeMeta:v1.TypeMeta{Kind:"", APIVersion:""}, ListMeta:v1.ListMeta{SelfLink:"", ResourceVersion:"", Continue:""}, Status:"Failure", Message:"clusterroles.rbac.authorization.k8s.io \"system:auth-delegator\" not found", Reason:"NotFound", Details:(*v1.StatusDetails)(0xc006bb6000), Code:404}}
I0111 23:42:16.523979  120899 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/clusterroles/system:auth-delegator: (774.103µs) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38392]
I0111 23:42:16.526121  120899 wrap.go:47] POST /apis/rbac.authorization.k8s.io/v1/clusterroles: (1.73945ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38392]
I0111 23:42:16.526353  120899 storage_rbac.go:187] created clusterrole.rbac.authorization.k8s.io/system:auth-delegator
I0111 23:42:16.527327  120899 get.go:70] Unable to get resource : &errors.StatusError{ErrStatus:v1.Status{TypeMeta:v1.TypeMeta{Kind:"", APIVersion:""}, ListMeta:v1.ListMeta{SelfLink:"", ResourceVersion:"", Continue:""}, Status:"Failure", Message:"clusterroles.rbac.authorization.k8s.io \"system:kube-aggregator\" not found", Reason:"NotFound", Details:(*v1.StatusDetails)(0xc0069c1aa0), Code:404}}
I0111 23:42:16.527455  120899 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/clusterroles/system:kube-aggregator: (879.283µs) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38392]
I0111 23:42:16.529404  120899 wrap.go:47] POST /apis/rbac.authorization.k8s.io/v1/clusterroles: (1.562212ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38392]
I0111 23:42:16.529627  120899 storage_rbac.go:187] created clusterrole.rbac.authorization.k8s.io/system:kube-aggregator
I0111 23:42:16.530697  120899 get.go:70] Unable to get resource : &errors.StatusError{ErrStatus:v1.Status{TypeMeta:v1.TypeMeta{Kind:"", APIVersion:""}, ListMeta:v1.ListMeta{SelfLink:"", ResourceVersion:"", Continue:""}, Status:"Failure", Message:"clusterroles.rbac.authorization.k8s.io \"system:kube-controller-manager\" not found", Reason:"NotFound", Details:(*v1.StatusDetails)(0xc006e00240), Code:404}}
I0111 23:42:16.530843  120899 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/clusterroles/system:kube-controller-manager: (982.497µs) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38392]
I0111 23:42:16.533089  120899 wrap.go:47] POST /apis/rbac.authorization.k8s.io/v1/clusterroles: (1.851503ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38392]
I0111 23:42:16.533403  120899 storage_rbac.go:187] created clusterrole.rbac.authorization.k8s.io/system:kube-controller-manager
I0111 23:42:16.534385  120899 get.go:70] Unable to get resource : &errors.StatusError{ErrStatus:v1.Status{TypeMeta:v1.TypeMeta{Kind:"", APIVersion:""}, ListMeta:v1.ListMeta{SelfLink:"", ResourceVersion:"", Continue:""}, Status:"Failure", Message:"clusterroles.rbac.authorization.k8s.io \"system:kube-scheduler\" not found", Reason:"NotFound", Details:(*v1.StatusDetails)(0xc006e456e0), Code:404}}
I0111 23:42:16.534534  120899 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/clusterroles/system:kube-scheduler: (899.723µs) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38392]
I0111 23:42:16.536735  120899 wrap.go:47] POST /apis/rbac.authorization.k8s.io/v1/clusterroles: (1.702701ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38392]
I0111 23:42:16.536992  120899 storage_rbac.go:187] created clusterrole.rbac.authorization.k8s.io/system:kube-scheduler
I0111 23:42:16.537927  120899 get.go:70] Unable to get resource : &errors.StatusError{ErrStatus:v1.Status{TypeMeta:v1.TypeMeta{Kind:"", APIVersion:""}, ListMeta:v1.ListMeta{SelfLink:"", ResourceVersion:"", Continue:""}, Status:"Failure", Message:"clusterroles.rbac.authorization.k8s.io \"system:kube-dns\" not found", Reason:"NotFound", Details:(*v1.StatusDetails)(0xc0073fd6e0), Code:404}}
I0111 23:42:16.538063  120899 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/clusterroles/system:kube-dns: (835.243µs) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38392]
I0111 23:42:16.540442  120899 wrap.go:47] POST /apis/rbac.authorization.k8s.io/v1/clusterroles: (2.000195ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38392]
I0111 23:42:16.540642  120899 storage_rbac.go:187] created clusterrole.rbac.authorization.k8s.io/system:kube-dns
I0111 23:42:16.541729  120899 get.go:70] Unable to get resource : &errors.StatusError{ErrStatus:v1.Status{TypeMeta:v1.TypeMeta{Kind:"", APIVersion:""}, ListMeta:v1.ListMeta{SelfLink:"", ResourceVersion:"", Continue:""}, Status:"Failure", Message:"clusterroles.rbac.authorization.k8s.io \"system:persistent-volume-provisioner\" not found", Reason:"NotFound", Details:(*v1.StatusDetails)(0xc0070a51a0), Code:404}}
I0111 23:42:16.541895  120899 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/clusterroles/system:persistent-volume-provisioner: (1.057735ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38392]
I0111 23:42:16.546089  120899 wrap.go:47] POST /apis/rbac.authorization.k8s.io/v1/clusterroles: (3.833681ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38392]
I0111 23:42:16.546367  120899 storage_rbac.go:187] created clusterrole.rbac.authorization.k8s.io/system:persistent-volume-provisioner
I0111 23:42:16.547612  120899 get.go:70] Unable to get resource : &errors.StatusError{ErrStatus:v1.Status{TypeMeta:v1.TypeMeta{Kind:"", APIVersion:""}, ListMeta:v1.ListMeta{SelfLink:"", ResourceVersion:"", Continue:""}, Status:"Failure", Message:"clusterroles.rbac.authorization.k8s.io \"system:csi-external-attacher\" not found", Reason:"NotFound", Details:(*v1.StatusDetails)(0xc006b06fc0), Code:404}}
I0111 23:42:16.547833  120899 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/clusterroles/system:csi-external-attacher: (1.247537ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38392]
I0111 23:42:16.550490  120899 wrap.go:47] POST /apis/rbac.authorization.k8s.io/v1/clusterroles: (2.166852ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38392]
I0111 23:42:16.550724  120899 storage_rbac.go:187] created clusterrole.rbac.authorization.k8s.io/system:csi-external-attacher
I0111 23:42:16.551861  120899 get.go:70] Unable to get resource : &errors.StatusError{ErrStatus:v1.Status{TypeMeta:v1.TypeMeta{Kind:"", APIVersion:""}, ListMeta:v1.ListMeta{SelfLink:"", ResourceVersion:"", Continue:""}, Status:"Failure", Message:"clusterroles.rbac.authorization.k8s.io \"system:aws-cloud-provider\" not found", Reason:"NotFound", Details:(*v1.StatusDetails)(0xc0076bb5c0), Code:404}}
I0111 23:42:16.552064  120899 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/clusterroles/system:aws-cloud-provider: (973.291µs) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38392]
I0111 23:42:16.552161  120899 healthz.go:170] healthz check poststarthook/rbac/bootstrap-roles failed: not finished
I0111 23:42:16.552383  120899 wrap.go:47] GET /healthz: (1.725962ms) 500
goroutine 27882 [running]:
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/httplog.(*respLogger).recordStatus(0xc007e0e930, 0x1f4)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/httplog.(*respLogger).WriteHeader(0xc007e0e930, 0x1f4)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.(*baseTimeoutWriter).WriteHeader(0xc002dcaa00, 0x1f4)
net/http.Error(0x7eff88166590, 0xc0094ae950, 0xc004aa68c0, 0x136, 0x1f4)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/healthz.handleRootHealthz.func1(0x7eff88166590, 0xc0094ae950, 0xc003910800)
net/http.HandlerFunc.ServeHTTP(0xc008c6fee0, 0x7eff88166590, 0xc0094ae950, 0xc003910800)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/mux.(*pathHandler).ServeHTTP(0xc009597440, 0x7eff88166590, 0xc0094ae950, 0xc003910800)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/mux.(*PathRecorderMux).ServeHTTP(0xc01030dc70, 0x7eff88166590, 0xc0094ae950, 0xc003910800)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server.director.ServeHTTP(0x40e951a, 0xe, 0xc0102faa20, 0xc01030dc70, 0x7eff88166590, 0xc0094ae950, 0xc003910800)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/filters.WithAuthorization.func1(0x7eff88166590, 0xc0094ae950, 0xc003910800)
net/http.HandlerFunc.ServeHTTP(0xc01030bfc0, 0x7eff88166590, 0xc0094ae950, 0xc003910800)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.WithMaxInFlightLimit.func1(0x7eff88166590, 0xc0094ae950, 0xc003910800)
net/http.HandlerFunc.ServeHTTP(0xc00f7db620, 0x7eff88166590, 0xc0094ae950, 0xc003910800)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/filters.WithImpersonation.func1(0x7eff88166590, 0xc0094ae950, 0xc003910800)
net/http.HandlerFunc.ServeHTTP(0xc00dc48000, 0x7eff88166590, 0xc0094ae950, 0xc003910800)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/filters.WithAuthentication.func1(0x7eff88166590, 0xc0094ae950, 0xc003910700)
net/http.HandlerFunc.ServeHTTP(0xc00e43af00, 0x7eff88166590, 0xc0094ae950, 0xc003910700)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.(*timeoutHandler).ServeHTTP.func1(0xc0085d2d80, 0xc00dc44ea0, 0x604db80, 0xc0094ae950, 0xc003910700)
created by k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.(*timeoutHandler).ServeHTTP

logging error output: "[+]ping ok\n[+]log ok\n[+]etcd ok\n[+]poststarthook/generic-apiserver-start-informers ok\n[+]poststarthook/bootstrap-controller ok\n[-]poststarthook/rbac/bootstrap-roles failed: reason withheld\n[+]poststarthook/scheduling/bootstrap-system-priority-classes ok\n[+]poststarthook/ca-registration ok\nhealthz check failed\n"
 [Go-http-client/1.1 127.0.0.1:38390]
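
By this healthz sample, scheduling/bootstrap-system-priority-classes and ca-registration have flipped to "[+] ok" and only rbac/bootstrap-roles still reports "not finished": each post-start hook gates its health check on the completion of its own bootstrap work. A toy sketch of that gating, with the flag, timings, and names assumed for illustration (and using Go 1.19+ atomic.Bool, where older code would use an int32):

// hook_gate_sketch.go — illustrative post-start-hook gate: the check fails
// with "not finished" until a bootstrap goroutine flips a flag, mimicking
// how poststarthook/rbac/bootstrap-roles turns from [-] to [+] above.
package main

import (
	"fmt"
	"sync/atomic"
	"time"
)

type hookGate struct{ done atomic.Bool }

func (g *hookGate) check() error {
	if !g.done.Load() {
		return fmt.Errorf("not finished") // matches the healthz.go:170 lines
	}
	return nil
}

func main() {
	var gate hookGate
	go func() { // stand-in for the RBAC role bootstrap
		time.Sleep(200 * time.Millisecond)
		gate.done.Store(true)
	}()
	for i := 0; i < 5; i++ {
		fmt.Println("check:", gate.check())
		time.Sleep(100 * time.Millisecond)
	}
}
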
I0111 23:42:16.554509  120899 wrap.go:47] POST /apis/rbac.authorization.k8s.io/v1/clusterroles: (1.708128ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38390]
I0111 23:42:16.556077  120899 storage_rbac.go:187] created clusterrole.rbac.authorization.k8s.io/system:aws-cloud-provider
I0111 23:42:16.557218  120899 get.go:70] Unable to get resource : &errors.StatusError{ErrStatus:v1.Status{TypeMeta:v1.TypeMeta{Kind:"", APIVersion:""}, ListMeta:v1.ListMeta{SelfLink:"", ResourceVersion:"", Continue:""}, Status:"Failure", Message:"clusterroles.rbac.authorization.k8s.io \"system:certificates.k8s.io:certificatesigningrequests:nodeclient\" not found", Reason:"NotFound", Details:(*v1.StatusDetails)(0xc0064b3aa0), Code:404}}
I0111 23:42:16.557402  120899 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/clusterroles/system:certificates.k8s.io:certificatesigningrequests:nodeclient: (1.073982ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38390]
I0111 23:42:16.559576  120899 wrap.go:47] POST /apis/rbac.authorization.k8s.io/v1/clusterroles: (1.779141ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38390]
I0111 23:42:16.559770  120899 storage_rbac.go:187] created clusterrole.rbac.authorization.k8s.io/system:certificates.k8s.io:certificatesigningrequests:nodeclient
I0111 23:42:16.563630  120899 get.go:70] Unable to get resource : &errors.StatusError{ErrStatus:v1.Status{TypeMeta:v1.TypeMeta{Kind:"", APIVersion:""}, ListMeta:v1.ListMeta{SelfLink:"", ResourceVersion:"", Continue:""}, Status:"Failure", Message:"clusterroles.rbac.authorization.k8s.io \"system:certificates.k8s.io:certificatesigningrequests:selfnodeclient\" not found", Reason:"NotFound", Details:(*v1.StatusDetails)(0xc0062feea0), Code:404}}
I0111 23:42:16.563824  120899 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/clusterroles/system:certificates.k8s.io:certificatesigningrequests:selfnodeclient: (3.900216ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38390]
I0111 23:42:16.566659  120899 wrap.go:47] POST /apis/rbac.authorization.k8s.io/v1/clusterroles: (2.433485ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38390]
I0111 23:42:16.566962  120899 storage_rbac.go:187] created clusterrole.rbac.authorization.k8s.io/system:certificates.k8s.io:certificatesigningrequests:selfnodeclient
I0111 23:42:16.568069  120899 get.go:70] Unable to get resource : &errors.StatusError{ErrStatus:v1.Status{TypeMeta:v1.TypeMeta{Kind:"", APIVersion:""}, ListMeta:v1.ListMeta{SelfLink:"", ResourceVersion:"", Continue:""}, Status:"Failure", Message:"clusterroles.rbac.authorization.k8s.io \"system:volume-scheduler\" not found", Reason:"NotFound", Details:(*v1.StatusDetails)(0xc007b695c0), Code:404}}
I0111 23:42:16.568199  120899 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/clusterroles/system:volume-scheduler: (1.036589ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38390]
I0111 23:42:16.570765  120899 wrap.go:47] POST /apis/rbac.authorization.k8s.io/v1/clusterroles: (2.006349ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38390]
I0111 23:42:16.571178  120899 storage_rbac.go:187] created clusterrole.rbac.authorization.k8s.io/system:volume-scheduler
I0111 23:42:16.572534  120899 get.go:70] Unable to get resource : &errors.StatusError{ErrStatus:v1.Status{TypeMeta:v1.TypeMeta{Kind:"", APIVersion:""}, ListMeta:v1.ListMeta{SelfLink:"", ResourceVersion:"", Continue:""}, Status:"Failure", Message:"clusterroles.rbac.authorization.k8s.io \"system:csi-external-provisioner\" not found", Reason:"NotFound", Details:(*v1.StatusDetails)(0xc007f62240), Code:404}}
I0111 23:42:16.572698  120899 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/clusterroles/system:csi-external-provisioner: (1.201752ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38390]
I0111 23:42:16.575233  120899 wrap.go:47] POST /apis/rbac.authorization.k8s.io/v1/clusterroles: (2.097157ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38390]
I0111 23:42:16.575913  120899 storage_rbac.go:187] created clusterrole.rbac.authorization.k8s.io/system:csi-external-provisioner
I0111 23:42:16.577029  120899 get.go:70] Unable to get resource : &errors.StatusError{ErrStatus:v1.Status{TypeMeta:v1.TypeMeta{Kind:"", APIVersion:""}, ListMeta:v1.ListMeta{SelfLink:"", ResourceVersion:"", Continue:""}, Status:"Failure", Message:"clusterroles.rbac.authorization.k8s.io \"system:controller:attachdetach-controller\" not found", Reason:"NotFound", Details:(*v1.StatusDetails)(0xc007e7e6c0), Code:404}}
I0111 23:42:16.577178  120899 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/clusterroles/system:controller:attachdetach-controller: (1.05849ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38390]
I0111 23:42:16.579979  120899 wrap.go:47] POST /apis/rbac.authorization.k8s.io/v1/clusterroles: (2.113027ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38390]
I0111 23:42:16.580412  120899 storage_rbac.go:187] created clusterrole.rbac.authorization.k8s.io/system:controller:attachdetach-controller
I0111 23:42:16.581450  120899 get.go:70] Unable to get resource : &errors.StatusError{ErrStatus:v1.Status{TypeMeta:v1.TypeMeta{Kind:"", APIVersion:""}, ListMeta:v1.ListMeta{SelfLink:"", ResourceVersion:"", Continue:""}, Status:"Failure", Message:"clusterroles.rbac.authorization.k8s.io \"system:controller:clusterrole-aggregation-controller\" not found", Reason:"NotFound", Details:(*v1.StatusDetails)(0xc0065a0780), Code:404}}
I0111 23:42:16.581594  120899 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/clusterroles/system:controller:clusterrole-aggregation-controller: (1.025008ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38390]
I0111 23:42:16.583921  120899 wrap.go:47] POST /apis/rbac.authorization.k8s.io/v1/clusterroles: (1.828973ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38390]
I0111 23:42:16.584116  120899 storage_rbac.go:187] created clusterrole.rbac.authorization.k8s.io/system:controller:clusterrole-aggregation-controller
I0111 23:42:16.585113  120899 get.go:70] Unable to get resource : &errors.StatusError{ErrStatus:v1.Status{TypeMeta:v1.TypeMeta{Kind:"", APIVersion:""}, ListMeta:v1.ListMeta{SelfLink:"", ResourceVersion:"", Continue:""}, Status:"Failure", Message:"clusterroles.rbac.authorization.k8s.io \"system:controller:cronjob-controller\" not found", Reason:"NotFound", Details:(*v1.StatusDetails)(0xc007c7e300), Code:404}}
I0111 23:42:16.585255  120899 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/clusterroles/system:controller:cronjob-controller: (906.836µs) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38390]
I0111 23:42:16.587395  120899 wrap.go:47] POST /apis/rbac.authorization.k8s.io/v1/clusterroles: (1.696781ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38390]
I0111 23:42:16.587596  120899 storage_rbac.go:187] created clusterrole.rbac.authorization.k8s.io/system:controller:cronjob-controller
I0111 23:42:16.588513  120899 get.go:70] Unable to get resource : &errors.StatusError{ErrStatus:v1.Status{TypeMeta:v1.TypeMeta{Kind:"", APIVersion:""}, ListMeta:v1.ListMeta{SelfLink:"", ResourceVersion:"", Continue:""}, Status:"Failure", Message:"clusterroles.rbac.authorization.k8s.io \"system:controller:daemon-set-controller\" not found", Reason:"NotFound", Details:(*v1.StatusDetails)(0xc007c7f8c0), Code:404}}
I0111 23:42:16.588665  120899 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/clusterroles/system:controller:daemon-set-controller: (876.525µs) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38390]
I0111 23:42:16.593159  120899 wrap.go:47] POST /apis/rbac.authorization.k8s.io/v1/clusterroles: (4.094556ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38390]
I0111 23:42:16.593441  120899 storage_rbac.go:187] created clusterrole.rbac.authorization.k8s.io/system:controller:daemon-set-controller
I0111 23:42:16.596655  120899 get.go:70] Unable to get resource : &errors.StatusError{ErrStatus:v1.Status{TypeMeta:v1.TypeMeta{Kind:"", APIVersion:""}, ListMeta:v1.ListMeta{SelfLink:"", ResourceVersion:"", Continue:""}, Status:"Failure", Message:"clusterroles.rbac.authorization.k8s.io \"system:controller:deployment-controller\" not found", Reason:"NotFound", Details:(*v1.StatusDetails)(0xc008c13980), Code:404}}
I0111 23:42:16.596849  120899 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/clusterroles/system:controller:deployment-controller: (3.248704ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38390]
I0111 23:42:16.599160  120899 wrap.go:47] POST /apis/rbac.authorization.k8s.io/v1/clusterroles: (1.908436ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38390]
I0111 23:42:16.599456  120899 storage_rbac.go:187] created clusterrole.rbac.authorization.k8s.io/system:controller:deployment-controller
I0111 23:42:16.600541  120899 get.go:70] Unable to get resource : &errors.StatusError{ErrStatus:v1.Status{TypeMeta:v1.TypeMeta{Kind:"", APIVersion:""}, ListMeta:v1.ListMeta{SelfLink:"", ResourceVersion:"", Continue:""}, Status:"Failure", Message:"clusterroles.rbac.authorization.k8s.io \"system:controller:disruption-controller\" not found", Reason:"NotFound", Details:(*v1.StatusDetails)(0xc007e7fce0), Code:404}}
I0111 23:42:16.600673  120899 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/clusterroles/system:controller:disruption-controller: (1.041641ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38390]
I0111 23:42:16.603357  120899 wrap.go:47] POST /apis/rbac.authorization.k8s.io/v1/clusterroles: (2.122046ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38390]
I0111 23:42:16.603593  120899 storage_rbac.go:187] created clusterrole.rbac.authorization.k8s.io/system:controller:disruption-controller
I0111 23:42:16.604848  120899 get.go:70] Unable to get resource : &errors.StatusError{ErrStatus:v1.Status{TypeMeta:v1.TypeMeta{Kind:"", APIVersion:""}, ListMeta:v1.ListMeta{SelfLink:"", ResourceVersion:"", Continue:""}, Status:"Failure", Message:"clusterroles.rbac.authorization.k8s.io \"system:controller:endpoint-controller\" not found", Reason:"NotFound", Details:(*v1.StatusDetails)(0xc00927ecc0), Code:404}}
I0111 23:42:16.605019  120899 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/clusterroles/system:controller:endpoint-controller: (1.015659ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38390]
I0111 23:42:16.607392  120899 wrap.go:47] POST /apis/rbac.authorization.k8s.io/v1/clusterroles: (1.909983ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38390]
I0111 23:42:16.607622  120899 storage_rbac.go:187] created clusterrole.rbac.authorization.k8s.io/system:controller:endpoint-controller
I0111 23:42:16.608629  120899 get.go:70] Unable to get resource : &errors.StatusError{ErrStatus:v1.Status{TypeMeta:v1.TypeMeta{Kind:"", APIVersion:""}, ListMeta:v1.ListMeta{SelfLink:"", ResourceVersion:"", Continue:""}, Status:"Failure", Message:"clusterroles.rbac.authorization.k8s.io \"system:controller:expand-controller\" not found", Reason:"NotFound", Details:(*v1.StatusDetails)(0xc009564840), Code:404}}
I0111 23:42:16.608797  120899 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/clusterroles/system:controller:expand-controller: (927.131µs) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38390]
I0111 23:42:16.611087  120899 wrap.go:47] POST /apis/rbac.authorization.k8s.io/v1/clusterroles: (1.898772ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38390]
I0111 23:42:16.611569  120899 storage_rbac.go:187] created clusterrole.rbac.authorization.k8s.io/system:controller:expand-controller
I0111 23:42:16.612612  120899 get.go:70] Unable to get resource : &errors.StatusError{ErrStatus:v1.Status{TypeMeta:v1.TypeMeta{Kind:"", APIVersion:""}, ListMeta:v1.ListMeta{SelfLink:"", ResourceVersion:"", Continue:""}, Status:"Failure", Message:"clusterroles.rbac.authorization.k8s.io \"system:controller:generic-garbage-collector\" not found", Reason:"NotFound", Details:(*v1.StatusDetails)(0xc0095072c0), Code:404}}
I0111 23:42:16.612925  120899 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/clusterroles/system:controller:generic-garbage-collector: (1.181584ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38390]
I0111 23:42:16.615595  120899 wrap.go:47] POST /apis/rbac.authorization.k8s.io/v1/clusterroles: (1.812191ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38390]
I0111 23:42:16.615809  120899 storage_rbac.go:187] created clusterrole.rbac.authorization.k8s.io/system:controller:generic-garbage-collector
I0111 23:42:16.616984  120899 get.go:70] Unable to get resource : &errors.StatusError{ErrStatus:v1.Status{TypeMeta:v1.TypeMeta{Kind:"", APIVersion:""}, ListMeta:v1.ListMeta{SelfLink:"", ResourceVersion:"", Continue:""}, Status:"Failure", Message:"clusterroles.rbac.authorization.k8s.io \"system:controller:horizontal-pod-autoscaler\" not found", Reason:"NotFound", Details:(*v1.StatusDetails)(0xc00971f560), Code:404}}
I0111 23:42:16.617115  120899 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/clusterroles/system:controller:horizontal-pod-autoscaler: (1.117305ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38390]
I0111 23:42:16.619726  120899 wrap.go:47] POST /apis/rbac.authorization.k8s.io/v1/clusterroles: (2.225295ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38390]
I0111 23:42:16.619967  120899 storage_rbac.go:187] created clusterrole.rbac.authorization.k8s.io/system:controller:horizontal-pod-autoscaler
I0111 23:42:16.621037  120899 get.go:70] Unable to get resource : &errors.StatusError{ErrStatus:v1.Status{TypeMeta:v1.TypeMeta{Kind:"", APIVersion:""}, ListMeta:v1.ListMeta{SelfLink:"", ResourceVersion:"", Continue:""}, Status:"Failure", Message:"clusterroles.rbac.authorization.k8s.io \"system:controller:job-controller\" not found", Reason:"NotFound", Details:(*v1.StatusDetails)(0xc0098645a0), Code:404}}
I0111 23:42:16.621211  120899 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/clusterroles/system:controller:job-controller: (1.080687ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38390]
I0111 23:42:16.623607  120899 wrap.go:47] POST /apis/rbac.authorization.k8s.io/v1/clusterroles: (1.991187ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38390]
I0111 23:42:16.624025  120899 storage_rbac.go:187] created clusterrole.rbac.authorization.k8s.io/system:controller:job-controller
I0111 23:42:16.625030  120899 get.go:70] Unable to get resource : &errors.StatusError{ErrStatus:v1.Status{TypeMeta:v1.TypeMeta{Kind:"", APIVersion:""}, ListMeta:v1.ListMeta{SelfLink:"", ResourceVersion:"", Continue:""}, Status:"Failure", Message:"clusterroles.rbac.authorization.k8s.io \"system:controller:namespace-controller\" not found", Reason:"NotFound", Details:(*v1.StatusDetails)(0xc006b4daa0), Code:404}}
I0111 23:42:16.625181  120899 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/clusterroles/system:controller:namespace-controller: (963.501µs) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38390]
I0111 23:42:16.627432  120899 wrap.go:47] POST /apis/rbac.authorization.k8s.io/v1/clusterroles: (1.864959ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38390]
I0111 23:42:16.627697  120899 storage_rbac.go:187] created clusterrole.rbac.authorization.k8s.io/system:controller:namespace-controller
I0111 23:42:16.628721  120899 get.go:70] Unable to get resource : &errors.StatusError{ErrStatus:v1.Status{TypeMeta:v1.TypeMeta{Kind:"", APIVersion:""}, ListMeta:v1.ListMeta{SelfLink:"", ResourceVersion:"", Continue:""}, Status:"Failure", Message:"clusterroles.rbac.authorization.k8s.io \"system:controller:node-controller\" not found", Reason:"NotFound", Details:(*v1.StatusDetails)(0xc006a58960), Code:404}}
I0111 23:42:16.628897  120899 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/clusterroles/system:controller:node-controller: (1.001877ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38390]
I0111 23:42:16.631094  120899 wrap.go:47] POST /apis/rbac.authorization.k8s.io/v1/clusterroles: (1.887796ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38390]
I0111 23:42:16.631538  120899 storage_rbac.go:187] created clusterrole.rbac.authorization.k8s.io/system:controller:node-controller
I0111 23:42:16.632503  120899 get.go:70] Unable to get resource : &errors.StatusError{ErrStatus:v1.Status{TypeMeta:v1.TypeMeta{Kind:"", APIVersion:""}, ListMeta:v1.ListMeta{SelfLink:"", ResourceVersion:"", Continue:""}, Status:"Failure", Message:"clusterroles.rbac.authorization.k8s.io \"system:controller:persistent-volume-binder\" not found", Reason:"NotFound", Details:(*v1.StatusDetails)(0xc0020157a0), Code:404}}
I0111 23:42:16.632646  120899 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/clusterroles/system:controller:persistent-volume-binder: (872.673µs) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38390]
I0111 23:42:16.635100  120899 wrap.go:47] POST /apis/rbac.authorization.k8s.io/v1/clusterroles: (2.084263ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38390]
I0111 23:42:16.635573  120899 storage_rbac.go:187] created clusterrole.rbac.authorization.k8s.io/system:controller:persistent-volume-binder
I0111 23:42:16.636562  120899 get.go:70] Unable to get resource : &errors.StatusError{ErrStatus:v1.Status{TypeMeta:v1.TypeMeta{Kind:"", APIVersion:""}, ListMeta:v1.ListMeta{SelfLink:"", ResourceVersion:"", Continue:""}, Status:"Failure", Message:"clusterroles.rbac.authorization.k8s.io \"system:controller:pod-garbage-collector\" not found", Reason:"NotFound", Details:(*v1.StatusDetails)(0xc0079d0480), Code:404}}
I0111 23:42:16.636893  120899 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/clusterroles/system:controller:pod-garbage-collector: (1.119403ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38390]
I0111 23:42:16.639071  120899 wrap.go:47] POST /apis/rbac.authorization.k8s.io/v1/clusterroles: (1.75491ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38390]
I0111 23:42:16.639379  120899 storage_rbac.go:187] created clusterrole.rbac.authorization.k8s.io/system:controller:pod-garbage-collector
I0111 23:42:16.640584  120899 get.go:70] Unable to get resource : &errors.StatusError{ErrStatus:v1.Status{TypeMeta:v1.TypeMeta{Kind:"", APIVersion:""}, ListMeta:v1.ListMeta{SelfLink:"", ResourceVersion:"", Continue:""}, Status:"Failure", Message:"clusterroles.rbac.authorization.k8s.io \"system:controller:replicaset-controller\" not found", Reason:"NotFound", Details:(*v1.StatusDetails)(0xc002016e40), Code:404}}
I0111 23:42:16.640765  120899 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/clusterroles/system:controller:replicaset-controller: (1.185348ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38390]
I0111 23:42:16.644477  120899 wrap.go:47] POST /apis/rbac.authorization.k8s.io/v1/clusterroles: (3.208028ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38390]
I0111 23:42:16.644763  120899 storage_rbac.go:187] created clusterrole.rbac.authorization.k8s.io/system:controller:replicaset-controller
I0111 23:42:16.646666  120899 get.go:70] Unable to get resource : &errors.StatusError{ErrStatus:v1.Status{TypeMeta:v1.TypeMeta{Kind:"", APIVersion:""}, ListMeta:v1.ListMeta{SelfLink:"", ResourceVersion:"", Continue:""}, Status:"Failure", Message:"clusterroles.rbac.authorization.k8s.io \"system:controller:replication-controller\" not found", Reason:"NotFound", Details:(*v1.StatusDetails)(0xc0092d8000), Code:404}}
I0111 23:42:16.646866  120899 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/clusterroles/system:controller:replication-controller: (1.353627ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38390]
I0111 23:42:16.649412  120899 wrap.go:47] POST /apis/rbac.authorization.k8s.io/v1/clusterroles: (1.987692ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38390]
I0111 23:42:16.649716  120899 storage_rbac.go:187] created clusterrole.rbac.authorization.k8s.io/system:controller:replication-controller
I0111 23:42:16.650703  120899 get.go:70] Unable to get resource : &errors.StatusError{ErrStatus:v1.Status{TypeMeta:v1.TypeMeta{Kind:"", APIVersion:""}, ListMeta:v1.ListMeta{SelfLink:"", ResourceVersion:"", Continue:""}, Status:"Failure", Message:"clusterroles.rbac.authorization.k8s.io \"system:controller:resourcequota-controller\" not found", Reason:"NotFound", Details:(*v1.StatusDetails)(0xc007717020), Code:404}}
I0111 23:42:16.650860  120899 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/clusterroles/system:controller:resourcequota-controller: (936.482µs) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38390]
I0111 23:42:16.652913  120899 wrap.go:47] POST /apis/rbac.authorization.k8s.io/v1/clusterroles: (1.695173ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38390]
I0111 23:42:16.653163  120899 storage_rbac.go:187] created clusterrole.rbac.authorization.k8s.io/system:controller:resourcequota-controller
I0111 23:42:16.654314  120899 get.go:70] Unable to get resource : &errors.StatusError{ErrStatus:v1.Status{TypeMeta:v1.TypeMeta{Kind:"", APIVersion:""}, ListMeta:v1.ListMeta{SelfLink:"", ResourceVersion:"", Continue:""}, Status:"Failure", Message:"clusterroles.rbac.authorization.k8s.io \"system:controller:route-controller\" not found", Reason:"NotFound", Details:(*v1.StatusDetails)(0xc0010a7f80), Code:404}}
I0111 23:42:16.654456  120899 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/clusterroles/system:controller:route-controller: (1.074725ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38390]
I0111 23:42:16.655025  120899 healthz.go:170] healthz check poststarthook/rbac/bootstrap-roles failed: not finished
I0111 23:42:16.655170  120899 wrap.go:47] GET /healthz: (1.131025ms) 500
goroutine 27945 [running]:
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/httplog.(*respLogger).recordStatus(0xc007e32d90, 0x1f4)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/httplog.(*respLogger).WriteHeader(0xc007e32d90, 0x1f4)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.(*baseTimeoutWriter).WriteHeader(0xc0031b53c0, 0x1f4)
net/http.Error(0x7eff88166590, 0xc008f02068, 0xc002498a00, 0x136, 0x1f4)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/healthz.handleRootHealthz.func1(0x7eff88166590, 0xc008f02068, 0xc0065b8a00)
net/http.HandlerFunc.ServeHTTP(0xc008c6fee0, 0x7eff88166590, 0xc008f02068, 0xc0065b8a00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/mux.(*pathHandler).ServeHTTP(0xc009597440, 0x7eff88166590, 0xc008f02068, 0xc0065b8a00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/mux.(*PathRecorderMux).ServeHTTP(0xc01030dc70, 0x7eff88166590, 0xc008f02068, 0xc0065b8a00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server.director.ServeHTTP(0x40e951a, 0xe, 0xc0102faa20, 0xc01030dc70, 0x7eff88166590, 0xc008f02068, 0xc0065b8a00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/filters.WithAuthorization.func1(0x7eff88166590, 0xc008f02068, 0xc0065b8a00)
net/http.HandlerFunc.ServeHTTP(0xc01030bfc0, 0x7eff88166590, 0xc008f02068, 0xc0065b8a00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.WithMaxInFlightLimit.func1(0x7eff88166590, 0xc008f02068, 0xc0065b8a00)
net/http.HandlerFunc.ServeHTTP(0xc00f7db620, 0x7eff88166590, 0xc008f02068, 0xc0065b8a00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/filters.WithImpersonation.func1(0x7eff88166590, 0xc008f02068, 0xc0065b8a00)
net/http.HandlerFunc.ServeHTTP(0xc00dc48000, 0x7eff88166590, 0xc008f02068, 0xc0065b8a00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/filters.WithAuthentication.func1(0x7eff88166590, 0xc008f02068, 0xc0065b8900)
net/http.HandlerFunc.ServeHTTP(0xc00e43af00, 0x7eff88166590, 0xc008f02068, 0xc0065b8900)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.(*timeoutHandler).ServeHTTP.func1(0xc0058f7aa0, 0xc00dc44ea0, 0x604db80, 0xc008f02068, 0xc0065b8900)
created by k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.(*timeoutHandler).ServeHTTP

logging error output: "[+]ping ok\n[+]log ok\n[+]etcd ok\n[+]poststarthook/generic-apiserver-start-informers ok\n[+]poststarthook/bootstrap-controller ok\n[-]poststarthook/rbac/bootstrap-roles failed: reason withheld\n[+]poststarthook/scheduling/bootstrap-system-priority-classes ok\n[+]poststarthook/ca-registration ok\nhealthz check failed\n"
 [Go-http-client/1.1 127.0.0.1:38392]
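The `/healthz` 500s interleaved with the role creation are expected while bootstrap is still running: the `poststarthook/rbac/bootstrap-roles` check reports "not finished" until the hook completes, and the server's http logger dumps the handling goroutine's stack for every 5xx. A rough sketch of a health endpoint gated on a post-start hook, using only the standard library; the `hookChecker` type and `done` channel are illustrative, not the apiserver's actual healthz plumbing:

```go
package main

import (
	"fmt"
	"net/http"
)

// hookChecker reports healthy only once the post-start hook closes done,
// mirroring the "[-]poststarthook/... failed: not finished" lines above.
type hookChecker struct {
	name string
	done <-chan struct{}
}

func (c hookChecker) check() error {
	select {
	case <-c.done:
		return nil
	default:
		return fmt.Errorf("not finished")
	}
}

// healthz aggregates the checks into the [+]/[-] report format seen in
// the "logging error output" lines, failing with 500 until all pass.
func healthz(checks ...hookChecker) http.HandlerFunc {
	return func(w http.ResponseWriter, r *http.Request) {
		report, failed := "", false
		for _, c := range checks {
			if err := c.check(); err != nil {
				failed = true
				report += fmt.Sprintf("[-]%s failed: %v\n", c.name, err)
			} else {
				report += fmt.Sprintf("[+]%s ok\n", c.name)
			}
		}
		if failed {
			http.Error(w, report+"healthz check failed", http.StatusInternalServerError)
			return
		}
		fmt.Fprint(w, "ok")
	}
}
```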
I0111 23:42:16.657101  120899 wrap.go:47] POST /apis/rbac.authorization.k8s.io/v1/clusterroles: (1.747636ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38390]
I0111 23:42:16.657321  120899 storage_rbac.go:187] created clusterrole.rbac.authorization.k8s.io/system:controller:route-controller
I0111 23:42:16.658229  120899 get.go:70] Unable to get resource : &errors.StatusError{ErrStatus:v1.Status{TypeMeta:v1.TypeMeta{Kind:"", APIVersion:""}, ListMeta:v1.ListMeta{SelfLink:"", ResourceVersion:"", Continue:""}, Status:"Failure", Message:"clusterroles.rbac.authorization.k8s.io \"system:controller:service-account-controller\" not found", Reason:"NotFound", Details:(*v1.StatusDetails)(0xc00a1c3020), Code:404}}
I0111 23:42:16.658973  120899 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/clusterroles/system:controller:service-account-controller: (1.499743ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38390]
I0111 23:42:16.660911  120899 wrap.go:47] POST /apis/rbac.authorization.k8s.io/v1/clusterroles: (1.600658ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38390]
I0111 23:42:16.661147  120899 storage_rbac.go:187] created clusterrole.rbac.authorization.k8s.io/system:controller:service-account-controller
I0111 23:42:16.662307  120899 get.go:70] Unable to get resource : &errors.StatusError{ErrStatus:v1.Status{TypeMeta:v1.TypeMeta{Kind:"", APIVersion:""}, ListMeta:v1.ListMeta{SelfLink:"", ResourceVersion:"", Continue:""}, Status:"Failure", Message:"clusterroles.rbac.authorization.k8s.io \"system:controller:service-controller\" not found", Reason:"NotFound", Details:(*v1.StatusDetails)(0xc00a3003c0), Code:404}}
I0111 23:42:16.662697  120899 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/clusterroles/system:controller:service-controller: (1.298479ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38390]
I0111 23:42:16.664909  120899 wrap.go:47] POST /apis/rbac.authorization.k8s.io/v1/clusterroles: (1.801731ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38390]
I0111 23:42:16.665168  120899 storage_rbac.go:187] created clusterrole.rbac.authorization.k8s.io/system:controller:service-controller
I0111 23:42:16.666133  120899 get.go:70] Unable to get resource : &errors.StatusError{ErrStatus:v1.Status{TypeMeta:v1.TypeMeta{Kind:"", APIVersion:""}, ListMeta:v1.ListMeta{SelfLink:"", ResourceVersion:"", Continue:""}, Status:"Failure", Message:"clusterroles.rbac.authorization.k8s.io \"system:controller:statefulset-controller\" not found", Reason:"NotFound", Details:(*v1.StatusDetails)(0xc00a258480), Code:404}}
I0111 23:42:16.666315  120899 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/clusterroles/system:controller:statefulset-controller: (941.599µs) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38390]
I0111 23:42:16.668888  120899 wrap.go:47] POST /apis/rbac.authorization.k8s.io/v1/clusterroles: (2.220602ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38390]
I0111 23:42:16.669074  120899 storage_rbac.go:187] created clusterrole.rbac.authorization.k8s.io/system:controller:statefulset-controller
I0111 23:42:16.670198  120899 get.go:70] Unable to get resource : &errors.StatusError{ErrStatus:v1.Status{TypeMeta:v1.TypeMeta{Kind:"", APIVersion:""}, ListMeta:v1.ListMeta{SelfLink:"", ResourceVersion:"", Continue:""}, Status:"Failure", Message:"clusterroles.rbac.authorization.k8s.io \"system:controller:ttl-controller\" not found", Reason:"NotFound", Details:(*v1.StatusDetails)(0xc00a3e2300), Code:404}}
I0111 23:42:16.670361  120899 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/clusterroles/system:controller:ttl-controller: (1.12742ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38390]
I0111 23:42:16.672536  120899 wrap.go:47] POST /apis/rbac.authorization.k8s.io/v1/clusterroles: (1.87533ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38390]
I0111 23:42:16.672800  120899 storage_rbac.go:187] created clusterrole.rbac.authorization.k8s.io/system:controller:ttl-controller
I0111 23:42:16.673728  120899 get.go:70] Unable to get resource : &errors.StatusError{ErrStatus:v1.Status{TypeMeta:v1.TypeMeta{Kind:"", APIVersion:""}, ListMeta:v1.ListMeta{SelfLink:"", ResourceVersion:"", Continue:""}, Status:"Failure", Message:"clusterroles.rbac.authorization.k8s.io \"system:controller:certificate-controller\" not found", Reason:"NotFound", Details:(*v1.StatusDetails)(0xc00a2593e0), Code:404}}
I0111 23:42:16.673913  120899 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/clusterroles/system:controller:certificate-controller: (936.629µs) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38390]
I0111 23:42:16.676058  120899 wrap.go:47] POST /apis/rbac.authorization.k8s.io/v1/clusterroles: (1.679402ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38390]
I0111 23:42:16.676294  120899 storage_rbac.go:187] created clusterrole.rbac.authorization.k8s.io/system:controller:certificate-controller
I0111 23:42:16.677295  120899 get.go:70] Unable to get resource : &errors.StatusError{ErrStatus:v1.Status{TypeMeta:v1.TypeMeta{Kind:"", APIVersion:""}, ListMeta:v1.ListMeta{SelfLink:"", ResourceVersion:"", Continue:""}, Status:"Failure", Message:"clusterroles.rbac.authorization.k8s.io \"system:controller:pvc-protection-controller\" not found", Reason:"NotFound", Details:(*v1.StatusDetails)(0xc00a115a40), Code:404}}
I0111 23:42:16.677445  120899 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/clusterroles/system:controller:pvc-protection-controller: (1.002118ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38390]
I0111 23:42:16.696256  120899 wrap.go:47] POST /apis/rbac.authorization.k8s.io/v1/clusterroles: (2.394966ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38390]
I0111 23:42:16.696730  120899 storage_rbac.go:187] created clusterrole.rbac.authorization.k8s.io/system:controller:pvc-protection-controller
I0111 23:42:16.711472  120899 get.go:70] Unable to get resource : &errors.StatusError{ErrStatus:v1.Status{TypeMeta:v1.TypeMeta{Kind:"", APIVersion:""}, ListMeta:v1.ListMeta{SelfLink:"", ResourceVersion:"", Continue:""}, Status:"Failure", Message:"clusterroles.rbac.authorization.k8s.io \"system:controller:pv-protection-controller\" not found", Reason:"NotFound", Details:(*v1.StatusDetails)(0xc00a5f8300), Code:404}}
I0111 23:42:16.711675  120899 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/clusterroles/system:controller:pv-protection-controller: (1.81048ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38390]
I0111 23:42:16.732219  120899 wrap.go:47] POST /apis/rbac.authorization.k8s.io/v1/clusterroles: (2.367215ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38390]
I0111 23:42:16.732494  120899 storage_rbac.go:187] created clusterrole.rbac.authorization.k8s.io/system:controller:pv-protection-controller
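Each `get.go:70` entry dumps the typed `*errors.StatusError` behind one of these 404s; its Status:"Failure" / Reason:"NotFound" / Code:404 fields are what apimachinery's NotFound constructor produces. A small, illustrative round-trip (the group, resource, and name literals are copied from the log lines above):

```go
package main

import (
	"fmt"

	apierrors "k8s.io/apimachinery/pkg/api/errors"
	"k8s.io/apimachinery/pkg/runtime/schema"
)

func main() {
	gr := schema.GroupResource{
		Group:    "rbac.authorization.k8s.io",
		Resource: "clusterroles",
	}
	// NewNotFound builds the same Status payload seen in the log:
	// Status:"Failure", Reason:"NotFound", Code:404.
	err := apierrors.NewNotFound(gr, "system:controller:pv-protection-controller")

	fmt.Println(apierrors.IsNotFound(err)) // true
	fmt.Println(err) // clusterroles.rbac.authorization.k8s.io "system:controller:pv-protection-controller" not found
}
```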
I0111 23:42:16.751596  120899 get.go:70] Unable to get resource : &errors.StatusError{ErrStatus:v1.Status{TypeMeta:v1.TypeMeta{Kind:"", APIVersion:""}, ListMeta:v1.ListMeta{SelfLink:"", ResourceVersion:"", Continue:""}, Status:"Failure", Message:"clusterrolebindings.rbac.authorization.k8s.io \"cluster-admin\" not found", Reason:"NotFound", Details:(*v1.StatusDetails)(0xc00a5f93e0), Code:404}}
I0111 23:42:16.751798  120899 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/clusterrolebindings/cluster-admin: (1.92409ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38390]
I0111 23:42:16.752808  120899 healthz.go:170] healthz check poststarthook/rbac/bootstrap-roles failed: not finished
I0111 23:42:16.752983  120899 wrap.go:47] GET /healthz: (1.735964ms) 500
goroutine 28056 [running]:
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/httplog.(*respLogger).recordStatus(0xc0080ceaf0, 0x1f4)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/httplog.(*respLogger).WriteHeader(0xc0080ceaf0, 0x1f4)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.(*baseTimeoutWriter).WriteHeader(0xc00373b6a0, 0x1f4)
net/http.Error(0x7eff88166590, 0xc008f024d8, 0xc004aa6c80, 0x136, 0x1f4)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/healthz.handleRootHealthz.func1(0x7eff88166590, 0xc008f024d8, 0xc0077bd000)
net/http.HandlerFunc.ServeHTTP(0xc008c6fee0, 0x7eff88166590, 0xc008f024d8, 0xc0077bd000)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/mux.(*pathHandler).ServeHTTP(0xc009597440, 0x7eff88166590, 0xc008f024d8, 0xc0077bd000)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/mux.(*PathRecorderMux).ServeHTTP(0xc01030dc70, 0x7eff88166590, 0xc008f024d8, 0xc0077bd000)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server.director.ServeHTTP(0x40e951a, 0xe, 0xc0102faa20, 0xc01030dc70, 0x7eff88166590, 0xc008f024d8, 0xc0077bd000)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/filters.WithAuthorization.func1(0x7eff88166590, 0xc008f024d8, 0xc0077bd000)
net/http.HandlerFunc.ServeHTTP(0xc01030bfc0, 0x7eff88166590, 0xc008f024d8, 0xc0077bd000)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.WithMaxInFlightLimit.func1(0x7eff88166590, 0xc008f024d8, 0xc0077bd000)
net/http.HandlerFunc.ServeHTTP(0xc00f7db620, 0x7eff88166590, 0xc008f024d8, 0xc0077bd000)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/filters.WithImpersonation.func1(0x7eff88166590, 0xc008f024d8, 0xc0077bd000)
net/http.HandlerFunc.ServeHTTP(0xc00dc48000, 0x7eff88166590, 0xc008f024d8, 0xc0077bd000)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/filters.WithAuthentication.func1(0x7eff88166590, 0xc008f024d8, 0xc0077bcf00)
net/http.HandlerFunc.ServeHTTP(0xc00e43af00, 0x7eff88166590, 0xc008f024d8, 0xc0077bcf00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.(*timeoutHandler).ServeHTTP.func1(0xc0064d6d20, 0xc00dc44ea0, 0x604db80, 0xc008f024d8, 0xc0077bcf00)
created by k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.(*timeoutHandler).ServeHTTP

logging error output: "[+]ping ok\n[+]log ok\n[+]etcd ok\n[+]poststarthook/generic-apiserver-start-informers ok\n[+]poststarthook/bootstrap-controller ok\n[-]poststarthook/rbac/bootstrap-roles failed: reason withheld\n[+]poststarthook/scheduling/bootstrap-system-priority-classes ok\n[+]poststarthook/ca-registration ok\nhealthz check failed\n"
 [Go-http-client/1.1 127.0.0.1:38392]
I0111 23:42:16.772477  120899 wrap.go:47] POST /apis/rbac.authorization.k8s.io/v1/clusterrolebindings: (2.619318ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38392]
I0111 23:42:16.772759  120899 storage_rbac.go:215] created clusterrolebinding.rbac.authorization.k8s.io/cluster-admin
I0111 23:42:16.791391  120899 get.go:70] Unable to get resource : &errors.StatusError{ErrStatus:v1.Status{TypeMeta:v1.TypeMeta{Kind:"", APIVersion:""}, ListMeta:v1.ListMeta{SelfLink:"", ResourceVersion:"", Continue:""}, Status:"Failure", Message:"clusterrolebindings.rbac.authorization.k8s.io \"system:discovery\" not found", Reason:"NotFound", Details:(*v1.StatusDetails)(0xc00a3016e0), Code:404}}
I0111 23:42:16.791630  120899 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system:discovery: (1.814793ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38392]
I0111 23:42:16.812624  120899 wrap.go:47] POST /apis/rbac.authorization.k8s.io/v1/clusterrolebindings: (2.785556ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38392]
I0111 23:42:16.812918  120899 storage_rbac.go:215] created clusterrolebinding.rbac.authorization.k8s.io/system:discovery
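Every `wrap.go:47` line is the request-logging wrapper recording verb, path, latency, status, user agent, and client address. A generic sketch of such middleware, assuming nothing beyond the standard library (the output only approximates the log format; this is not the apiserver's httplog code):

```go
package main

import (
	"log"
	"net/http"
	"time"
)

// statusRecorder captures the status code the wrapped handler writes.
type statusRecorder struct {
	http.ResponseWriter
	status int
}

func (r *statusRecorder) WriteHeader(code int) {
	r.status = code
	r.ResponseWriter.WriteHeader(code)
}

// withRequestLogging emits lines shaped like
// "GET /path: (latency) status [user-agent addr]".
func withRequestLogging(next http.Handler) http.Handler {
	return http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
		rec := &statusRecorder{ResponseWriter: w, status: http.StatusOK}
		start := time.Now()
		next.ServeHTTP(rec, req)
		log.Printf("%s %s: (%v) %d [%s %s]",
			req.Method, req.URL.Path, time.Since(start),
			rec.status, req.UserAgent(), req.RemoteAddr)
	})
}
```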
I0111 23:42:16.830974  120899 get.go:70] Unable to get resource : &errors.StatusError{ErrStatus:v1.Status{TypeMeta:v1.TypeMeta{Kind:"", APIVersion:""}, ListMeta:v1.ListMeta{SelfLink:"", ResourceVersion:"", Continue:""}, Status:"Failure", Message:"clusterrolebindings.rbac.authorization.k8s.io \"system:basic-user\" not found", Reason:"NotFound", Details:(*v1.StatusDetails)(0xc00a4d9d40), Code:404}}
I0111 23:42:16.831171  120899 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system:basic-user: (1.383938ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38392]
I0111 23:42:16.852666  120899 wrap.go:47] POST /apis/rbac.authorization.k8s.io/v1/clusterrolebindings: (2.811679ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38392]
I0111 23:42:16.853307  120899 storage_rbac.go:215] created clusterrolebinding.rbac.authorization.k8s.io/system:basic-user
I0111 23:42:16.854271  120899 healthz.go:170] healthz check poststarthook/rbac/bootstrap-roles failed: not finished
I0111 23:42:16.854496  120899 wrap.go:47] GET /healthz: (1.654559ms) 500
goroutine 28066 [running]:
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/httplog.(*respLogger).recordStatus(0xc007e45ce0, 0x1f4)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/httplog.(*respLogger).WriteHeader(0xc007e45ce0, 0x1f4)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.(*baseTimeoutWriter).WriteHeader(0xc0037a8680, 0x1f4)
net/http.Error(0x7eff88166590, 0xc009087360, 0xc002506dc0, 0x136, 0x1f4)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/healthz.handleRootHealthz.func1(0x7eff88166590, 0xc009087360, 0xc0022f9900)
net/http.HandlerFunc.ServeHTTP(0xc008c6fee0, 0x7eff88166590, 0xc009087360, 0xc0022f9900)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/mux.(*pathHandler).ServeHTTP(0xc009597440, 0x7eff88166590, 0xc009087360, 0xc0022f9900)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/mux.(*PathRecorderMux).ServeHTTP(0xc01030dc70, 0x7eff88166590, 0xc009087360, 0xc0022f9900)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server.director.ServeHTTP(0x40e951a, 0xe, 0xc0102faa20, 0xc01030dc70, 0x7eff88166590, 0xc009087360, 0xc0022f9900)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/filters.WithAuthorization.func1(0x7eff88166590, 0xc009087360, 0xc0022f9900)
net/http.HandlerFunc.ServeHTTP(0xc01030bfc0, 0x7eff88166590, 0xc009087360, 0xc0022f9900)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.WithMaxInFlightLimit.func1(0x7eff88166590, 0xc009087360, 0xc0022f9900)
net/http.HandlerFunc.ServeHTTP(0xc00f7db620, 0x7eff88166590, 0xc009087360, 0xc0022f9900)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/filters.WithImpersonation.func1(0x7eff88166590, 0xc009087360, 0xc0022f9900)
net/http.HandlerFunc.ServeHTTP(0xc00dc48000, 0x7eff88166590, 0xc009087360, 0xc0022f9900)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/filters.WithAuthentication.func1(0x7eff88166590, 0xc009087360, 0xc0022f9700)
net/http.HandlerFunc.ServeHTTP(0xc00e43af00, 0x7eff88166590, 0xc009087360, 0xc0022f9700)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.(*timeoutHandler).ServeHTTP.func1(0xc006504de0, 0xc00dc44ea0, 0x604db80, 0xc009087360, 0xc0022f9700)
created by k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.(*timeoutHandler).ServeHTTP

logging error output: "[+]ping ok\n[+]log ok\n[+]etcd ok\n[+]poststarthook/generic-apiserver-start-informers ok\n[+]poststarthook/bootstrap-controller ok\n[-]poststarthook/rbac/bootstrap-roles failed: reason withheld\n[+]poststarthook/scheduling/bootstrap-system-priority-classes ok\n[+]poststarthook/ca-registration ok\nhealthz check failed\n"
 [Go-http-client/1.1 127.0.0.1:38390]
I0111 23:42:16.871013  120899 get.go:70] Unable to get resource : &errors.StatusError{ErrStatus:v1.Status{TypeMeta:v1.TypeMeta{Kind:"", APIVersion:""}, ListMeta:v1.ListMeta{SelfLink:"", ResourceVersion:"", Continue:""}, Status:"Failure", Message:"clusterrolebindings.rbac.authorization.k8s.io \"system:node-proxier\" not found", Reason:"NotFound", Details:(*v1.StatusDetails)(0xc0092d8fc0), Code:404}}
I0111 23:42:16.871410  120899 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system:node-proxier: (1.584158ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38390]
I0111 23:42:16.892523  120899 wrap.go:47] POST /apis/rbac.authorization.k8s.io/v1/clusterrolebindings: (2.666069ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38390]
I0111 23:42:16.892840  120899 storage_rbac.go:215] created clusterrolebinding.rbac.authorization.k8s.io/system:node-proxier
I0111 23:42:16.911189  120899 get.go:70] Unable to get resource : &errors.StatusError{ErrStatus:v1.Status{TypeMeta:v1.TypeMeta{Kind:"", APIVersion:""}, ListMeta:v1.ListMeta{SelfLink:"", ResourceVersion:"", Continue:""}, Status:"Failure", Message:"clusterrolebindings.rbac.authorization.k8s.io \"system:kube-controller-manager\" not found", Reason:"NotFound", Details:(*v1.StatusDetails)(0xc004a00960), Code:404}}
I0111 23:42:16.911432  120899 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system:kube-controller-manager: (1.469906ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38390]
I0111 23:42:16.932431  120899 wrap.go:47] POST /apis/rbac.authorization.k8s.io/v1/clusterrolebindings: (2.631083ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38390]
I0111 23:42:16.932706  120899 storage_rbac.go:215] created clusterrolebinding.rbac.authorization.k8s.io/system:kube-controller-manager
I0111 23:42:16.951034  120899 get.go:70] Unable to get resource : &errors.StatusError{ErrStatus:v1.Status{TypeMeta:v1.TypeMeta{Kind:"", APIVersion:""}, ListMeta:v1.ListMeta{SelfLink:"", ResourceVersion:"", Continue:""}, Status:"Failure", Message:"clusterrolebindings.rbac.authorization.k8s.io \"system:kube-dns\" not found", Reason:"NotFound", Details:(*v1.StatusDetails)(0xc00a75a2a0), Code:404}}
I0111 23:42:16.951224  120899 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system:kube-dns: (1.387119ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38390]
I0111 23:42:16.952384  120899 healthz.go:170] healthz check poststarthook/rbac/bootstrap-roles failed: not finished
I0111 23:42:16.952561  120899 wrap.go:47] GET /healthz: (1.008325ms) 500
goroutine 28048 [running]:
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/httplog.(*respLogger).recordStatus(0xc008099c00, 0x1f4)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/httplog.(*respLogger).WriteHeader(0xc008099c00, 0x1f4)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.(*baseTimeoutWriter).WriteHeader(0xc0037ed200, 0x1f4)
net/http.Error(0x7eff88166590, 0xc00d4b11c0, 0xc000077180, 0x136, 0x1f4)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/healthz.handleRootHealthz.func1(0x7eff88166590, 0xc00d4b11c0, 0xc006ba1500)
net/http.HandlerFunc.ServeHTTP(0xc008c6fee0, 0x7eff88166590, 0xc00d4b11c0, 0xc006ba1500)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/mux.(*pathHandler).ServeHTTP(0xc009597440, 0x7eff88166590, 0xc00d4b11c0, 0xc006ba1500)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/mux.(*PathRecorderMux).ServeHTTP(0xc01030dc70, 0x7eff88166590, 0xc00d4b11c0, 0xc006ba1500)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server.director.ServeHTTP(0x40e951a, 0xe, 0xc0102faa20, 0xc01030dc70, 0x7eff88166590, 0xc00d4b11c0, 0xc006ba1500)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/filters.WithAuthorization.func1(0x7eff88166590, 0xc00d4b11c0, 0xc006ba1500)
net/http.HandlerFunc.ServeHTTP(0xc01030bfc0, 0x7eff88166590, 0xc00d4b11c0, 0xc006ba1500)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.WithMaxInFlightLimit.func1(0x7eff88166590, 0xc00d4b11c0, 0xc006ba1500)
net/http.HandlerFunc.ServeHTTP(0xc00f7db620, 0x7eff88166590, 0xc00d4b11c0, 0xc006ba1500)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/filters.WithImpersonation.func1(0x7eff88166590, 0xc00d4b11c0, 0xc006ba1500)
net/http.HandlerFunc.ServeHTTP(0xc00dc48000, 0x7eff88166590, 0xc00d4b11c0, 0xc006ba1500)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/filters.WithAuthentication.func1(0x7eff88166590, 0xc00d4b11c0, 0xc006ba1400)
net/http.HandlerFunc.ServeHTTP(0xc00e43af00, 0x7eff88166590, 0xc00d4b11c0, 0xc006ba1400)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.(*timeoutHandler).ServeHTTP.func1(0xc0064c2900, 0xc00dc44ea0, 0x604db80, 0xc00d4b11c0, 0xc006ba1400)
created by k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.(*timeoutHandler).ServeHTTP

logging error output: "[+]ping ok\n[+]log ok\n[+]etcd ok\n[+]poststarthook/generic-apiserver-start-informers ok\n[+]poststarthook/bootstrap-controller ok\n[-]poststarthook/rbac/bootstrap-roles failed: reason withheld\n[+]poststarthook/scheduling/bootstrap-system-priority-classes ok\n[+]poststarthook/ca-registration ok\nhealthz check failed\n"
 [Go-http-client/1.1 127.0.0.1:38392]
I0111 23:42:16.972167  120899 wrap.go:47] POST /apis/rbac.authorization.k8s.io/v1/clusterrolebindings: (2.333288ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38390]
I0111 23:42:16.972515  120899 storage_rbac.go:215] created clusterrolebinding.rbac.authorization.k8s.io/system:kube-dns
I0111 23:42:16.991070  120899 get.go:70] Unable to get resource : &errors.StatusError{ErrStatus:v1.Status{TypeMeta:v1.TypeMeta{Kind:"", APIVersion:""}, ListMeta:v1.ListMeta{SelfLink:"", ResourceVersion:"", Continue:""}, Status:"Failure", Message:"clusterrolebindings.rbac.authorization.k8s.io \"system:kube-scheduler\" not found", Reason:"NotFound", Details:(*v1.StatusDetails)(0xc00a301bc0), Code:404}}
I0111 23:42:16.991257  120899 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system:kube-scheduler: (1.440152ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38390]
I0111 23:42:17.012451  120899 wrap.go:47] POST /apis/rbac.authorization.k8s.io/v1/clusterrolebindings: (2.601742ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38390]
I0111 23:42:17.012715  120899 storage_rbac.go:215] created clusterrolebinding.rbac.authorization.k8s.io/system:kube-scheduler
I0111 23:42:17.030969  120899 get.go:70] Unable to get resource : &errors.StatusError{ErrStatus:v1.Status{TypeMeta:v1.TypeMeta{Kind:"", APIVersion:""}, ListMeta:v1.ListMeta{SelfLink:"", ResourceVersion:"", Continue:""}, Status:"Failure", Message:"clusterrolebindings.rbac.authorization.k8s.io \"system:aws-cloud-provider\" not found", Reason:"NotFound", Details:(*v1.StatusDetails)(0xc00a6f5f80), Code:404}}
I0111 23:42:17.031157  120899 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system:aws-cloud-provider: (1.344433ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38390]
I0111 23:42:17.053113  120899 wrap.go:47] POST /apis/rbac.authorization.k8s.io/v1/clusterrolebindings: (3.258785ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38390]
I0111 23:42:17.053317  120899 healthz.go:170] healthz check poststarthook/rbac/bootstrap-roles failed: not finished
I0111 23:42:17.053483  120899 wrap.go:47] GET /healthz: (2.679889ms) 500
goroutine 28117 [running]:
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/httplog.(*respLogger).recordStatus(0xc00a6b2000, 0x1f4)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/httplog.(*respLogger).WriteHeader(0xc00a6b2000, 0x1f4)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.(*baseTimeoutWriter).WriteHeader(0xc0038780c0, 0x1f4)
net/http.Error(0x7eff88166590, 0xc00d4b1268, 0xc000dd3040, 0x136, 0x1f4)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/healthz.handleRootHealthz.func1(0x7eff88166590, 0xc00d4b1268, 0xc003060500)
net/http.HandlerFunc.ServeHTTP(0xc008c6fee0, 0x7eff88166590, 0xc00d4b1268, 0xc003060500)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/mux.(*pathHandler).ServeHTTP(0xc009597440, 0x7eff88166590, 0xc00d4b1268, 0xc003060500)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/mux.(*PathRecorderMux).ServeHTTP(0xc01030dc70, 0x7eff88166590, 0xc00d4b1268, 0xc003060500)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server.director.ServeHTTP(0x40e951a, 0xe, 0xc0102faa20, 0xc01030dc70, 0x7eff88166590, 0xc00d4b1268, 0xc003060500)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/filters.WithAuthorization.func1(0x7eff88166590, 0xc00d4b1268, 0xc003060500)
net/http.HandlerFunc.ServeHTTP(0xc01030bfc0, 0x7eff88166590, 0xc00d4b1268, 0xc003060500)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.WithMaxInFlightLimit.func1(0x7eff88166590, 0xc00d4b1268, 0xc003060500)
net/http.HandlerFunc.ServeHTTP(0xc00f7db620, 0x7eff88166590, 0xc00d4b1268, 0xc003060500)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/filters.WithImpersonation.func1(0x7eff88166590, 0xc00d4b1268, 0xc003060500)
net/http.HandlerFunc.ServeHTTP(0xc00dc48000, 0x7eff88166590, 0xc00d4b1268, 0xc003060500)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/filters.WithAuthentication.func1(0x7eff88166590, 0xc00d4b1268, 0xc003060400)
net/http.HandlerFunc.ServeHTTP(0xc00e43af00, 0x7eff88166590, 0xc00d4b1268, 0xc003060400)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.(*timeoutHandler).ServeHTTP.func1(0xc0064c31a0, 0xc00dc44ea0, 0x604db80, 0xc00d4b1268, 0xc003060400)
created by k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.(*timeoutHandler).ServeHTTP

logging error output: "[+]ping ok\n[+]log ok\n[+]etcd ok\n[+]poststarthook/generic-apiserver-start-informers ok\n[+]poststarthook/bootstrap-controller ok\n[-]poststarthook/rbac/bootstrap-roles failed: reason withheld\n[+]poststarthook/scheduling/bootstrap-system-priority-classes ok\n[+]poststarthook/ca-registration ok\nhealthz check failed\n"
 [Go-http-client/1.1 127.0.0.1:38392]
I0111 23:42:17.053894  120899 storage_rbac.go:215] created clusterrolebinding.rbac.authorization.k8s.io/system:aws-cloud-provider
I0111 23:42:17.071018  120899 get.go:70] Unable to get resource : &errors.StatusError{ErrStatus:v1.Status{TypeMeta:v1.TypeMeta{Kind:"", APIVersion:""}, ListMeta:v1.ListMeta{SelfLink:"", ResourceVersion:"", Continue:""}, Status:"Failure", Message:"clusterrolebindings.rbac.authorization.k8s.io \"system:volume-scheduler\" not found", Reason:"NotFound", Details:(*v1.StatusDetails)(0xc00a796b40), Code:404}}
I0111 23:42:17.071216  120899 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system:volume-scheduler: (1.356165ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38390]
I0111 23:42:17.092182  120899 wrap.go:47] POST /apis/rbac.authorization.k8s.io/v1/clusterrolebindings: (2.378848ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38390]
I0111 23:42:17.092442  120899 storage_rbac.go:215] created clusterrolebinding.rbac.authorization.k8s.io/system:volume-scheduler
I0111 23:42:17.110930  120899 get.go:70] Unable to get resource : &errors.StatusError{ErrStatus:v1.Status{TypeMeta:v1.TypeMeta{Kind:"", APIVersion:""}, ListMeta:v1.ListMeta{SelfLink:"", ResourceVersion:"", Continue:""}, Status:"Failure", Message:"clusterrolebindings.rbac.authorization.k8s.io \"system:node\" not found", Reason:"NotFound", Details:(*v1.StatusDetails)(0xc00a7d3ce0), Code:404}}
I0111 23:42:17.111103  120899 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system:node: (1.252399ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38390]
I0111 23:42:17.132386  120899 wrap.go:47] POST /apis/rbac.authorization.k8s.io/v1/clusterrolebindings: (2.487859ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38390]
I0111 23:42:17.132629  120899 storage_rbac.go:215] created clusterrolebinding.rbac.authorization.k8s.io/system:node
I0111 23:42:17.152332  120899 healthz.go:170] healthz check poststarthook/rbac/bootstrap-roles failed: not finished
I0111 23:42:17.152331  120899 get.go:70] Unable to get resource : &errors.StatusError{ErrStatus:v1.Status{TypeMeta:v1.TypeMeta{Kind:"", APIVersion:""}, ListMeta:v1.ListMeta{SelfLink:"", ResourceVersion:"", Continue:""}, Status:"Failure", Message:"clusterrolebindings.rbac.authorization.k8s.io \"system:controller:attachdetach-controller\" not found", Reason:"NotFound", Details:(*v1.StatusDetails)(0xc00a797500), Code:404}}
I0111 23:42:17.152498  120899 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system:controller:attachdetach-controller: (1.151485ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38390]
I0111 23:42:17.152500  120899 wrap.go:47] GET /healthz: (1.246077ms) 500
goroutine 28134 [running]:
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/httplog.(*respLogger).recordStatus(0xc00808de30, 0x1f4)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/httplog.(*respLogger).WriteHeader(0xc00808de30, 0x1f4)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.(*baseTimeoutWriter).WriteHeader(0xc003888740, 0x1f4)
net/http.Error(0x7eff88166590, 0xc0029bc1a8, 0xc000dd3540, 0x136, 0x1f4)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/healthz.handleRootHealthz.func1(0x7eff88166590, 0xc0029bc1a8, 0xc0036a8a00)
net/http.HandlerFunc.ServeHTTP(0xc008c6fee0, 0x7eff88166590, 0xc0029bc1a8, 0xc0036a8a00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/mux.(*pathHandler).ServeHTTP(0xc009597440, 0x7eff88166590, 0xc0029bc1a8, 0xc0036a8a00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/mux.(*PathRecorderMux).ServeHTTP(0xc01030dc70, 0x7eff88166590, 0xc0029bc1a8, 0xc0036a8a00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server.director.ServeHTTP(0x40e951a, 0xe, 0xc0102faa20, 0xc01030dc70, 0x7eff88166590, 0xc0029bc1a8, 0xc0036a8a00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/filters.WithAuthorization.func1(0x7eff88166590, 0xc0029bc1a8, 0xc0036a8a00)
net/http.HandlerFunc.ServeHTTP(0xc01030bfc0, 0x7eff88166590, 0xc0029bc1a8, 0xc0036a8a00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.WithMaxInFlightLimit.func1(0x7eff88166590, 0xc0029bc1a8, 0xc0036a8a00)
net/http.HandlerFunc.ServeHTTP(0xc00f7db620, 0x7eff88166590, 0xc0029bc1a8, 0xc0036a8a00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/filters.WithImpersonation.func1(0x7eff88166590, 0xc0029bc1a8, 0xc0036a8a00)
net/http.HandlerFunc.ServeHTTP(0xc00dc48000, 0x7eff88166590, 0xc0029bc1a8, 0xc0036a8a00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/filters.WithAuthentication.func1(0x7eff88166590, 0xc0029bc1a8, 0xc0036a8800)
net/http.HandlerFunc.ServeHTTP(0xc00e43af00, 0x7eff88166590, 0xc0029bc1a8, 0xc0036a8800)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.(*timeoutHandler).ServeHTTP.func1(0xc0066a92c0, 0xc00dc44ea0, 0x604db80, 0xc0029bc1a8, 0xc0036a8800)
created by k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.(*timeoutHandler).ServeHTTP

logging error output: "[+]ping ok\n[+]log ok\n[+]etcd ok\n[+]poststarthook/generic-apiserver-start-informers ok\n[+]poststarthook/bootstrap-controller ok\n[-]poststarthook/rbac/bootstrap-roles failed: reason withheld\n[+]poststarthook/scheduling/bootstrap-system-priority-classes ok\n[+]poststarthook/ca-registration ok\nhealthz check failed\n"
 [Go-http-client/1.1 127.0.0.1:38392]
I0111 23:42:17.173700  120899 wrap.go:47] POST /apis/rbac.authorization.k8s.io/v1/clusterrolebindings: (3.793416ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38390]
I0111 23:42:17.174027  120899 storage_rbac.go:215] created clusterrolebinding.rbac.authorization.k8s.io/system:controller:attachdetach-controller
I0111 23:42:17.190789  120899 get.go:70] Unable to get resource : &errors.StatusError{ErrStatus:v1.Status{TypeMeta:v1.TypeMeta{Kind:"", APIVersion:""}, ListMeta:v1.ListMeta{SelfLink:"", ResourceVersion:"", Continue:""}, Status:"Failure", Message:"clusterrolebindings.rbac.authorization.k8s.io \"system:controller:clusterrole-aggregation-controller\" not found", Reason:"NotFound", Details:(*v1.StatusDetails)(0xc00a94ce40), Code:404}}
I0111 23:42:17.191000  120899 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system:controller:clusterrole-aggregation-controller: (1.181481ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38390]
I0111 23:42:17.211939  120899 wrap.go:47] POST /apis/rbac.authorization.k8s.io/v1/clusterrolebindings: (2.088439ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38390]
I0111 23:42:17.212243  120899 storage_rbac.go:215] created clusterrolebinding.rbac.authorization.k8s.io/system:controller:clusterrole-aggregation-controller
I0111 23:42:17.231048  120899 get.go:70] Unable to get resource : &errors.StatusError{ErrStatus:v1.Status{TypeMeta:v1.TypeMeta{Kind:"", APIVersion:""}, ListMeta:v1.ListMeta{SelfLink:"", ResourceVersion:"", Continue:""}, Status:"Failure", Message:"clusterrolebindings.rbac.authorization.k8s.io \"system:controller:cronjob-controller\" not found", Reason:"NotFound", Details:(*v1.StatusDetails)(0xc00a94d380), Code:404}}
I0111 23:42:17.231237  120899 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system:controller:cronjob-controller: (1.417258ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38390]
I0111 23:42:17.252234  120899 wrap.go:47] POST /apis/rbac.authorization.k8s.io/v1/clusterrolebindings: (2.516404ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38390]
I0111 23:42:17.252537  120899 storage_rbac.go:215] created clusterrolebinding.rbac.authorization.k8s.io/system:controller:cronjob-controller
I0111 23:42:17.253113  120899 healthz.go:170] healthz check poststarthook/rbac/bootstrap-roles failed: not finished
I0111 23:42:17.253321  120899 wrap.go:47] GET /healthz: (2.708164ms) 500
goroutine 28029 [running]:
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/httplog.(*respLogger).recordStatus(0xc00806b880, 0x1f4)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/httplog.(*respLogger).WriteHeader(0xc00806b880, 0x1f4)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.(*baseTimeoutWriter).WriteHeader(0xc0038b3680, 0x1f4)
net/http.Error(0x7eff88166590, 0xc0094af058, 0xc000dd3900, 0x136, 0x1f4)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/healthz.handleRootHealthz.func1(0x7eff88166590, 0xc0094af058, 0xc005dd9500)
net/http.HandlerFunc.ServeHTTP(0xc008c6fee0, 0x7eff88166590, 0xc0094af058, 0xc005dd9500)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/mux.(*pathHandler).ServeHTTP(0xc009597440, 0x7eff88166590, 0xc0094af058, 0xc005dd9500)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/mux.(*PathRecorderMux).ServeHTTP(0xc01030dc70, 0x7eff88166590, 0xc0094af058, 0xc005dd9500)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server.director.ServeHTTP(0x40e951a, 0xe, 0xc0102faa20, 0xc01030dc70, 0x7eff88166590, 0xc0094af058, 0xc005dd9500)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/filters.WithAuthorization.func1(0x7eff88166590, 0xc0094af058, 0xc005dd9500)
net/http.HandlerFunc.ServeHTTP(0xc01030bfc0, 0x7eff88166590, 0xc0094af058, 0xc005dd9500)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.WithMaxInFlightLimit.func1(0x7eff88166590, 0xc0094af058, 0xc005dd9500)
net/http.HandlerFunc.ServeHTTP(0xc00f7db620, 0x7eff88166590, 0xc0094af058, 0xc005dd9500)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/filters.WithImpersonation.func1(0x7eff88166590, 0xc0094af058, 0xc005dd9500)
net/http.HandlerFunc.ServeHTTP(0xc00dc48000, 0x7eff88166590, 0xc0094af058, 0xc005dd9500)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/filters.WithAuthentication.func1(0x7eff88166590, 0xc0094af058, 0xc005dd9400)
net/http.HandlerFunc.ServeHTTP(0xc00e43af00, 0x7eff88166590, 0xc0094af058, 0xc005dd9400)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.(*timeoutHandler).ServeHTTP.func1(0xc006787020, 0xc00dc44ea0, 0x604db80, 0xc0094af058, 0xc005dd9400)
created by k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.(*timeoutHandler).ServeHTTP

logging error output: "[+]ping ok\n[+]log ok\n[+]etcd ok\n[+]poststarthook/generic-apiserver-start-informers ok\n[+]poststarthook/bootstrap-controller ok\n[-]poststarthook/rbac/bootstrap-roles failed: reason withheld\n[+]poststarthook/scheduling/bootstrap-system-priority-classes ok\n[+]poststarthook/ca-registration ok\nhealthz check failed\n"
 [Go-http-client/1.1 127.0.0.1:38392]
I0111 23:42:17.276339  120899 get.go:70] Unable to get resource : &errors.StatusError{ErrStatus:v1.Status{TypeMeta:v1.TypeMeta{Kind:"", APIVersion:""}, ListMeta:v1.ListMeta{SelfLink:"", ResourceVersion:"", Continue:""}, Status:"Failure", Message:"clusterrolebindings.rbac.authorization.k8s.io \"system:controller:daemon-set-controller\" not found", Reason:"NotFound", Details:(*v1.StatusDetails)(0xc00a99b2c0), Code:404}}
I0111 23:42:17.276578  120899 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system:controller:daemon-set-controller: (6.809937ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38392]
I0111 23:42:17.292184  120899 wrap.go:47] POST /apis/rbac.authorization.k8s.io/v1/clusterrolebindings: (2.193598ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38392]
I0111 23:42:17.292481  120899 storage_rbac.go:215] created clusterrolebinding.rbac.authorization.k8s.io/system:controller:daemon-set-controller
I0111 23:42:17.311398  120899 get.go:70] Unable to get resource : &errors.StatusError{ErrStatus:v1.Status{TypeMeta:v1.TypeMeta{Kind:"", APIVersion:""}, ListMeta:v1.ListMeta{SelfLink:"", ResourceVersion:"", Continue:""}, Status:"Failure", Message:"clusterrolebindings.rbac.authorization.k8s.io \"system:controller:deployment-controller\" not found", Reason:"NotFound", Details:(*v1.StatusDetails)(0xc00a9c73e0), Code:404}}
I0111 23:42:17.311592  120899 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system:controller:deployment-controller: (1.767961ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38392]
I0111 23:42:17.333031  120899 wrap.go:47] POST /apis/rbac.authorization.k8s.io/v1/clusterrolebindings: (3.182817ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38392]
I0111 23:42:17.335389  120899 storage_rbac.go:215] created clusterrolebinding.rbac.authorization.k8s.io/system:controller:deployment-controller
I0111 23:42:17.351053  120899 get.go:70] Unable to get resource : &errors.StatusError{ErrStatus:v1.Status{TypeMeta:v1.TypeMeta{Kind:"", APIVersion:""}, ListMeta:v1.ListMeta{SelfLink:"", ResourceVersion:"", Continue:""}, Status:"Failure", Message:"clusterrolebindings.rbac.authorization.k8s.io \"system:controller:disruption-controller\" not found", Reason:"NotFound", Details:(*v1.StatusDetails)(0xc00a9c7800), Code:404}}
I0111 23:42:17.351220  120899 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system:controller:disruption-controller: (1.395867ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38392]
I0111 23:42:17.351886  120899 healthz.go:170] healthz check poststarthook/rbac/bootstrap-roles failed: not finished
I0111 23:42:17.352027  120899 wrap.go:47] GET /healthz: (1.324213ms) 500
goroutine 28149 [running]:
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/httplog.(*respLogger).recordStatus(0xc00a774cb0, 0x1f4)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/httplog.(*respLogger).WriteHeader(0xc00a774cb0, 0x1f4)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.(*baseTimeoutWriter).WriteHeader(0xc0039733e0, 0x1f4)
net/http.Error(0x7eff88166590, 0xc0029bc3d8, 0xc000077a40, 0x136, 0x1f4)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/healthz.handleRootHealthz.func1(0x7eff88166590, 0xc0029bc3d8, 0xc00650f900)
net/http.HandlerFunc.ServeHTTP(0xc008c6fee0, 0x7eff88166590, 0xc0029bc3d8, 0xc00650f900)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/mux.(*pathHandler).ServeHTTP(0xc009597440, 0x7eff88166590, 0xc0029bc3d8, 0xc00650f900)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/mux.(*PathRecorderMux).ServeHTTP(0xc01030dc70, 0x7eff88166590, 0xc0029bc3d8, 0xc00650f900)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server.director.ServeHTTP(0x40e951a, 0xe, 0xc0102faa20, 0xc01030dc70, 0x7eff88166590, 0xc0029bc3d8, 0xc00650f900)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/filters.WithAuthorization.func1(0x7eff88166590, 0xc0029bc3d8, 0xc00650f900)
net/http.HandlerFunc.ServeHTTP(0xc01030bfc0, 0x7eff88166590, 0xc0029bc3d8, 0xc00650f900)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.WithMaxInFlightLimit.func1(0x7eff88166590, 0xc0029bc3d8, 0xc00650f900)
net/http.HandlerFunc.ServeHTTP(0xc00f7db620, 0x7eff88166590, 0xc0029bc3d8, 0xc00650f900)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/filters.WithImpersonation.func1(0x7eff88166590, 0xc0029bc3d8, 0xc00650f900)
net/http.HandlerFunc.ServeHTTP(0xc00dc48000, 0x7eff88166590, 0xc0029bc3d8, 0xc00650f900)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/filters.WithAuthentication.func1(0x7eff88166590, 0xc0029bc3d8, 0xc00650f800)
net/http.HandlerFunc.ServeHTTP(0xc00e43af00, 0x7eff88166590, 0xc0029bc3d8, 0xc00650f800)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.(*timeoutHandler).ServeHTTP.func1(0xc0068563c0, 0xc00dc44ea0, 0x604db80, 0xc0029bc3d8, 0xc00650f800)
created by k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.(*timeoutHandler).ServeHTTP

logging error output: "[+]ping ok\n[+]log ok\n[+]etcd ok\n[+]poststarthook/generic-apiserver-start-informers ok\n[+]poststarthook/bootstrap-controller ok\n[-]poststarthook/rbac/bootstrap-roles failed: reason withheld\n[+]poststarthook/scheduling/bootstrap-system-priority-classes ok\n[+]poststarthook/ca-registration ok\nhealthz check failed\n"
 [Go-http-client/1.1 127.0.0.1:38390]
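
[editor's note] Each 500 above comes from /healthz aggregating named checks and failing the whole response while the rbac/bootstrap-roles poststarthook is still running. An illustrative sketch of that behavior (hypothetical names, not the apiserver's healthz package):

package healthzsketch

import (
	"bytes"
	"fmt"
	"net/http"
)

type check struct {
	name string
	fn   func() error
}

// handler runs every check, emits the "[+]name ok" / "[-]name failed" lines
// seen in the logging error output above, and returns 500 until all pass.
func handler(checks []check) http.HandlerFunc {
	return func(w http.ResponseWriter, r *http.Request) {
		var buf bytes.Buffer
		failed := false
		for _, c := range checks {
			if err := c.fn(); err != nil {
				// Mirror the log's "[-]... failed: reason withheld" line.
				fmt.Fprintf(&buf, "[-]%s failed: reason withheld\n", c.name)
				failed = true
			} else {
				fmt.Fprintf(&buf, "[+]%s ok\n", c.name)
			}
		}
		if failed {
			buf.WriteString("healthz check failed\n")
			http.Error(w, buf.String(), http.StatusInternalServerError) // the 500s above
			return
		}
		fmt.Fprint(w, "ok")
	}
}
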
I0111 23:42:17.372666  120899 wrap.go:47] POST /apis/rbac.authorization.k8s.io/v1/clusterrolebindings: (2.840821ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38390]
I0111 23:42:17.372969  120899 storage_rbac.go:215] created clusterrolebinding.rbac.authorization.k8s.io/system:controller:disruption-controller
I0111 23:42:17.390775  120899 get.go:70] Unable to get resource : &errors.StatusError{ErrStatus:v1.Status{TypeMeta:v1.TypeMeta{Kind:"", APIVersion:""}, ListMeta:v1.ListMeta{SelfLink:"", ResourceVersion:"", Continue:""}, Status:"Failure", Message:"clusterrolebindings.rbac.authorization.k8s.io \"system:controller:endpoint-controller\" not found", Reason:"NotFound", Details:(*v1.StatusDetails)(0xc00aa9c060), Code:404}}
I0111 23:42:17.390987  120899 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system:controller:endpoint-controller: (1.164504ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38390]
I0111 23:42:17.412144  120899 wrap.go:47] POST /apis/rbac.authorization.k8s.io/v1/clusterrolebindings: (2.37018ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38390]
I0111 23:42:17.412427  120899 storage_rbac.go:215] created clusterrolebinding.rbac.authorization.k8s.io/system:controller:endpoint-controller
I0111 23:42:17.430993  120899 get.go:70] Unable to get resource : &errors.StatusError{ErrStatus:v1.Status{TypeMeta:v1.TypeMeta{Kind:"", APIVersion:""}, ListMeta:v1.ListMeta{SelfLink:"", ResourceVersion:"", Continue:""}, Status:"Failure", Message:"clusterrolebindings.rbac.authorization.k8s.io \"system:controller:expand-controller\" not found", Reason:"NotFound", Details:(*v1.StatusDetails)(0xc00aa845a0), Code:404}}
I0111 23:42:17.431186  120899 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system:controller:expand-controller: (1.357729ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38390]
I0111 23:42:17.452975  120899 healthz.go:170] healthz check poststarthook/rbac/bootstrap-roles failed: not finished
I0111 23:42:17.453142  120899 wrap.go:47] GET /healthz: (1.346236ms) 500
goroutine 28213 [running]:
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/httplog.(*respLogger).recordStatus(0xc00a6772d0, 0x1f4)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/httplog.(*respLogger).WriteHeader(0xc00a6772d0, 0x1f4)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.(*baseTimeoutWriter).WriteHeader(0xc003a3e680, 0x1f4)
net/http.Error(0x7eff88166590, 0xc008f02990, 0xc004aa7180, 0x136, 0x1f4)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/healthz.handleRootHealthz.func1(0x7eff88166590, 0xc008f02990, 0xc00a812500)
net/http.HandlerFunc.ServeHTTP(0xc008c6fee0, 0x7eff88166590, 0xc008f02990, 0xc00a812500)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/mux.(*pathHandler).ServeHTTP(0xc009597440, 0x7eff88166590, 0xc008f02990, 0xc00a812500)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/mux.(*PathRecorderMux).ServeHTTP(0xc01030dc70, 0x7eff88166590, 0xc008f02990, 0xc00a812500)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server.director.ServeHTTP(0x40e951a, 0xe, 0xc0102faa20, 0xc01030dc70, 0x7eff88166590, 0xc008f02990, 0xc00a812500)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/filters.WithAuthorization.func1(0x7eff88166590, 0xc008f02990, 0xc00a812500)
net/http.HandlerFunc.ServeHTTP(0xc01030bfc0, 0x7eff88166590, 0xc008f02990, 0xc00a812500)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.WithMaxInFlightLimit.func1(0x7eff88166590, 0xc008f02990, 0xc00a812500)
net/http.HandlerFunc.ServeHTTP(0xc00f7db620, 0x7eff88166590, 0xc008f02990, 0xc00a812500)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/filters.WithImpersonation.func1(0x7eff88166590, 0xc008f02990, 0xc00a812500)
net/http.HandlerFunc.ServeHTTP(0xc00dc48000, 0x7eff88166590, 0xc008f02990, 0xc00a812500)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/filters.WithAuthentication.func1(0x7eff88166590, 0xc008f02990, 0xc00a812400)
net/http.HandlerFunc.ServeHTTP(0xc00e43af00, 0x7eff88166590, 0xc008f02990, 0xc00a812400)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.(*timeoutHandler).ServeHTTP.func1(0xc006c990e0, 0xc00dc44ea0, 0x604db80, 0xc008f02990, 0xc00a812400)
created by k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.(*timeoutHandler).ServeHTTP

logging error output: "[+]ping ok\n[+]log ok\n[+]etcd ok\n[+]poststarthook/generic-apiserver-start-informers ok\n[+]poststarthook/bootstrap-controller ok\n[-]poststarthook/rbac/bootstrap-roles failed: reason withheld\n[+]poststarthook/scheduling/bootstrap-system-priority-classes ok\n[+]poststarthook/ca-registration ok\nhealthz check failed\n"
 [Go-http-client/1.1 127.0.0.1:38392]
I0111 23:42:17.454187  120899 wrap.go:47] POST /apis/rbac.authorization.k8s.io/v1/clusterrolebindings: (4.34495ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38390]
I0111 23:42:17.454443  120899 storage_rbac.go:215] created clusterrolebinding.rbac.authorization.k8s.io/system:controller:expand-controller
I0111 23:42:17.470912  120899 get.go:70] Unable to get resource : &errors.StatusError{ErrStatus:v1.Status{TypeMeta:v1.TypeMeta{Kind:"", APIVersion:""}, ListMeta:v1.ListMeta{SelfLink:"", ResourceVersion:"", Continue:""}, Status:"Failure", Message:"clusterrolebindings.rbac.authorization.k8s.io \"system:controller:generic-garbage-collector\" not found", Reason:"NotFound", Details:(*v1.StatusDetails)(0xc006b0f5c0), Code:404}}
I0111 23:42:17.471103  120899 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system:controller:generic-garbage-collector: (1.316799ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38390]
I0111 23:42:17.491957  120899 wrap.go:47] POST /apis/rbac.authorization.k8s.io/v1/clusterrolebindings: (2.104086ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38390]
I0111 23:42:17.492204  120899 storage_rbac.go:215] created clusterrolebinding.rbac.authorization.k8s.io/system:controller:generic-garbage-collector
I0111 23:42:17.510988  120899 get.go:70] Unable to get resource : &errors.StatusError{ErrStatus:v1.Status{TypeMeta:v1.TypeMeta{Kind:"", APIVersion:""}, ListMeta:v1.ListMeta{SelfLink:"", ResourceVersion:"", Continue:""}, Status:"Failure", Message:"clusterrolebindings.rbac.authorization.k8s.io \"system:controller:horizontal-pod-autoscaler\" not found", Reason:"NotFound", Details:(*v1.StatusDetails)(0xc00aa85680), Code:404}}
I0111 23:42:17.511188  120899 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system:controller:horizontal-pod-autoscaler: (1.365059ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38390]
I0111 23:42:17.532508  120899 wrap.go:47] POST /apis/rbac.authorization.k8s.io/v1/clusterrolebindings: (2.59667ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38390]
I0111 23:42:17.532982  120899 storage_rbac.go:215] created clusterrolebinding.rbac.authorization.k8s.io/system:controller:horizontal-pod-autoscaler
I0111 23:42:17.551826  120899 get.go:70] Unable to get resource : &errors.StatusError{ErrStatus:v1.Status{TypeMeta:v1.TypeMeta{Kind:"", APIVersion:""}, ListMeta:v1.ListMeta{SelfLink:"", ResourceVersion:"", Continue:""}, Status:"Failure", Message:"clusterrolebindings.rbac.authorization.k8s.io \"system:controller:job-controller\" not found", Reason:"NotFound", Details:(*v1.StatusDetails)(0xc00a3e3aa0), Code:404}}
I0111 23:42:17.552069  120899 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system:controller:job-controller: (2.158931ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38390]
I0111 23:42:17.553226  120899 healthz.go:170] healthz check poststarthook/rbac/bootstrap-roles failed: not finished
I0111 23:42:17.553517  120899 wrap.go:47] GET /healthz: (956.743µs) 500
goroutine 28203 [running]:
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/httplog.(*respLogger).recordStatus(0xc00a6a5a40, 0x1f4)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/httplog.(*respLogger).WriteHeader(0xc00a6a5a40, 0x1f4)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.(*baseTimeoutWriter).WriteHeader(0xc003a7c5e0, 0x1f4)
net/http.Error(0x7eff88166590, 0xc003a8a9f0, 0xc002507680, 0x136, 0x1f4)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/healthz.handleRootHealthz.func1(0x7eff88166590, 0xc003a8a9f0, 0xc009825c00)
net/http.HandlerFunc.ServeHTTP(0xc008c6fee0, 0x7eff88166590, 0xc003a8a9f0, 0xc009825c00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/mux.(*pathHandler).ServeHTTP(0xc009597440, 0x7eff88166590, 0xc003a8a9f0, 0xc009825c00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/mux.(*PathRecorderMux).ServeHTTP(0xc01030dc70, 0x7eff88166590, 0xc003a8a9f0, 0xc009825c00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server.director.ServeHTTP(0x40e951a, 0xe, 0xc0102faa20, 0xc01030dc70, 0x7eff88166590, 0xc003a8a9f0, 0xc009825c00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/filters.WithAuthorization.func1(0x7eff88166590, 0xc003a8a9f0, 0xc009825c00)
net/http.HandlerFunc.ServeHTTP(0xc01030bfc0, 0x7eff88166590, 0xc003a8a9f0, 0xc009825c00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.WithMaxInFlightLimit.func1(0x7eff88166590, 0xc003a8a9f0, 0xc009825c00)
net/http.HandlerFunc.ServeHTTP(0xc00f7db620, 0x7eff88166590, 0xc003a8a9f0, 0xc009825c00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/filters.WithImpersonation.func1(0x7eff88166590, 0xc003a8a9f0, 0xc009825c00)
net/http.HandlerFunc.ServeHTTP(0xc00dc48000, 0x7eff88166590, 0xc003a8a9f0, 0xc009825c00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/filters.WithAuthentication.func1(0x7eff88166590, 0xc003a8a9f0, 0xc009825b00)
net/http.HandlerFunc.ServeHTTP(0xc00e43af00, 0x7eff88166590, 0xc003a8a9f0, 0xc009825b00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.(*timeoutHandler).ServeHTTP.func1(0xc006e8a720, 0xc00dc44ea0, 0x604db80, 0xc003a8a9f0, 0xc009825b00)
created by k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.(*timeoutHandler).ServeHTTP

logging error output: "[+]ping ok\n[+]log ok\n[+]etcd ok\n[+]poststarthook/generic-apiserver-start-informers ok\n[+]poststarthook/bootstrap-controller ok\n[-]poststarthook/rbac/bootstrap-roles failed: reason withheld\n[+]poststarthook/scheduling/bootstrap-system-priority-classes ok\n[+]poststarthook/ca-registration ok\nhealthz check failed\n"
 [Go-http-client/1.1 127.0.0.1:38392]
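
[editor's note] The GET /healthz requests recurring roughly every 100ms from Go-http-client look like a readiness poll by the test harness; the 500s are the expected responses until bootstrap finishes. A hedged sketch of such a wait loop (waitForHealthz is a hypothetical helper, not the harness's actual code):

package waitsketch

import (
	"fmt"
	"net/http"
	"time"
)

// waitForHealthz polls base+"/healthz" until it returns 200 OK or the
// deadline passes, sleeping briefly between attempts like the log's cadence.
func waitForHealthz(base string, timeout time.Duration) error {
	deadline := time.Now().Add(timeout)
	for time.Now().Before(deadline) {
		resp, err := http.Get(base + "/healthz")
		if err == nil {
			resp.Body.Close()
			if resp.StatusCode == http.StatusOK {
				return nil
			}
		}
		time.Sleep(100 * time.Millisecond)
	}
	return fmt.Errorf("healthz not ready after %v", timeout)
}
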
I0111 23:42:17.572385  120899 wrap.go:47] POST /apis/rbac.authorization.k8s.io/v1/clusterrolebindings: (2.551619ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38390]
I0111 23:42:17.572658  120899 storage_rbac.go:215] created clusterrolebinding.rbac.authorization.k8s.io/system:controller:job-controller
I0111 23:42:17.591101  120899 get.go:70] Unable to get resource : &errors.StatusError{ErrStatus:v1.Status{TypeMeta:v1.TypeMeta{Kind:"", APIVersion:""}, ListMeta:v1.ListMeta{SelfLink:"", ResourceVersion:"", Continue:""}, Status:"Failure", Message:"clusterrolebindings.rbac.authorization.k8s.io \"system:controller:namespace-controller\" not found", Reason:"NotFound", Details:(*v1.StatusDetails)(0xc00ac08060), Code:404}}
I0111 23:42:17.591350  120899 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system:controller:namespace-controller: (1.509858ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38390]
I0111 23:42:17.612506  120899 wrap.go:47] POST /apis/rbac.authorization.k8s.io/v1/clusterrolebindings: (2.678756ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38390]
I0111 23:42:17.612723  120899 storage_rbac.go:215] created clusterrolebinding.rbac.authorization.k8s.io/system:controller:namespace-controller
I0111 23:42:17.631029  120899 get.go:70] Unable to get resource : &errors.StatusError{ErrStatus:v1.Status{TypeMeta:v1.TypeMeta{Kind:"", APIVersion:""}, ListMeta:v1.ListMeta{SelfLink:"", ResourceVersion:"", Continue:""}, Status:"Failure", Message:"clusterrolebindings.rbac.authorization.k8s.io \"system:controller:node-controller\" not found", Reason:"NotFound", Details:(*v1.StatusDetails)(0xc00ac08480), Code:404}}
I0111 23:42:17.631238  120899 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system:controller:node-controller: (1.416221ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38390]
I0111 23:42:17.652913  120899 wrap.go:47] POST /apis/rbac.authorization.k8s.io/v1/clusterrolebindings: (3.026591ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38390]
I0111 23:42:17.653165  120899 storage_rbac.go:215] created clusterrolebinding.rbac.authorization.k8s.io/system:controller:node-controller
I0111 23:42:17.653993  120899 healthz.go:170] healthz check poststarthook/rbac/bootstrap-roles failed: not finished
I0111 23:42:17.654159  120899 wrap.go:47] GET /healthz: (2.219629ms) 500
goroutine 28219 [running]:
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/httplog.(*respLogger).recordStatus(0xc00a677e30, 0x1f4)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/httplog.(*respLogger).WriteHeader(0xc00a677e30, 0x1f4)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.(*baseTimeoutWriter).WriteHeader(0xc003ab24a0, 0x1f4)
net/http.Error(0x7eff88166590, 0xc008f02a98, 0xc0039ecb40, 0x136, 0x1f4)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/healthz.handleRootHealthz.func1(0x7eff88166590, 0xc008f02a98, 0xc00a813800)
net/http.HandlerFunc.ServeHTTP(0xc008c6fee0, 0x7eff88166590, 0xc008f02a98, 0xc00a813800)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/mux.(*pathHandler).ServeHTTP(0xc009597440, 0x7eff88166590, 0xc008f02a98, 0xc00a813800)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/mux.(*PathRecorderMux).ServeHTTP(0xc01030dc70, 0x7eff88166590, 0xc008f02a98, 0xc00a813800)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server.director.ServeHTTP(0x40e951a, 0xe, 0xc0102faa20, 0xc01030dc70, 0x7eff88166590, 0xc008f02a98, 0xc00a813800)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/filters.WithAuthorization.func1(0x7eff88166590, 0xc008f02a98, 0xc00a813800)
net/http.HandlerFunc.ServeHTTP(0xc01030bfc0, 0x7eff88166590, 0xc008f02a98, 0xc00a813800)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.WithMaxInFlightLimit.func1(0x7eff88166590, 0xc008f02a98, 0xc00a813800)
net/http.HandlerFunc.ServeHTTP(0xc00f7db620, 0x7eff88166590, 0xc008f02a98, 0xc00a813800)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/filters.WithImpersonation.func1(0x7eff88166590, 0xc008f02a98, 0xc00a813800)
net/http.HandlerFunc.ServeHTTP(0xc00dc48000, 0x7eff88166590, 0xc008f02a98, 0xc00a813800)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/filters.WithAuthentication.func1(0x7eff88166590, 0xc008f02a98, 0xc00a813700)
net/http.HandlerFunc.ServeHTTP(0xc00e43af00, 0x7eff88166590, 0xc008f02a98, 0xc00a813700)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.(*timeoutHandler).ServeHTTP.func1(0xc006faa7e0, 0xc00dc44ea0, 0x604db80, 0xc008f02a98, 0xc00a813700)
created by k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.(*timeoutHandler).ServeHTTP

logging error output: "[+]ping ok\n[+]log ok\n[+]etcd ok\n[+]poststarthook/generic-apiserver-start-informers ok\n[+]poststarthook/bootstrap-controller ok\n[-]poststarthook/rbac/bootstrap-roles failed: reason withheld\n[+]poststarthook/scheduling/bootstrap-system-priority-classes ok\n[+]poststarthook/ca-registration ok\nhealthz check failed\n"
 [Go-http-client/1.1 127.0.0.1:38392]
I0111 23:42:17.671089  120899 get.go:70] Unable to get resource : &errors.StatusError{ErrStatus:v1.Status{TypeMeta:v1.TypeMeta{Kind:"", APIVersion:""}, ListMeta:v1.ListMeta{SelfLink:"", ResourceVersion:"", Continue:""}, Status:"Failure", Message:"clusterrolebindings.rbac.authorization.k8s.io \"system:controller:persistent-volume-binder\" not found", Reason:"NotFound", Details:(*v1.StatusDetails)(0xc00aba37a0), Code:404}}
I0111 23:42:17.671272  120899 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system:controller:persistent-volume-binder: (1.434401ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38392]
I0111 23:42:17.692531  120899 wrap.go:47] POST /apis/rbac.authorization.k8s.io/v1/clusterrolebindings: (2.632864ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38392]
I0111 23:42:17.692945  120899 storage_rbac.go:215] created clusterrolebinding.rbac.authorization.k8s.io/system:controller:persistent-volume-binder
I0111 23:42:17.711094  120899 get.go:70] Unable to get resource : &errors.StatusError{ErrStatus:v1.Status{TypeMeta:v1.TypeMeta{Kind:"", APIVersion:""}, ListMeta:v1.ListMeta{SelfLink:"", ResourceVersion:"", Continue:""}, Status:"Failure", Message:"clusterrolebindings.rbac.authorization.k8s.io \"system:controller:pod-garbage-collector\" not found", Reason:"NotFound", Details:(*v1.StatusDetails)(0xc00acdc720), Code:404}}
I0111 23:42:17.711297  120899 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system:controller:pod-garbage-collector: (1.429944ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38392]
I0111 23:42:17.732452  120899 wrap.go:47] POST /apis/rbac.authorization.k8s.io/v1/clusterrolebindings: (2.553382ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38392]
I0111 23:42:17.732699  120899 storage_rbac.go:215] created clusterrolebinding.rbac.authorization.k8s.io/system:controller:pod-garbage-collector
I0111 23:42:17.751485  120899 get.go:70] Unable to get resource : &errors.StatusError{ErrStatus:v1.Status{TypeMeta:v1.TypeMeta{Kind:"", APIVersion:""}, ListMeta:v1.ListMeta{SelfLink:"", ResourceVersion:"", Continue:""}, Status:"Failure", Message:"clusterrolebindings.rbac.authorization.k8s.io \"system:controller:replicaset-controller\" not found", Reason:"NotFound", Details:(*v1.StatusDetails)(0xc00acdd200), Code:404}}
I0111 23:42:17.751655  120899 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system:controller:replicaset-controller: (1.784128ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38392]
I0111 23:42:17.753193  120899 healthz.go:170] healthz check poststarthook/rbac/bootstrap-roles failed: not finished
I0111 23:42:17.753381  120899 wrap.go:47] GET /healthz: (2.217235ms) 500
goroutine 28260 [running]:
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/httplog.(*respLogger).recordStatus(0xc00ad06a80, 0x1f4)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/httplog.(*respLogger).WriteHeader(0xc00ad06a80, 0x1f4)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.(*baseTimeoutWriter).WriteHeader(0xc003b20600, 0x1f4)
net/http.Error(0x7eff88166590, 0xc00d4b18b8, 0xc002457540, 0x136, 0x1f4)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/healthz.handleRootHealthz.func1(0x7eff88166590, 0xc00d4b18b8, 0xc0054b1100)
net/http.HandlerFunc.ServeHTTP(0xc008c6fee0, 0x7eff88166590, 0xc00d4b18b8, 0xc0054b1100)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/mux.(*pathHandler).ServeHTTP(0xc009597440, 0x7eff88166590, 0xc00d4b18b8, 0xc0054b1100)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/mux.(*PathRecorderMux).ServeHTTP(0xc01030dc70, 0x7eff88166590, 0xc00d4b18b8, 0xc0054b1100)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server.director.ServeHTTP(0x40e951a, 0xe, 0xc0102faa20, 0xc01030dc70, 0x7eff88166590, 0xc00d4b18b8, 0xc0054b1100)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/filters.WithAuthorization.func1(0x7eff88166590, 0xc00d4b18b8, 0xc0054b1100)
net/http.HandlerFunc.ServeHTTP(0xc01030bfc0, 0x7eff88166590, 0xc00d4b18b8, 0xc0054b1100)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.WithMaxInFlightLimit.func1(0x7eff88166590, 0xc00d4b18b8, 0xc0054b1100)
net/http.HandlerFunc.ServeHTTP(0xc00f7db620, 0x7eff88166590, 0xc00d4b18b8, 0xc0054b1100)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/filters.WithImpersonation.func1(0x7eff88166590, 0xc00d4b18b8, 0xc0054b1100)
net/http.HandlerFunc.ServeHTTP(0xc00dc48000, 0x7eff88166590, 0xc00d4b18b8, 0xc0054b1100)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/filters.WithAuthentication.func1(0x7eff88166590, 0xc00d4b18b8, 0xc0054b1000)
net/http.HandlerFunc.ServeHTTP(0xc00e43af00, 0x7eff88166590, 0xc00d4b18b8, 0xc0054b1000)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.(*timeoutHandler).ServeHTTP.func1(0xc00709efc0, 0xc00dc44ea0, 0x604db80, 0xc00d4b18b8, 0xc0054b1000)
created by k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.(*timeoutHandler).ServeHTTP

logging error output: "[+]ping ok\n[+]log ok\n[+]etcd ok\n[+]poststarthook/generic-apiserver-start-informers ok\n[+]poststarthook/bootstrap-controller ok\n[-]poststarthook/rbac/bootstrap-roles failed: reason withheld\n[+]poststarthook/scheduling/bootstrap-system-priority-classes ok\n[+]poststarthook/ca-registration ok\nhealthz check failed\n"
 [Go-http-client/1.1 127.0.0.1:38390]
I0111 23:42:17.773626  120899 wrap.go:47] POST /apis/rbac.authorization.k8s.io/v1/clusterrolebindings: (3.709731ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38390]
I0111 23:42:17.774110  120899 storage_rbac.go:215] created clusterrolebinding.rbac.authorization.k8s.io/system:controller:replicaset-controller
I0111 23:42:17.791177  120899 get.go:70] Unable to get resource : &errors.StatusError{ErrStatus:v1.Status{TypeMeta:v1.TypeMeta{Kind:"", APIVersion:""}, ListMeta:v1.ListMeta{SelfLink:"", ResourceVersion:"", Continue:""}, Status:"Failure", Message:"clusterrolebindings.rbac.authorization.k8s.io \"system:controller:replication-controller\" not found", Reason:"NotFound", Details:(*v1.StatusDetails)(0xc00aba3b00), Code:404}}
I0111 23:42:17.791421  120899 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system:controller:replication-controller: (1.501357ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38390]
I0111 23:42:17.812525  120899 wrap.go:47] POST /apis/rbac.authorization.k8s.io/v1/clusterrolebindings: (2.622639ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38390]
I0111 23:42:17.812935  120899 storage_rbac.go:215] created clusterrolebinding.rbac.authorization.k8s.io/system:controller:replication-controller
I0111 23:42:17.831609  120899 get.go:70] Unable to get resource : &errors.StatusError{ErrStatus:v1.Status{TypeMeta:v1.TypeMeta{Kind:"", APIVersion:""}, ListMeta:v1.ListMeta{SelfLink:"", ResourceVersion:"", Continue:""}, Status:"Failure", Message:"clusterrolebindings.rbac.authorization.k8s.io \"system:controller:resourcequota-controller\" not found", Reason:"NotFound", Details:(*v1.StatusDetails)(0xc00aba3da0), Code:404}}
I0111 23:42:17.832516  120899 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system:controller:resourcequota-controller: (2.484581ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38390]
I0111 23:42:17.852925  120899 healthz.go:170] healthz check poststarthook/rbac/bootstrap-roles failed: not finished
I0111 23:42:17.852976  120899 wrap.go:47] POST /apis/rbac.authorization.k8s.io/v1/clusterrolebindings: (3.143562ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38390]
I0111 23:42:17.853093  120899 wrap.go:47] GET /healthz: (2.388173ms) 500
goroutine 28241 [running]:
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/httplog.(*respLogger).recordStatus(0xc0080c93b0, 0x1f4)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/httplog.(*respLogger).WriteHeader(0xc0080c93b0, 0x1f4)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.(*baseTimeoutWriter).WriteHeader(0xc003a99880, 0x1f4)
net/http.Error(0x7eff88166590, 0xc002208ea0, 0xc002457a40, 0x136, 0x1f4)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/healthz.handleRootHealthz.func1(0x7eff88166590, 0xc002208ea0, 0xc002ab1400)
net/http.HandlerFunc.ServeHTTP(0xc008c6fee0, 0x7eff88166590, 0xc002208ea0, 0xc002ab1400)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/mux.(*pathHandler).ServeHTTP(0xc009597440, 0x7eff88166590, 0xc002208ea0, 0xc002ab1400)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/mux.(*PathRecorderMux).ServeHTTP(0xc01030dc70, 0x7eff88166590, 0xc002208ea0, 0xc002ab1400)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server.director.ServeHTTP(0x40e951a, 0xe, 0xc0102faa20, 0xc01030dc70, 0x7eff88166590, 0xc002208ea0, 0xc002ab1400)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/filters.WithAuthorization.func1(0x7eff88166590, 0xc002208ea0, 0xc002ab1400)
net/http.HandlerFunc.ServeHTTP(0xc01030bfc0, 0x7eff88166590, 0xc002208ea0, 0xc002ab1400)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.WithMaxInFlightLimit.func1(0x7eff88166590, 0xc002208ea0, 0xc002ab1400)
net/http.HandlerFunc.ServeHTTP(0xc00f7db620, 0x7eff88166590, 0xc002208ea0, 0xc002ab1400)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/filters.WithImpersonation.func1(0x7eff88166590, 0xc002208ea0, 0xc002ab1400)
net/http.HandlerFunc.ServeHTTP(0xc00dc48000, 0x7eff88166590, 0xc002208ea0, 0xc002ab1400)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/filters.WithAuthentication.func1(0x7eff88166590, 0xc002208ea0, 0xc002ab1300)
net/http.HandlerFunc.ServeHTTP(0xc00e43af00, 0x7eff88166590, 0xc002208ea0, 0xc002ab1300)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.(*timeoutHandler).ServeHTTP.func1(0xc00644bf80, 0xc00dc44ea0, 0x604db80, 0xc002208ea0, 0xc002ab1300)
created by k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.(*timeoutHandler).ServeHTTP

logging error output: "[+]ping ok\n[+]log ok\n[+]etcd ok\n[+]poststarthook/generic-apiserver-start-informers ok\n[+]poststarthook/bootstrap-controller ok\n[-]poststarthook/rbac/bootstrap-roles failed: reason withheld\n[+]poststarthook/scheduling/bootstrap-system-priority-classes ok\n[+]poststarthook/ca-registration ok\nhealthz check failed\n"
 [Go-http-client/1.1 127.0.0.1:38392]
I0111 23:42:17.853215  120899 storage_rbac.go:215] created clusterrolebinding.rbac.authorization.k8s.io/system:controller:resourcequota-controller
I0111 23:42:17.871114  120899 get.go:70] Unable to get resource : &errors.StatusError{ErrStatus:v1.Status{TypeMeta:v1.TypeMeta{Kind:"", APIVersion:""}, ListMeta:v1.ListMeta{SelfLink:"", ResourceVersion:"", Continue:""}, Status:"Failure", Message:"clusterrolebindings.rbac.authorization.k8s.io \"system:controller:route-controller\" not found", Reason:"NotFound", Details:(*v1.StatusDetails)(0xc00ac09f20), Code:404}}
I0111 23:42:17.871547  120899 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system:controller:route-controller: (1.675365ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38392]
I0111 23:42:17.892115  120899 wrap.go:47] POST /apis/rbac.authorization.k8s.io/v1/clusterrolebindings: (2.251079ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38392]
I0111 23:42:17.892415  120899 storage_rbac.go:215] created clusterrolebinding.rbac.authorization.k8s.io/system:controller:route-controller
I0111 23:42:17.911108  120899 get.go:70] Unable to get resource : &errors.StatusError{ErrStatus:v1.Status{TypeMeta:v1.TypeMeta{Kind:"", APIVersion:""}, ListMeta:v1.ListMeta{SelfLink:"", ResourceVersion:"", Continue:""}, Status:"Failure", Message:"clusterrolebindings.rbac.authorization.k8s.io \"system:controller:service-account-controller\" not found", Reason:"NotFound", Details:(*v1.StatusDetails)(0xc00ae04540), Code:404}}
I0111 23:42:17.911340  120899 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system:controller:service-account-controller: (1.486013ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38392]
I0111 23:42:17.931981  120899 wrap.go:47] POST /apis/rbac.authorization.k8s.io/v1/clusterrolebindings: (2.12058ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38392]
I0111 23:42:17.932244  120899 storage_rbac.go:215] created clusterrolebinding.rbac.authorization.k8s.io/system:controller:service-account-controller
I0111 23:42:17.965681  120899 get.go:70] Unable to get resource : &errors.StatusError{ErrStatus:v1.Status{TypeMeta:v1.TypeMeta{Kind:"", APIVersion:""}, ListMeta:v1.ListMeta{SelfLink:"", ResourceVersion:"", Continue:""}, Status:"Failure", Message:"clusterrolebindings.rbac.authorization.k8s.io \"system:controller:service-controller\" not found", Reason:"NotFound", Details:(*v1.StatusDetails)(0xc00ae604e0), Code:404}}
I0111 23:42:17.965898  120899 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system:controller:service-controller: (5.265275ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38390]
I0111 23:42:17.967019  120899 healthz.go:170] healthz check poststarthook/rbac/bootstrap-roles failed: not finished
I0111 23:42:17.967256  120899 wrap.go:47] GET /healthz: (6.161476ms) 500
goroutine 28308 [running]:
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/httplog.(*respLogger).recordStatus(0xc00ac78e70, 0x1f4)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/httplog.(*respLogger).WriteHeader(0xc00ac78e70, 0x1f4)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.(*baseTimeoutWriter).WriteHeader(0xc003c36480, 0x1f4)
net/http.Error(0x7eff88166590, 0xc0090875c8, 0xc002616500, 0x136, 0x1f4)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/healthz.handleRootHealthz.func1(0x7eff88166590, 0xc0090875c8, 0xc002105b00)
net/http.HandlerFunc.ServeHTTP(0xc008c6fee0, 0x7eff88166590, 0xc0090875c8, 0xc002105b00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/mux.(*pathHandler).ServeHTTP(0xc009597440, 0x7eff88166590, 0xc0090875c8, 0xc002105b00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/mux.(*PathRecorderMux).ServeHTTP(0xc01030dc70, 0x7eff88166590, 0xc0090875c8, 0xc002105b00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server.director.ServeHTTP(0x40e951a, 0xe, 0xc0102faa20, 0xc01030dc70, 0x7eff88166590, 0xc0090875c8, 0xc002105b00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/filters.WithAuthorization.func1(0x7eff88166590, 0xc0090875c8, 0xc002105b00)
net/http.HandlerFunc.ServeHTTP(0xc01030bfc0, 0x7eff88166590, 0xc0090875c8, 0xc002105b00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.WithMaxInFlightLimit.func1(0x7eff88166590, 0xc0090875c8, 0xc002105b00)
net/http.HandlerFunc.ServeHTTP(0xc00f7db620, 0x7eff88166590, 0xc0090875c8, 0xc002105b00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/filters.WithImpersonation.func1(0x7eff88166590, 0xc0090875c8, 0xc002105b00)
net/http.HandlerFunc.ServeHTTP(0xc00dc48000, 0x7eff88166590, 0xc0090875c8, 0xc002105b00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/filters.WithAuthentication.func1(0x7eff88166590, 0xc0090875c8, 0xc002105a00)
net/http.HandlerFunc.ServeHTTP(0xc00e43af00, 0x7eff88166590, 0xc0090875c8, 0xc002105a00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.(*timeoutHandler).ServeHTTP.func1(0xc006fdd740, 0xc00dc44ea0, 0x604db80, 0xc0090875c8, 0xc002105a00)
created by k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.(*timeoutHandler).ServeHTTP

logging error output: "[+]ping ok\n[+]log ok\n[+]etcd ok\n[+]poststarthook/generic-apiserver-start-informers ok\n[+]poststarthook/bootstrap-controller ok\n[-]poststarthook/rbac/bootstrap-roles failed: reason withheld\n[+]poststarthook/scheduling/bootstrap-system-priority-classes ok\n[+]poststarthook/ca-registration ok\nhealthz check failed\n"
 [Go-http-client/1.1 127.0.0.1:38392]
I0111 23:42:17.972341  120899 wrap.go:47] POST /apis/rbac.authorization.k8s.io/v1/clusterrolebindings: (2.366143ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38392]
I0111 23:42:17.972823  120899 storage_rbac.go:215] created clusterrolebinding.rbac.authorization.k8s.io/system:controller:service-controller
I0111 23:42:17.995188  120899 get.go:70] Unable to get resource : &errors.StatusError{ErrStatus:v1.Status{TypeMeta:v1.TypeMeta{Kind:"", APIVersion:""}, ListMeta:v1.ListMeta{SelfLink:"", ResourceVersion:"", Continue:""}, Status:"Failure", Message:"clusterrolebindings.rbac.authorization.k8s.io \"system:controller:statefulset-controller\" not found", Reason:"NotFound", Details:(*v1.StatusDetails)(0xc00acbdb00), Code:404}}
I0111 23:42:17.995426  120899 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system:controller:statefulset-controller: (1.593178ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38392]
I0111 23:42:18.012340  120899 wrap.go:47] POST /apis/rbac.authorization.k8s.io/v1/clusterrolebindings: (2.477386ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38392]
I0111 23:42:18.012892  120899 storage_rbac.go:215] created clusterrolebinding.rbac.authorization.k8s.io/system:controller:statefulset-controller
I0111 23:42:18.031145  120899 get.go:70] Unable to get resource : &errors.StatusError{ErrStatus:v1.Status{TypeMeta:v1.TypeMeta{Kind:"", APIVersion:""}, ListMeta:v1.ListMeta{SelfLink:"", ResourceVersion:"", Continue:""}, Status:"Failure", Message:"clusterrolebindings.rbac.authorization.k8s.io \"system:controller:ttl-controller\" not found", Reason:"NotFound", Details:(*v1.StatusDetails)(0xc00ad6e3c0), Code:404}}
I0111 23:42:18.031368  120899 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system:controller:ttl-controller: (1.476341ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38392]
I0111 23:42:18.052602  120899 wrap.go:47] POST /apis/rbac.authorization.k8s.io/v1/clusterrolebindings: (2.731328ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38392]
I0111 23:42:18.052905  120899 storage_rbac.go:215] created clusterrolebinding.rbac.authorization.k8s.io/system:controller:ttl-controller
I0111 23:42:18.070258  120899 healthz.go:170] healthz check poststarthook/rbac/bootstrap-roles failed: not finished
I0111 23:42:18.070444  120899 wrap.go:47] GET /healthz: (1.367722ms) 500
goroutine 28315 [running]:
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/httplog.(*respLogger).recordStatus(0xc00ac79ab0, 0x1f4)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/httplog.(*respLogger).WriteHeader(0xc00ac79ab0, 0x1f4)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.(*baseTimeoutWriter).WriteHeader(0xc003c6d5a0, 0x1f4)
net/http.Error(0x7eff88166590, 0xc009087750, 0xc002507cc0, 0x136, 0x1f4)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/healthz.handleRootHealthz.func1(0x7eff88166590, 0xc009087750, 0xc00a9d9a00)
net/http.HandlerFunc.ServeHTTP(0xc008c6fee0, 0x7eff88166590, 0xc009087750, 0xc00a9d9a00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/mux.(*pathHandler).ServeHTTP(0xc009597440, 0x7eff88166590, 0xc009087750, 0xc00a9d9a00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/mux.(*PathRecorderMux).ServeHTTP(0xc01030dc70, 0x7eff88166590, 0xc009087750, 0xc00a9d9a00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server.director.ServeHTTP(0x40e951a, 0xe, 0xc0102faa20, 0xc01030dc70, 0x7eff88166590, 0xc009087750, 0xc00a9d9a00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/filters.WithAuthorization.func1(0x7eff88166590, 0xc009087750, 0xc00a9d9a00)
net/http.HandlerFunc.ServeHTTP(0xc01030bfc0, 0x7eff88166590, 0xc009087750, 0xc00a9d9a00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.WithMaxInFlightLimit.func1(0x7eff88166590, 0xc009087750, 0xc00a9d9a00)
net/http.HandlerFunc.ServeHTTP(0xc00f7db620, 0x7eff88166590, 0xc009087750, 0xc00a9d9a00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/filters.WithImpersonation.func1(0x7eff88166590, 0xc009087750, 0xc00a9d9a00)
net/http.HandlerFunc.ServeHTTP(0xc00dc48000, 0x7eff88166590, 0xc009087750, 0xc00a9d9a00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/filters.WithAuthentication.func1(0x7eff88166590, 0xc009087750, 0xc00a9d9800)
net/http.HandlerFunc.ServeHTTP(0xc00e43af00, 0x7eff88166590, 0xc009087750, 0xc00a9d9800)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.(*timeoutHandler).ServeHTTP.func1(0xc0073fa2a0, 0xc00dc44ea0, 0x604db80, 0xc009087750, 0xc00a9d9800)
created by k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.(*timeoutHandler).ServeHTTP

logging error output: "[+]ping ok\n[+]log ok\n[+]etcd ok\n[+]poststarthook/generic-apiserver-start-informers ok\n[+]poststarthook/bootstrap-controller ok\n[-]poststarthook/rbac/bootstrap-roles failed: reason withheld\n[+]poststarthook/scheduling/bootstrap-system-priority-classes ok\n[+]poststarthook/ca-registration ok\nhealthz check failed\n"
 [Go-http-client/1.1 127.0.0.1:38392]
I0111 23:42:18.071687  120899 get.go:70] Unable to get resource : &errors.StatusError{ErrStatus:v1.Status{TypeMeta:v1.TypeMeta{Kind:"", APIVersion:""}, ListMeta:v1.ListMeta{SelfLink:"", ResourceVersion:"", Continue:""}, Status:"Failure", Message:"clusterrolebindings.rbac.authorization.k8s.io \"system:controller:certificate-controller\" not found", Reason:"NotFound", Details:(*v1.StatusDetails)(0xc00ae619e0), Code:404}}
I0111 23:42:18.072207  120899 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system:controller:certificate-controller: (2.085316ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38390]
I0111 23:42:18.092145  120899 wrap.go:47] POST /apis/rbac.authorization.k8s.io/v1/clusterrolebindings: (2.320659ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38390]
I0111 23:42:18.092521  120899 storage_rbac.go:215] created clusterrolebinding.rbac.authorization.k8s.io/system:controller:certificate-controller
I0111 23:42:18.111037  120899 get.go:70] Unable to get resource : &errors.StatusError{ErrStatus:v1.Status{TypeMeta:v1.TypeMeta{Kind:"", APIVersion:""}, ListMeta:v1.ListMeta{SelfLink:"", ResourceVersion:"", Continue:""}, Status:"Failure", Message:"clusterrolebindings.rbac.authorization.k8s.io \"system:controller:pvc-protection-controller\" not found", Reason:"NotFound", Details:(*v1.StatusDetails)(0xc00ae8a600), Code:404}}
I0111 23:42:18.111238  120899 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system:controller:pvc-protection-controller: (1.378589ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38390]
I0111 23:42:18.132315  120899 wrap.go:47] POST /apis/rbac.authorization.k8s.io/v1/clusterrolebindings: (2.428435ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38390]
I0111 23:42:18.132598  120899 storage_rbac.go:215] created clusterrolebinding.rbac.authorization.k8s.io/system:controller:pvc-protection-controller
I0111 23:42:18.151455  120899 get.go:70] Unable to get resource : &errors.StatusError{ErrStatus:v1.Status{TypeMeta:v1.TypeMeta{Kind:"", APIVersion:""}, ListMeta:v1.ListMeta{SelfLink:"", ResourceVersion:"", Continue:""}, Status:"Failure", Message:"clusterrolebindings.rbac.authorization.k8s.io \"system:controller:pv-protection-controller\" not found", Reason:"NotFound", Details:(*v1.StatusDetails)(0xc00af651a0), Code:404}}
I0111 23:42:18.151638  120899 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system:controller:pv-protection-controller: (1.803222ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38390]
I0111 23:42:18.152679  120899 healthz.go:170] healthz check poststarthook/rbac/bootstrap-roles failed: not finished
I0111 23:42:18.152870  120899 wrap.go:47] GET /healthz: (1.831538ms) 500
goroutine 28304 [running]:
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/httplog.(*respLogger).recordStatus(0xc00b3a82a0, 0x1f4)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/httplog.(*respLogger).WriteHeader(0xc00b3a82a0, 0x1f4)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.(*baseTimeoutWriter).WriteHeader(0xc003c7ca80, 0x1f4)
net/http.Error(0x7eff88166590, 0xc0022090c0, 0xc0039ed040, 0x136, 0x1f4)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/healthz.handleRootHealthz.func1(0x7eff88166590, 0xc0022090c0, 0xc00b5c0300)
net/http.HandlerFunc.ServeHTTP(0xc008c6fee0, 0x7eff88166590, 0xc0022090c0, 0xc00b5c0300)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/mux.(*pathHandler).ServeHTTP(0xc009597440, 0x7eff88166590, 0xc0022090c0, 0xc00b5c0300)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/mux.(*PathRecorderMux).ServeHTTP(0xc01030dc70, 0x7eff88166590, 0xc0022090c0, 0xc00b5c0300)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server.director.ServeHTTP(0x40e951a, 0xe, 0xc0102faa20, 0xc01030dc70, 0x7eff88166590, 0xc0022090c0, 0xc00b5c0300)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/filters.WithAuthorization.func1(0x7eff88166590, 0xc0022090c0, 0xc00b5c0300)
net/http.HandlerFunc.ServeHTTP(0xc01030bfc0, 0x7eff88166590, 0xc0022090c0, 0xc00b5c0300)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.WithMaxInFlightLimit.func1(0x7eff88166590, 0xc0022090c0, 0xc00b5c0300)
net/http.HandlerFunc.ServeHTTP(0xc00f7db620, 0x7eff88166590, 0xc0022090c0, 0xc00b5c0300)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/filters.WithImpersonation.func1(0x7eff88166590, 0xc0022090c0, 0xc00b5c0300)
net/http.HandlerFunc.ServeHTTP(0xc00dc48000, 0x7eff88166590, 0xc0022090c0, 0xc00b5c0300)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/filters.WithAuthentication.func1(0x7eff88166590, 0xc0022090c0, 0xc00b5c0200)
net/http.HandlerFunc.ServeHTTP(0xc00e43af00, 0x7eff88166590, 0xc0022090c0, 0xc00b5c0200)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.(*timeoutHandler).ServeHTTP.func1(0xc007545b00, 0xc00dc44ea0, 0x604db80, 0xc0022090c0, 0xc00b5c0200)
created by k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.(*timeoutHandler).ServeHTTP

logging error output: "[+]ping ok\n[+]log ok\n[+]etcd ok\n[+]poststarthook/generic-apiserver-start-informers ok\n[+]poststarthook/bootstrap-controller ok\n[-]poststarthook/rbac/bootstrap-roles failed: reason withheld\n[+]poststarthook/scheduling/bootstrap-system-priority-classes ok\n[+]poststarthook/ca-registration ok\nhealthz check failed\n"
 [Go-http-client/1.1 127.0.0.1:38392]
I0111 23:42:18.172385  120899 wrap.go:47] POST /apis/rbac.authorization.k8s.io/v1/clusterrolebindings: (2.484766ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38392]
I0111 23:42:18.172632  120899 storage_rbac.go:215] created clusterrolebinding.rbac.authorization.k8s.io/system:controller:pv-protection-controller
I0111 23:42:18.191034  120899 get.go:70] Unable to get resource : &errors.StatusError{ErrStatus:v1.Status{TypeMeta:v1.TypeMeta{Kind:"", APIVersion:""}, ListMeta:v1.ListMeta{SelfLink:"", ResourceVersion:"", Continue:""}, Status:"Failure", Message:"roles.rbac.authorization.k8s.io \"system:controller:bootstrap-signer\" not found", Reason:"NotFound", Details:(*v1.StatusDetails)(0xc00af86f60), Code:404}}
I0111 23:42:18.191223  120899 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/namespaces/kube-public/roles/system:controller:bootstrap-signer: (1.354992ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38392]
I0111 23:42:18.195080  120899 wrap.go:47] GET /api/v1/namespaces/kube-public: (3.419418ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38392]
I0111 23:42:18.211995  120899 wrap.go:47] POST /apis/rbac.authorization.k8s.io/v1/namespaces/kube-public/roles: (2.141589ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38392]
I0111 23:42:18.212232  120899 storage_rbac.go:246] created role.rbac.authorization.k8s.io/system:controller:bootstrap-signer in kube-public
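
[editor's note] For namespaced roles the bootstrap adds one step over the clusterrolebinding path: after the role GET 404s, it confirms the target namespace exists (the GET /api/v1/namespaces/kube-public ... 200 line above) before POSTing the role. A hedged sketch with the same era's client-go signatures; ensureRole is a hypothetical name:

package bootstrapsketch

import (
	rbacv1 "k8s.io/api/rbac/v1"
	apierrors "k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
)

// ensureRole creates a namespaced Role only if it is missing, checking the
// namespace first as the log's request sequence does.
func ensureRole(cs kubernetes.Interface, ns string, want *rbacv1.Role) error {
	_, err := cs.RbacV1().Roles(ns).Get(want.Name, metav1.GetOptions{})
	if err == nil {
		return nil // role already present
	}
	if !apierrors.IsNotFound(err) {
		return err
	}
	// Confirm the target namespace exists before POSTing the role.
	if _, err := cs.CoreV1().Namespaces().Get(ns, metav1.GetOptions{}); err != nil {
		return err
	}
	_, err = cs.RbacV1().Roles(ns).Create(want) // the POST ... 201 line
	return err
}
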
I0111 23:42:18.231060  120899 get.go:70] Unable to get resource : &errors.StatusError{ErrStatus:v1.Status{TypeMeta:v1.TypeMeta{Kind:"", APIVersion:""}, ListMeta:v1.ListMeta{SelfLink:"", ResourceVersion:"", Continue:""}, Status:"Failure", Message:"roles.rbac.authorization.k8s.io \"extension-apiserver-authentication-reader\" not found", Reason:"NotFound", Details:(*v1.StatusDetails)(0xc00ae8bda0), Code:404}}
I0111 23:42:18.231240  120899 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/namespaces/kube-system/roles/extension-apiserver-authentication-reader: (1.385838ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38392]
I0111 23:42:18.233343  120899 wrap.go:47] GET /api/v1/namespaces/kube-system: (1.672341ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38392]
I0111 23:42:18.251972  120899 wrap.go:47] POST /apis/rbac.authorization.k8s.io/v1/namespaces/kube-system/roles: (2.190316ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38392]
I0111 23:42:18.252077  120899 healthz.go:170] healthz check poststarthook/rbac/bootstrap-roles failed: not finished
I0111 23:42:18.252217  120899 wrap.go:47] GET /healthz: (1.567311ms) 500
goroutine 28370 [running]:
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/httplog.(*respLogger).recordStatus(0xc00affbf10, 0x1f4)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/httplog.(*respLogger).WriteHeader(0xc00affbf10, 0x1f4)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.(*baseTimeoutWriter).WriteHeader(0xc003cc1f80, 0x1f4)
net/http.Error(0x7eff88166590, 0xc003a8af58, 0xc0039ed540, 0x136, 0x1f4)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/healthz.handleRootHealthz.func1(0x7eff88166590, 0xc003a8af58, 0xc00b611600)
net/http.HandlerFunc.ServeHTTP(0xc008c6fee0, 0x7eff88166590, 0xc003a8af58, 0xc00b611600)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/mux.(*pathHandler).ServeHTTP(0xc009597440, 0x7eff88166590, 0xc003a8af58, 0xc00b611600)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/mux.(*PathRecorderMux).ServeHTTP(0xc01030dc70, 0x7eff88166590, 0xc003a8af58, 0xc00b611600)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server.director.ServeHTTP(0x40e951a, 0xe, 0xc0102faa20, 0xc01030dc70, 0x7eff88166590, 0xc003a8af58, 0xc00b611600)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/filters.WithAuthorization.func1(0x7eff88166590, 0xc003a8af58, 0xc00b611600)
net/http.HandlerFunc.ServeHTTP(0xc01030bfc0, 0x7eff88166590, 0xc003a8af58, 0xc00b611600)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.WithMaxInFlightLimit.func1(0x7eff88166590, 0xc003a8af58, 0xc00b611600)
net/http.HandlerFunc.ServeHTTP(0xc00f7db620, 0x7eff88166590, 0xc003a8af58, 0xc00b611600)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/filters.WithImpersonation.func1(0x7eff88166590, 0xc003a8af58, 0xc00b611600)
net/http.HandlerFunc.ServeHTTP(0xc00dc48000, 0x7eff88166590, 0xc003a8af58, 0xc00b611600)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/filters.WithAuthentication.func1(0x7eff88166590, 0xc003a8af58, 0xc00b611500)
net/http.HandlerFunc.ServeHTTP(0xc00e43af00, 0x7eff88166590, 0xc003a8af58, 0xc00b611500)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.(*timeoutHandler).ServeHTTP.func1(0xc00765e8a0, 0xc00dc44ea0, 0x604db80, 0xc003a8af58, 0xc00b611500)
created by k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.(*timeoutHandler).ServeHTTP

logging error output: "[+]ping ok\n[+]log ok\n[+]etcd ok\n[+]poststarthook/generic-apiserver-start-informers ok\n[+]poststarthook/bootstrap-controller ok\n[-]poststarthook/rbac/bootstrap-roles failed: reason withheld\n[+]poststarthook/scheduling/bootstrap-system-priority-classes ok\n[+]poststarthook/ca-registration ok\nhealthz check failed\n"
 [Go-http-client/1.1 127.0.0.1:38390]
I0111 23:42:18.252431  120899 storage_rbac.go:246] created role.rbac.authorization.k8s.io/extension-apiserver-authentication-reader in kube-system
I0111 23:42:18.271052  120899 get.go:70] Unable to get resource : &errors.StatusError{ErrStatus:v1.Status{TypeMeta:v1.TypeMeta{Kind:"", APIVersion:""}, ListMeta:v1.ListMeta{SelfLink:"", ResourceVersion:"", Continue:""}, Status:"Failure", Message:"roles.rbac.authorization.k8s.io \"system:controller:bootstrap-signer\" not found", Reason:"NotFound", Details:(*v1.StatusDetails)(0xc00ad6f080), Code:404}}
I0111 23:42:18.271272  120899 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/namespaces/kube-system/roles/system:controller:bootstrap-signer: (1.432304ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38390]
I0111 23:42:18.273187  120899 wrap.go:47] GET /api/v1/namespaces/kube-system: (1.484956ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38390]
I0111 23:42:18.295513  120899 wrap.go:47] POST /apis/rbac.authorization.k8s.io/v1/namespaces/kube-system/roles: (2.752234ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38390]
I0111 23:42:18.295795  120899 storage_rbac.go:246] created role.rbac.authorization.k8s.io/system:controller:bootstrap-signer in kube-system
I0111 23:42:18.311107  120899 get.go:70] Unable to get resource : &errors.StatusError{ErrStatus:v1.Status{TypeMeta:v1.TypeMeta{Kind:"", APIVersion:""}, ListMeta:v1.ListMeta{SelfLink:"", ResourceVersion:"", Continue:""}, Status:"Failure", Message:"roles.rbac.authorization.k8s.io \"system:controller:cloud-provider\" not found", Reason:"NotFound", Details:(*v1.StatusDetails)(0xc00afcf680), Code:404}}
I0111 23:42:18.311368  120899 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/namespaces/kube-system/roles/system:controller:cloud-provider: (1.498423ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38390]
I0111 23:42:18.313918  120899 wrap.go:47] GET /api/v1/namespaces/kube-system: (1.991173ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38390]
I0111 23:42:18.354509  120899 wrap.go:47] POST /apis/rbac.authorization.k8s.io/v1/namespaces/kube-system/roles: (24.59009ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38390]
I0111 23:42:18.354816  120899 storage_rbac.go:246] created role.rbac.authorization.k8s.io/system:controller:cloud-provider in kube-system
I0111 23:42:18.356732  120899 healthz.go:170] healthz check poststarthook/rbac/bootstrap-roles failed: not finished
I0111 23:42:18.356935  120899 wrap.go:47] GET /healthz: (3.450692ms) 500
goroutine 28340 [running]:
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/httplog.(*respLogger).recordStatus(0xc00b3a95e0, 0x1f4)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/httplog.(*respLogger).WriteHeader(0xc00b3a95e0, 0x1f4)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.(*baseTimeoutWriter).WriteHeader(0xc003d46800, 0x1f4)
net/http.Error(0x7eff88166590, 0xc002209210, 0xc00541c8c0, 0x136, 0x1f4)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/healthz.handleRootHealthz.func1(0x7eff88166590, 0xc002209210, 0xc00b5c1400)
net/http.HandlerFunc.ServeHTTP(0xc008c6fee0, 0x7eff88166590, 0xc002209210, 0xc00b5c1400)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/mux.(*pathHandler).ServeHTTP(0xc009597440, 0x7eff88166590, 0xc002209210, 0xc00b5c1400)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/mux.(*PathRecorderMux).ServeHTTP(0xc01030dc70, 0x7eff88166590, 0xc002209210, 0xc00b5c1400)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server.director.ServeHTTP(0x40e951a, 0xe, 0xc0102faa20, 0xc01030dc70, 0x7eff88166590, 0xc002209210, 0xc00b5c1400)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/filters.WithAuthorization.func1(0x7eff88166590, 0xc002209210, 0xc00b5c1400)
net/http.HandlerFunc.ServeHTTP(0xc01030bfc0, 0x7eff88166590, 0xc002209210, 0xc00b5c1400)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.WithMaxInFlightLimit.func1(0x7eff88166590, 0xc002209210, 0xc00b5c1400)
net/http.HandlerFunc.ServeHTTP(0xc00f7db620, 0x7eff88166590, 0xc002209210, 0xc00b5c1400)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/filters.WithImpersonation.func1(0x7eff88166590, 0xc002209210, 0xc00b5c1400)
net/http.HandlerFunc.ServeHTTP(0xc00dc48000, 0x7eff88166590, 0xc002209210, 0xc00b5c1400)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/filters.WithAuthentication.func1(0x7eff88166590, 0xc002209210, 0xc00b5c1300)
net/http.HandlerFunc.ServeHTTP(0xc00e43af00, 0x7eff88166590, 0xc002209210, 0xc00b5c1300)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.(*timeoutHandler).ServeHTTP.func1(0xc007560a80, 0xc00dc44ea0, 0x604db80, 0xc002209210, 0xc00b5c1300)
created by k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.(*timeoutHandler).ServeHTTP

logging error output: "[+]ping ok\n[+]log ok\n[+]etcd ok\n[+]poststarthook/generic-apiserver-start-informers ok\n[+]poststarthook/bootstrap-controller ok\n[-]poststarthook/rbac/bootstrap-roles failed: reason withheld\n[+]poststarthook/scheduling/bootstrap-system-priority-classes ok\n[+]poststarthook/ca-registration ok\nhealthz check failed\n"
 [Go-http-client/1.1 127.0.0.1:38392]
I0111 23:42:18.357096  120899 get.go:70] Unable to get resource : &errors.StatusError{ErrStatus:v1.Status{TypeMeta:v1.TypeMeta{Kind:"", APIVersion:""}, ListMeta:v1.ListMeta{SelfLink:"", ResourceVersion:"", Continue:""}, Status:"Failure", Message:"roles.rbac.authorization.k8s.io \"system:controller:token-cleaner\" not found", Reason:"NotFound", Details:(*v1.StatusDetails)(0xc00ae2b3e0), Code:404}}
I0111 23:42:18.357232  120899 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/namespaces/kube-system/roles/system:controller:token-cleaner: (2.181784ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38390]
I0111 23:42:18.359255  120899 wrap.go:47] GET /api/v1/namespaces/kube-system: (1.527774ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38390]
I0111 23:42:18.372641  120899 wrap.go:47] POST /apis/rbac.authorization.k8s.io/v1/namespaces/kube-system/roles: (2.753501ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38390]
I0111 23:42:18.372962  120899 storage_rbac.go:246] created role.rbac.authorization.k8s.io/system:controller:token-cleaner in kube-system
I0111 23:42:18.390764  120899 get.go:70] Unable to get resource : &errors.StatusError{ErrStatus:v1.Status{TypeMeta:v1.TypeMeta{Kind:"", APIVersion:""}, ListMeta:v1.ListMeta{SelfLink:"", ResourceVersion:"", Continue:""}, Status:"Failure", Message:"roles.rbac.authorization.k8s.io \"system::leader-locking-kube-controller-manager\" not found", Reason:"NotFound", Details:(*v1.StatusDetails)(0xc00ae2ba40), Code:404}}
I0111 23:42:18.390963  120899 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/namespaces/kube-system/roles/system::leader-locking-kube-controller-manager: (1.108141ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38390]
I0111 23:42:18.392605  120899 wrap.go:47] GET /api/v1/namespaces/kube-system: (1.224564ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38390]
I0111 23:42:18.412084  120899 wrap.go:47] POST /apis/rbac.authorization.k8s.io/v1/namespaces/kube-system/roles: (2.258076ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38390]
I0111 23:42:18.412672  120899 storage_rbac.go:246] created role.rbac.authorization.k8s.io/system::leader-locking-kube-controller-manager in kube-system
I0111 23:42:18.435017  120899 get.go:70] Unable to get resource : &errors.StatusError{ErrStatus:v1.Status{TypeMeta:v1.TypeMeta{Kind:"", APIVersion:""}, ListMeta:v1.ListMeta{SelfLink:"", ResourceVersion:"", Continue:""}, Status:"Failure", Message:"roles.rbac.authorization.k8s.io \"system::leader-locking-kube-scheduler\" not found", Reason:"NotFound", Details:(*v1.StatusDetails)(0xc00b091800), Code:404}}
I0111 23:42:18.435225  120899 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/namespaces/kube-system/roles/system::leader-locking-kube-scheduler: (5.229277ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38390]
I0111 23:42:18.437163  120899 wrap.go:47] GET /api/v1/namespaces/kube-system: (1.45678ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38390]
I0111 23:42:18.452012  120899 healthz.go:170] healthz check poststarthook/rbac/bootstrap-roles failed: not finished
I0111 23:42:18.452177  120899 wrap.go:47] GET /healthz: (1.488161ms) 500
goroutine 28324 [running]:
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/httplog.(*respLogger).recordStatus(0xc00a775c70, 0x1f4)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/httplog.(*respLogger).WriteHeader(0xc00a775c70, 0x1f4)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.(*baseTimeoutWriter).WriteHeader(0xc003a37dc0, 0x1f4)
net/http.Error(0x7eff88166590, 0xc0029bc690, 0xc002616a00, 0x136, 0x1f4)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/healthz.handleRootHealthz.func1(0x7eff88166590, 0xc0029bc690, 0xc00793ac00)
net/http.HandlerFunc.ServeHTTP(0xc008c6fee0, 0x7eff88166590, 0xc0029bc690, 0xc00793ac00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/mux.(*pathHandler).ServeHTTP(0xc009597440, 0x7eff88166590, 0xc0029bc690, 0xc00793ac00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/mux.(*PathRecorderMux).ServeHTTP(0xc01030dc70, 0x7eff88166590, 0xc0029bc690, 0xc00793ac00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server.director.ServeHTTP(0x40e951a, 0xe, 0xc0102faa20, 0xc01030dc70, 0x7eff88166590, 0xc0029bc690, 0xc00793ac00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/filters.WithAuthorization.func1(0x7eff88166590, 0xc0029bc690, 0xc00793ac00)
net/http.HandlerFunc.ServeHTTP(0xc01030bfc0, 0x7eff88166590, 0xc0029bc690, 0xc00793ac00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.WithMaxInFlightLimit.func1(0x7eff88166590, 0xc0029bc690, 0xc00793ac00)
net/http.HandlerFunc.ServeHTTP(0xc00f7db620, 0x7eff88166590, 0xc0029bc690, 0xc00793ac00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/filters.WithImpersonation.func1(0x7eff88166590, 0xc0029bc690, 0xc00793ac00)
net/http.HandlerFunc.ServeHTTP(0xc00dc48000, 0x7eff88166590, 0xc0029bc690, 0xc00793ac00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/filters.WithAuthentication.func1(0x7eff88166590, 0xc0029bc690, 0xc00793ab00)
net/http.HandlerFunc.ServeHTTP(0xc00e43af00, 0x7eff88166590, 0xc0029bc690, 0xc00793ab00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.(*timeoutHandler).ServeHTTP.func1(0xc006c65f20, 0xc00dc44ea0, 0x604db80, 0xc0029bc690, 0xc00793ab00)
created by k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.(*timeoutHandler).ServeHTTP

logging error output: "[+]ping ok\n[+]log ok\n[+]etcd ok\n[+]poststarthook/generic-apiserver-start-informers ok\n[+]poststarthook/bootstrap-controller ok\n[-]poststarthook/rbac/bootstrap-roles failed: reason withheld\n[+]poststarthook/scheduling/bootstrap-system-priority-classes ok\n[+]poststarthook/ca-registration ok\nhealthz check failed\n"
 [Go-http-client/1.1 127.0.0.1:38392]
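Annotation: the stack frames httplog.(*respLogger).WriteHeader -> recordStatus above show how the 500 gets captured for logging: the response writer is wrapped so the status code can be recorded when the handler writes it. A self-contained sketch of that wrapping idea (not the apiserver's implementation):

    package main

    import (
    	"log"
    	"net/http"
    )

    // statusRecorder remembers the status code written by the inner handler,
    // the same idea as httplog's respLogger in the traces above.
    type statusRecorder struct {
    	http.ResponseWriter
    	status int
    }

    func (r *statusRecorder) WriteHeader(code int) {
    	r.status = code
    	r.ResponseWriter.WriteHeader(code)
    }

    func withLogging(next http.Handler) http.Handler {
    	return http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
    		rec := &statusRecorder{ResponseWriter: w, status: http.StatusOK}
    		next.ServeHTTP(rec, req)
    		log.Printf("%s %s: (%d)", req.Method, req.URL.Path, rec.status)
    	})
    }

    func main() {
    	log.Fatal(http.ListenAndServe(":8080", withLogging(http.NotFoundHandler())))
    }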
I0111 23:42:18.452860  120899 wrap.go:47] POST /apis/rbac.authorization.k8s.io/v1/namespaces/kube-system/roles: (2.964693ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38390]
I0111 23:42:18.453159  120899 storage_rbac.go:246] created role.rbac.authorization.k8s.io/system::leader-locking-kube-scheduler in kube-system
I0111 23:42:18.471204  120899 get.go:70] Unable to get resource : &errors.StatusError{ErrStatus:v1.Status{TypeMeta:v1.TypeMeta{Kind:"", APIVersion:""}, ListMeta:v1.ListMeta{SelfLink:"", ResourceVersion:"", Continue:""}, Status:"Failure", Message:"rolebindings.rbac.authorization.k8s.io \"system::leader-locking-kube-controller-manager\" not found", Reason:"NotFound", Details:(*v1.StatusDetails)(0xc00b09da40), Code:404}}
I0111 23:42:18.471437  120899 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/namespaces/kube-system/rolebindings/system::leader-locking-kube-controller-manager: (1.568049ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38390]
I0111 23:42:18.473618  120899 wrap.go:47] GET /api/v1/namespaces/kube-system: (1.473718ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38390]
I0111 23:42:18.492978  120899 wrap.go:47] POST /apis/rbac.authorization.k8s.io/v1/namespaces/kube-system/rolebindings: (3.089742ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38390]
I0111 23:42:18.493228  120899 storage_rbac.go:276] created rolebinding.rbac.authorization.k8s.io/system::leader-locking-kube-controller-manager in kube-system
I0111 23:42:18.511079  120899 get.go:70] Unable to get resource : &errors.StatusError{ErrStatus:v1.Status{TypeMeta:v1.TypeMeta{Kind:"", APIVersion:""}, ListMeta:v1.ListMeta{SelfLink:"", ResourceVersion:"", Continue:""}, Status:"Failure", Message:"rolebindings.rbac.authorization.k8s.io \"system::leader-locking-kube-scheduler\" not found", Reason:"NotFound", Details:(*v1.StatusDetails)(0xc00b15e6c0), Code:404}}
I0111 23:42:18.511269  120899 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/namespaces/kube-system/rolebindings/system::leader-locking-kube-scheduler: (1.417773ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38390]
I0111 23:42:18.513095  120899 wrap.go:47] GET /api/v1/namespaces/kube-system: (1.347871ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38390]
I0111 23:42:18.531964  120899 wrap.go:47] POST /apis/rbac.authorization.k8s.io/v1/namespaces/kube-system/rolebindings: (2.105626ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38390]
I0111 23:42:18.532246  120899 storage_rbac.go:276] created rolebinding.rbac.authorization.k8s.io/system::leader-locking-kube-scheduler in kube-system
I0111 23:42:18.552200  120899 healthz.go:170] healthz check poststarthook/rbac/bootstrap-roles failed: not finished
I0111 23:42:18.552404  120899 wrap.go:47] GET /healthz: (1.226565ms) 500
goroutine 28350 [running]:
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/httplog.(*respLogger).recordStatus(0xc00e68eee0, 0x1f4)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/httplog.(*respLogger).WriteHeader(0xc00e68eee0, 0x1f4)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.(*baseTimeoutWriter).WriteHeader(0xc003de70e0, 0x1f4)
net/http.Error(0x7eff88166590, 0xc0022094c0, 0xc0039ed900, 0x136, 0x1f4)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/healthz.handleRootHealthz.func1(0x7eff88166590, 0xc0022094c0, 0xc00fde0400)
net/http.HandlerFunc.ServeHTTP(0xc008c6fee0, 0x7eff88166590, 0xc0022094c0, 0xc00fde0400)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/mux.(*pathHandler).ServeHTTP(0xc009597440, 0x7eff88166590, 0xc0022094c0, 0xc00fde0400)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/mux.(*PathRecorderMux).ServeHTTP(0xc01030dc70, 0x7eff88166590, 0xc0022094c0, 0xc00fde0400)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server.director.ServeHTTP(0x40e951a, 0xe, 0xc0102faa20, 0xc01030dc70, 0x7eff88166590, 0xc0022094c0, 0xc00fde0400)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/filters.WithAuthorization.func1(0x7eff88166590, 0xc0022094c0, 0xc00fde0400)
net/http.HandlerFunc.ServeHTTP(0xc01030bfc0, 0x7eff88166590, 0xc0022094c0, 0xc00fde0400)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.WithMaxInFlightLimit.func1(0x7eff88166590, 0xc0022094c0, 0xc00fde0400)
net/http.HandlerFunc.ServeHTTP(0xc00f7db620, 0x7eff88166590, 0xc0022094c0, 0xc00fde0400)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/filters.WithImpersonation.func1(0x7eff88166590, 0xc0022094c0, 0xc00fde0400)
net/http.HandlerFunc.ServeHTTP(0xc00dc48000, 0x7eff88166590, 0xc0022094c0, 0xc00fde0400)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/filters.WithAuthentication.func1(0x7eff88166590, 0xc0022094c0, 0xc00fde0300)
net/http.HandlerFunc.ServeHTTP(0xc00e43af00, 0x7eff88166590, 0xc0022094c0, 0xc00fde0300)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.(*timeoutHandler).ServeHTTP.func1(0xc0078bbaa0, 0xc00dc44ea0, 0x604db80, 0xc0022094c0, 0xc00fde0300)
created by k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.(*timeoutHandler).ServeHTTP

logging error output: "[+]ping ok\n[+]log ok\n[+]etcd ok\n[+]poststarthook/generic-apiserver-start-informers ok\n[+]poststarthook/bootstrap-controller ok\n[-]poststarthook/rbac/bootstrap-roles failed: reason withheld\n[+]poststarthook/scheduling/bootstrap-system-priority-classes ok\n[+]poststarthook/ca-registration ok\nhealthz check failed\n"
 [Go-http-client/1.1 127.0.0.1:38392]
I0111 23:42:18.553207  120899 get.go:70] Unable to get resource : &errors.StatusError{ErrStatus:v1.Status{TypeMeta:v1.TypeMeta{Kind:"", APIVersion:""}, ListMeta:v1.ListMeta{SelfLink:"", ResourceVersion:"", Continue:""}, Status:"Failure", Message:"rolebindings.rbac.authorization.k8s.io \"system:controller:bootstrap-signer\" not found", Reason:"NotFound", Details:(*v1.StatusDetails)(0xc00b14aa20), Code:404}}
I0111 23:42:18.553448  120899 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/namespaces/kube-system/rolebindings/system:controller:bootstrap-signer: (3.546863ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38390]
I0111 23:42:18.555201  120899 wrap.go:47] GET /api/v1/namespaces/kube-system: (1.346516ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38390]
I0111 23:42:18.572271  120899 wrap.go:47] POST /apis/rbac.authorization.k8s.io/v1/namespaces/kube-system/rolebindings: (2.396881ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38390]
I0111 23:42:18.572642  120899 storage_rbac.go:276] created rolebinding.rbac.authorization.k8s.io/system:controller:bootstrap-signer in kube-system
I0111 23:42:18.599944  120899 get.go:70] Unable to get resource : &errors.StatusError{ErrStatus:v1.Status{TypeMeta:v1.TypeMeta{Kind:"", APIVersion:""}, ListMeta:v1.ListMeta{SelfLink:"", ResourceVersion:"", Continue:""}, Status:"Failure", Message:"rolebindings.rbac.authorization.k8s.io \"system:controller:cloud-provider\" not found", Reason:"NotFound", Details:(*v1.StatusDetails)(0xc00b0e8cc0), Code:404}}
I0111 23:42:18.600166  120899 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/namespaces/kube-system/rolebindings/system:controller:cloud-provider: (1.306486ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38390]
I0111 23:42:18.602124  120899 wrap.go:47] GET /api/v1/namespaces/kube-system: (1.315593ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38390]
I0111 23:42:18.612212  120899 wrap.go:47] POST /apis/rbac.authorization.k8s.io/v1/namespaces/kube-system/rolebindings: (2.361453ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38390]
I0111 23:42:18.612568  120899 storage_rbac.go:276] created rolebinding.rbac.authorization.k8s.io/system:controller:cloud-provider in kube-system
I0111 23:42:18.631096  120899 get.go:70] Unable to get resource : &errors.StatusError{ErrStatus:v1.Status{TypeMeta:v1.TypeMeta{Kind:"", APIVersion:""}, ListMeta:v1.ListMeta{SelfLink:"", ResourceVersion:"", Continue:""}, Status:"Failure", Message:"rolebindings.rbac.authorization.k8s.io \"system:controller:token-cleaner\" not found", Reason:"NotFound", Details:(*v1.StatusDetails)(0xc00b0e9740), Code:404}}
I0111 23:42:18.631419  120899 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/namespaces/kube-system/rolebindings/system:controller:token-cleaner: (1.53447ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38390]
I0111 23:42:18.633326  120899 wrap.go:47] GET /api/v1/namespaces/kube-system: (1.445162ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38390]
I0111 23:42:18.651912  120899 healthz.go:170] healthz check poststarthook/rbac/bootstrap-roles failed: not finished
I0111 23:42:18.651959  120899 wrap.go:47] POST /apis/rbac.authorization.k8s.io/v1/namespaces/kube-system/rolebindings: (2.048581ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38390]
I0111 23:42:18.652178  120899 wrap.go:47] GET /healthz: (1.474069ms) 500
goroutine 28429 [running]:
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/httplog.(*respLogger).recordStatus(0xc00e6a9ab0, 0x1f4)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/httplog.(*respLogger).WriteHeader(0xc00e6a9ab0, 0x1f4)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.(*baseTimeoutWriter).WriteHeader(0xc003e2b980, 0x1f4)
net/http.Error(0x7eff88166590, 0xc008f032d0, 0xc002499e00, 0x136, 0x1f4)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/healthz.handleRootHealthz.func1(0x7eff88166590, 0xc008f032d0, 0xc00fdd7c00)
net/http.HandlerFunc.ServeHTTP(0xc008c6fee0, 0x7eff88166590, 0xc008f032d0, 0xc00fdd7c00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/mux.(*pathHandler).ServeHTTP(0xc009597440, 0x7eff88166590, 0xc008f032d0, 0xc00fdd7c00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/mux.(*PathRecorderMux).ServeHTTP(0xc01030dc70, 0x7eff88166590, 0xc008f032d0, 0xc00fdd7c00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server.director.ServeHTTP(0x40e951a, 0xe, 0xc0102faa20, 0xc01030dc70, 0x7eff88166590, 0xc008f032d0, 0xc00fdd7c00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/filters.WithAuthorization.func1(0x7eff88166590, 0xc008f032d0, 0xc00fdd7c00)
net/http.HandlerFunc.ServeHTTP(0xc01030bfc0, 0x7eff88166590, 0xc008f032d0, 0xc00fdd7c00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.WithMaxInFlightLimit.func1(0x7eff88166590, 0xc008f032d0, 0xc00fdd7c00)
net/http.HandlerFunc.ServeHTTP(0xc00f7db620, 0x7eff88166590, 0xc008f032d0, 0xc00fdd7c00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/filters.WithImpersonation.func1(0x7eff88166590, 0xc008f032d0, 0xc00fdd7c00)
net/http.HandlerFunc.ServeHTTP(0xc00dc48000, 0x7eff88166590, 0xc008f032d0, 0xc00fdd7c00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/filters.WithAuthentication.func1(0x7eff88166590, 0xc008f032d0, 0xc00fdd7b00)
net/http.HandlerFunc.ServeHTTP(0xc00e43af00, 0x7eff88166590, 0xc008f032d0, 0xc00fdd7b00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.(*timeoutHandler).ServeHTTP.func1(0xc0020d1800, 0xc00dc44ea0, 0x604db80, 0xc008f032d0, 0xc00fdd7b00)
created by k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.(*timeoutHandler).ServeHTTP

logging error output: "[+]ping ok\n[+]log ok\n[+]etcd ok\n[+]poststarthook/generic-apiserver-start-informers ok\n[+]poststarthook/bootstrap-controller ok\n[-]poststarthook/rbac/bootstrap-roles failed: reason withheld\n[+]poststarthook/scheduling/bootstrap-system-priority-classes ok\n[+]poststarthook/ca-registration ok\nhealthz check failed\n"
 [Go-http-client/1.1 127.0.0.1:38392]
I0111 23:42:18.652453  120899 storage_rbac.go:276] created rolebinding.rbac.authorization.k8s.io/system:controller:token-cleaner in kube-system
I0111 23:42:18.671301  120899 get.go:70] Unable to get resource : &errors.StatusError{ErrStatus:v1.Status{TypeMeta:v1.TypeMeta{Kind:"", APIVersion:""}, ListMeta:v1.ListMeta{SelfLink:"", ResourceVersion:"", Continue:""}, Status:"Failure", Message:"rolebindings.rbac.authorization.k8s.io \"system:controller:bootstrap-signer\" not found", Reason:"NotFound", Details:(*v1.StatusDetails)(0xc00b14b980), Code:404}}
I0111 23:42:18.671541  120899 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/namespaces/kube-public/rolebindings/system:controller:bootstrap-signer: (1.657658ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38390]
I0111 23:42:18.673864  120899 wrap.go:47] GET /api/v1/namespaces/kube-public: (1.599713ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38390]
I0111 23:42:18.694214  120899 wrap.go:47] POST /apis/rbac.authorization.k8s.io/v1/namespaces/kube-public/rolebindings: (4.319665ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38390]
I0111 23:42:18.694510  120899 storage_rbac.go:276] created rolebinding.rbac.authorization.k8s.io/system:controller:bootstrap-signer in kube-public
I0111 23:42:18.751814  120899 wrap.go:47] GET /healthz: (1.020814ms) 200 [Go-http-client/1.1 127.0.0.1:38390]
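Annotation: this is the first /healthz that returns 200; the bootstrap-roles hook has finished, so the test fixture can proceed. The usual way a fixture waits for this is to poll until 200; a minimal stdlib sketch (URL and timeout are illustrative):

    package main

    import (
    	"fmt"
    	"net/http"
    	"time"
    )

    // waitForHealthz polls /healthz until it returns 200 or the deadline passes.
    func waitForHealthz(url string, timeout time.Duration) error {
    	deadline := time.Now().Add(timeout)
    	for time.Now().Before(deadline) {
    		resp, err := http.Get(url)
    		if err == nil {
    			resp.Body.Close()
    			if resp.StatusCode == http.StatusOK {
    				return nil
    			}
    		}
    		time.Sleep(100 * time.Millisecond)
    	}
    	return fmt.Errorf("healthz not ready after %v", timeout)
    }

    func main() {
    	if err := waitForHealthz("http://127.0.0.1:8080/healthz", 30*time.Second); err != nil {
    		fmt.Println(err)
    	}
    }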
W0111 23:42:18.752609  120899 mutation_detector.go:48] Mutation detector is enabled, this will result in memory leakage.
W0111 23:42:18.752657  120899 mutation_detector.go:48] Mutation detector is enabled, this will result in memory leakage.
W0111 23:42:18.752684  120899 mutation_detector.go:48] Mutation detector is enabled, this will result in memory leakage.
W0111 23:42:18.752697  120899 mutation_detector.go:48] Mutation detector is enabled, this will result in memory leakage.
W0111 23:42:18.752708  120899 mutation_detector.go:48] Mutation detector is enabled, this will result in memory leakage.
W0111 23:42:18.752719  120899 mutation_detector.go:48] Mutation detector is enabled, this will result in memory leakage.
W0111 23:42:18.752728  120899 mutation_detector.go:48] Mutation detector is enabled, this will result in memory leakage.
W0111 23:42:18.752759  120899 mutation_detector.go:48] Mutation detector is enabled, this will result in memory leakage.
W0111 23:42:18.752784  120899 mutation_detector.go:48] Mutation detector is enabled, this will result in memory leakage.
W0111 23:42:18.752799  120899 mutation_detector.go:48] Mutation detector is enabled, this will result in memory leakage.
I0111 23:42:18.752923  120899 factory.go:745] Creating scheduler from algorithm provider 'DefaultProvider'
I0111 23:42:18.752932  120899 factory.go:826] Creating scheduler with fit predicates 'map[NoDiskConflict:{} GeneralPredicates:{} PodToleratesNodeTaints:{} MatchInterPodAffinity:{} CheckNodePIDPressure:{} MaxAzureDiskVolumeCount:{} MaxGCEPDVolumeCount:{} MaxCSIVolumeCountPred:{} CheckNodeMemoryPressure:{} CheckNodeCondition:{} CheckVolumeBinding:{} MaxEBSVolumeCount:{} CheckNodeDiskPressure:{} NoVolumeZoneConflict:{}]' and priority functions 'map[NodeAffinityPriority:{} TaintTolerationPriority:{} ImageLocalityPriority:{} SelectorSpreadPriority:{} InterPodAffinityPriority:{} LeastRequestedPriority:{} BalancedResourceAllocation:{} NodePreferAvoidPodsPriority:{}]'
I0111 23:42:18.753035  120899 controller_utils.go:1021] Waiting for caches to sync for scheduler controller
I0111 23:42:18.753273  120899 reflector.go:131] Starting reflector *v1.Pod (12h0m0s) from k8s.io/kubernetes/test/integration/scheduler/util.go:194
I0111 23:42:18.753316  120899 reflector.go:169] Listing and watching *v1.Pod from k8s.io/kubernetes/test/integration/scheduler/util.go:194
I0111 23:42:18.754335  120899 wrap.go:47] GET /api/v1/pods?fieldSelector=status.phase%21%3DFailed%2Cstatus.phase%21%3DSucceeded&limit=500&resourceVersion=0: (772.273µs) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38390]
I0111 23:42:18.755490  120899 get.go:252] Starting watch for /api/v1/pods, rv=18597 labels= fields=status.phase!=Failed,status.phase!=Succeeded timeout=7m22s
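Annotation: the list-then-watch above uses the field selector status.phase!=Failed,status.phase!=Succeeded, i.e. the scheduler tracks only pods that are not yet terminal. A hedged client-go sketch issuing the same list (recent signatures; the apiserver address is illustrative for an insecure test server):

    package main

    import (
    	"context"
    	"fmt"

    	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    	"k8s.io/client-go/kubernetes"
    	"k8s.io/client-go/rest"
    )

    func main() {
    	cs, err := kubernetes.NewForConfig(&rest.Config{Host: "http://127.0.0.1:8080"})
    	if err != nil {
    		panic(err)
    	}
    	// Same selector as the reflector's LIST in the log line above.
    	pods, err := cs.CoreV1().Pods(metav1.NamespaceAll).List(context.TODO(), metav1.ListOptions{
    		FieldSelector: "status.phase!=Failed,status.phase!=Succeeded",
    	})
    	if err != nil {
    		panic(err)
    	}
    	fmt.Println("non-terminal pods:", len(pods.Items))
    }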
I0111 23:42:18.853219  120899 shared_informer.go:123] caches populated
I0111 23:42:18.853265  120899 controller_utils.go:1028] Caches are synced for scheduler controller
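Annotation: "Caches are synced" is the standard WaitForCacheSync handshake, where the controller blocks until every informer reports HasSynced. A tiny runnable sketch with a stand-in sync function:

    package main

    import (
    	"fmt"
    	"time"

    	"k8s.io/client-go/tools/cache"
    )

    func main() {
    	stop := make(chan struct{})
    	start := time.Now()
    	// hasSynced stands in for an informer's HasSynced method.
    	hasSynced := func() bool { return time.Since(start) > 50*time.Millisecond }
    	if cache.WaitForCacheSync(stop, hasSynced) {
    		fmt.Println("caches are synced")
    	}
    }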
I0111 23:42:18.853795  120899 reflector.go:131] Starting reflector *v1.Service (1s) from k8s.io/client-go/informers/factory.go:132
I0111 23:42:18.853826  120899 reflector.go:169] Listing and watching *v1.Service from k8s.io/client-go/informers/factory.go:132
I0111 23:42:18.854219  120899 reflector.go:131] Starting reflector *v1.ReplicationController (1s) from k8s.io/client-go/informers/factory.go:132
I0111 23:42:18.854241  120899 reflector.go:169] Listing and watching *v1.ReplicationController from k8s.io/client-go/informers/factory.go:132
I0111 23:42:18.855121  120899 wrap.go:47] GET /api/v1/services?limit=500&resourceVersion=0: (724.352µs) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38392]
I0111 23:42:18.855963  120899 get.go:252] Starting watch for /api/v1/services, rv=18603 labels= fields= timeout=5m7s
I0111 23:42:18.856418  120899 reflector.go:131] Starting reflector *v1beta1.PodDisruptionBudget (1s) from k8s.io/client-go/informers/factory.go:132
I0111 23:42:18.856445  120899 reflector.go:169] Listing and watching *v1beta1.PodDisruptionBudget from k8s.io/client-go/informers/factory.go:132
I0111 23:42:18.857010  120899 reflector.go:131] Starting reflector *v1.ReplicaSet (1s) from k8s.io/client-go/informers/factory.go:132
I0111 23:42:18.857035  120899 reflector.go:169] Listing and watching *v1.ReplicaSet from k8s.io/client-go/informers/factory.go:132
I0111 23:42:18.857448  120899 reflector.go:131] Starting reflector *v1.StorageClass (1s) from k8s.io/client-go/informers/factory.go:132
I0111 23:42:18.857475  120899 reflector.go:169] Listing and watching *v1.StorageClass from k8s.io/client-go/informers/factory.go:132
I0111 23:42:18.857863  120899 reflector.go:131] Starting reflector *v1.Node (1s) from k8s.io/client-go/informers/factory.go:132
I0111 23:42:18.857889  120899 reflector.go:169] Listing and watching *v1.Node from k8s.io/client-go/informers/factory.go:132
I0111 23:42:18.859440  120899 wrap.go:47] GET /apis/apps/v1/replicasets?limit=500&resourceVersion=0: (577.123µs) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38456]
I0111 23:42:18.859976  120899 wrap.go:47] GET /apis/policy/v1beta1/poddisruptionbudgets?limit=500&resourceVersion=0: (410.055µs) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38454]
I0111 23:42:18.860073  120899 wrap.go:47] GET /apis/storage.k8s.io/v1/storageclasses?limit=500&resourceVersion=0: (473.378µs) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38458]
I0111 23:42:18.860703  120899 wrap.go:47] GET /api/v1/nodes?limit=500&resourceVersion=0: (433.534µs) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38456]
I0111 23:42:18.860710  120899 get.go:252] Starting watch for /apis/policy/v1beta1/poddisruptionbudgets, rv=18598 labels= fields= timeout=7m42s
I0111 23:42:18.860699  120899 get.go:252] Starting watch for /apis/apps/v1/replicasets, rv=18598 labels= fields= timeout=9m38s
I0111 23:42:18.860963  120899 reflector.go:131] Starting reflector *v1.PersistentVolume (1s) from k8s.io/client-go/informers/factory.go:132
I0111 23:42:18.860979  120899 reflector.go:169] Listing and watching *v1.PersistentVolume from k8s.io/client-go/informers/factory.go:132
I0111 23:42:18.861027  120899 get.go:252] Starting watch for /apis/storage.k8s.io/v1/storageclasses, rv=18598 labels= fields= timeout=8m4s
I0111 23:42:18.861409  120899 reflector.go:131] Starting reflector *v1.StatefulSet (1s) from k8s.io/client-go/informers/factory.go:132
I0111 23:42:18.861427  120899 reflector.go:169] Listing and watching *v1.StatefulSet from k8s.io/client-go/informers/factory.go:132
I0111 23:42:18.861431  120899 get.go:252] Starting watch for /api/v1/nodes, rv=18597 labels= fields= timeout=5m46s
I0111 23:42:18.861174  120899 reflector.go:131] Starting reflector *v1.PersistentVolumeClaim (1s) from k8s.io/client-go/informers/factory.go:132
I0111 23:42:18.861507  120899 reflector.go:169] Listing and watching *v1.PersistentVolumeClaim from k8s.io/client-go/informers/factory.go:132
I0111 23:42:18.861632  120899 wrap.go:47] GET /api/v1/persistentvolumes?limit=500&resourceVersion=0: (450.027µs) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38466]
I0111 23:42:18.862158  120899 wrap.go:47] GET /apis/apps/v1/statefulsets?limit=500&resourceVersion=0: (339.072µs) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38468]
I0111 23:42:18.862540  120899 wrap.go:47] GET /api/v1/persistentvolumeclaims?limit=500&resourceVersion=0: (427.727µs) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38466]
I0111 23:42:18.862731  120899 get.go:252] Starting watch for /apis/apps/v1/statefulsets, rv=18598 labels= fields= timeout=7m6s
I0111 23:42:18.863118  120899 get.go:252] Starting watch for /api/v1/persistentvolumeclaims, rv=18597 labels= fields= timeout=8m23s
I0111 23:42:18.863479  120899 wrap.go:47] GET /api/v1/replicationcontrollers?limit=500&resourceVersion=0: (4.571981ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38462]
I0111 23:42:18.864646  120899 get.go:252] Starting watch for /api/v1/persistentvolumes, rv=18597 labels= fields= timeout=5m6s
I0111 23:42:18.864787  120899 get.go:252] Starting watch for /api/v1/replicationcontrollers, rv=18597 labels= fields= timeout=6m45s
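Annotation: the burst of "Starting reflector ... (1s)" lines comes from a shared informer factory whose default resync period is one second, which also explains the "forcing resync" lines shortly below. A hedged sketch of the same setup (recent client-go; the address is illustrative, and WaitForCacheSync needs a reachable apiserver):

    package main

    import (
    	"fmt"
    	"time"

    	"k8s.io/client-go/informers"
    	"k8s.io/client-go/kubernetes"
    	"k8s.io/client-go/rest"
    )

    func main() {
    	cs, err := kubernetes.NewForConfig(&rest.Config{Host: "http://127.0.0.1:8080"})
    	if err != nil {
    		panic(err)
    	}
    	factory := informers.NewSharedInformerFactory(cs, time.Second) // 1s resync, as in the log
    	factory.Core().V1().Services().Informer()                     // register the informers you need
    	factory.Core().V1().Nodes().Informer()
    	stop := make(chan struct{})
    	factory.Start(stop)
    	fmt.Println("synced:", factory.WaitForCacheSync(stop))
    }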
I0111 23:42:18.953616  120899 shared_informer.go:123] caches populated
I0111 23:42:19.060152  120899 shared_informer.go:123] caches populated
I0111 23:42:19.160391  120899 shared_informer.go:123] caches populated
I0111 23:42:19.260693  120899 shared_informer.go:123] caches populated
I0111 23:42:19.360901  120899 shared_informer.go:123] caches populated
I0111 23:42:19.461119  120899 shared_informer.go:123] caches populated
I0111 23:42:19.561381  120899 shared_informer.go:123] caches populated
I0111 23:42:19.661552  120899 shared_informer.go:123] caches populated
I0111 23:42:19.761879  120899 shared_informer.go:123] caches populated
I0111 23:42:19.855797  120899 reflector.go:215] k8s.io/client-go/informers/factory.go:132: forcing resync
I0111 23:42:19.860583  120899 reflector.go:215] k8s.io/client-go/informers/factory.go:132: forcing resync
I0111 23:42:19.861228  120899 reflector.go:215] k8s.io/client-go/informers/factory.go:132: forcing resync
I0111 23:42:19.862073  120899 shared_informer.go:123] caches populated
I0111 23:42:19.862511  120899 reflector.go:215] k8s.io/client-go/informers/factory.go:132: forcing resync
I0111 23:42:19.862959  120899 reflector.go:215] k8s.io/client-go/informers/factory.go:132: forcing resync
I0111 23:42:19.865918  120899 wrap.go:47] POST /api/v1/nodes: (3.388612ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38472]
I0111 23:42:19.869523  120899 wrap.go:47] POST /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods: (2.743183ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38472]
I0111 23:42:19.870149  120899 scheduling_queue.go:821] About to try and schedule pod preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/rpod-0
I0111 23:42:19.870167  120899 scheduler.go:454] Attempting to schedule pod: preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/rpod-0
I0111 23:42:19.870329  120899 scheduler_binder.go:211] AssumePodVolumes for pod "preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/rpod-0", node "node1"
I0111 23:42:19.870341  120899 scheduler_binder.go:221] AssumePodVolumes for pod "preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/rpod-0", node "node1": all PVCs bound and nothing to do
I0111 23:42:19.870383  120899 factory.go:1166] Attempting to bind rpod-0 to node1
I0111 23:42:19.872127  120899 wrap.go:47] POST /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods: (1.984822ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38472]
I0111 23:42:19.872190  120899 scheduling_queue.go:821] About to try and schedule pod preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/rpod-1
I0111 23:42:19.872200  120899 scheduler.go:454] Attempting to schedule pod: preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/rpod-1
I0111 23:42:19.872345  120899 scheduler_binder.go:211] AssumePodVolumes for pod "preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/rpod-1", node "node1"
I0111 23:42:19.872359  120899 scheduler_binder.go:221] AssumePodVolumes for pod "preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/rpod-1", node "node1": all PVCs bound and nothing to do
I0111 23:42:19.872397  120899 factory.go:1166] Attempting to bind rpod-1 to node1
I0111 23:42:19.873696  120899 wrap.go:47] POST /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/rpod-0/binding: (2.815413ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38504]
I0111 23:42:19.873904  120899 scheduler.go:569] pod preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/rpod-0 is bound successfully on node node1, 1 nodes evaluated, 1 nodes were found feasible
I0111 23:42:19.875044  120899 wrap.go:47] POST /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/rpod-1/binding: (2.414239ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38472]
I0111 23:42:19.875183  120899 scheduler.go:569] pod preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/rpod-1 is bound successfully on node node1, 1 nodes evaluated, 1 nodes were found feasible
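Annotation: "Attempting to bind" followed by POST .../pods/<name>/binding is the bind step; the scheduler writes a Binding object whose target is the chosen node. A hedged helper using a recent client-go signature (older clients take no context or options arguments):

    package sketch

    import (
    	"context"

    	v1 "k8s.io/api/core/v1"
    	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    	"k8s.io/client-go/kubernetes"
    )

    // bindPod issues the same binding subresource POST seen in the log.
    func bindPod(cs kubernetes.Interface, ns, pod, node string) error {
    	return cs.CoreV1().Pods(ns).Bind(context.TODO(), &v1.Binding{
    		ObjectMeta: metav1.ObjectMeta{Namespace: ns, Name: pod},
    		Target:     v1.ObjectReference{Kind: "Node", Name: node},
    	}, metav1.CreateOptions{})
    }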
I0111 23:42:19.876260  120899 wrap.go:47] POST /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/events: (2.001849ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38504]
I0111 23:42:19.878396  120899 wrap.go:47] POST /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/events: (1.649421ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38504]
I0111 23:42:19.974635  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/rpod-0: (1.841671ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38504]
I0111 23:42:20.077710  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/rpod-1: (2.273122ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38504]
I0111 23:42:20.078227  120899 preemption_test.go:561] Creating the preemptor pod...
I0111 23:42:20.081541  120899 wrap.go:47] POST /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods: (2.881074ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38504]
I0111 23:42:20.081792  120899 preemption_test.go:567] Creating additional pods...
I0111 23:42:20.082141  120899 scheduling_queue.go:821] About to try and schedule pod preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/preemptor-pod
I0111 23:42:20.082152  120899 scheduler.go:454] Attempting to schedule pod: preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/preemptor-pod
I0111 23:42:20.082296  120899 factory.go:1070] Unable to schedule preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/preemptor-pod: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0111 23:42:20.082337  120899 factory.go:1175] Updating pod condition for preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/preemptor-pod to (PodScheduled==False, Reason=Unschedulable)
I0111 23:42:20.087210  120899 wrap.go:47] POST /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/events: (4.037367ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38508]
I0111 23:42:20.090005  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/preemptor-pod: (6.106417ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38506]
I0111 23:42:20.090436  120899 wrap.go:47] PUT /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/preemptor-pod/status: (5.235921ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38472]
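Annotation: each "Updating pod condition ... (PodScheduled==False, Reason=Unschedulable)" line ends in a status PUT like the one above. The condition being written looks roughly like this (sketch; the message text is illustrative):

    package sketch

    import (
    	v1 "k8s.io/api/core/v1"
    	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    )

    // unschedulableCondition builds the PodScheduled=False condition the
    // scheduler records while a pod cannot be placed.
    func unschedulableCondition(msg string) v1.PodCondition {
    	return v1.PodCondition{
    		Type:               v1.PodScheduled,
    		Status:             v1.ConditionFalse,
    		Reason:             v1.PodReasonUnschedulable,
    		Message:            msg,
    		LastTransitionTime: metav1.Now(),
    	}
    }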
I0111 23:42:20.090808  120899 wrap.go:47] POST /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods: (6.479907ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38504]
I0111 23:42:20.094852  120899 wrap.go:47] POST /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods: (2.188877ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38504]
I0111 23:42:20.096533  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/preemptor-pod: (4.265625ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38472]
I0111 23:42:20.096878  120899 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0111 23:42:20.099371  120899 wrap.go:47] POST /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods: (3.948479ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38506]
I0111 23:42:20.099801  120899 wrap.go:47] PUT /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/preemptor-pod/status: (2.613144ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38472]
I0111 23:42:20.102520  120899 wrap.go:47] POST /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods: (2.30481ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38506]
I0111 23:42:20.105843  120899 wrap.go:47] POST /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods: (2.880683ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38506]
I0111 23:42:20.108911  120899 wrap.go:47] POST /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods: (2.261768ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38506]
I0111 23:42:20.112253  120899 wrap.go:47] POST /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods: (2.919807ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38506]
I0111 23:42:20.115425  120899 wrap.go:47] POST /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods: (1.798671ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38506]
I0111 23:42:20.115451  120899 wrap.go:47] DELETE /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/rpod-1: (13.498841ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38472]
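Annotation: the DELETE of rpod-1 frees the node for the higher-priority preemptor, which binds a few lines below. In shape, the preemptor the test creates is a pod whose priority exceeds the victims' and whose requests only fit once a victim is gone; a hedged sketch (values are illustrative, not the test's exact numbers, and outside an integration test you would set PriorityClassName rather than Priority directly):

    package sketch

    import (
    	v1 "k8s.io/api/core/v1"
    	"k8s.io/apimachinery/pkg/api/resource"
    	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    )

    func highPriorityPod(ns string) *v1.Pod {
    	prio := int32(1000)
    	return &v1.Pod{
    		ObjectMeta: metav1.ObjectMeta{Namespace: ns, Name: "preemptor-pod"},
    		Spec: v1.PodSpec{
    			Priority: &prio,
    			Containers: []v1.Container{{
    				Name:  "pause",
    				Image: "k8s.gcr.io/pause",
    				Resources: v1.ResourceRequirements{
    					Requests: v1.ResourceList{
    						v1.ResourceCPU:    resource.MustParse("400m"),
    						v1.ResourceMemory: resource.MustParse("400Mi"),
    					},
    				},
    			}},
    		},
    	}
    }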
I0111 23:42:20.116512  120899 scheduling_queue.go:821] About to try and schedule pod preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/preemptor-pod
I0111 23:42:20.116590  120899 scheduler.go:454] Attempting to schedule pod: preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/preemptor-pod
I0111 23:42:20.116788  120899 scheduler_binder.go:211] AssumePodVolumes for pod "preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/preemptor-pod", node "node1"
I0111 23:42:20.116822  120899 scheduler_binder.go:221] AssumePodVolumes for pod "preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/preemptor-pod", node "node1": all PVCs bound and nothing to do
I0111 23:42:20.117011  120899 scheduling_queue.go:821] About to try and schedule pod preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-7
I0111 23:42:20.117045  120899 scheduler.go:454] Attempting to schedule pod: preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-7
I0111 23:42:20.117158  120899 factory.go:1070] Unable to schedule preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-7: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0111 23:42:20.117223  120899 factory.go:1175] Updating pod condition for preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-7 to (PodScheduled==False, Reason=Unschedulable)
I0111 23:42:20.117470  120899 wrap.go:47] POST /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/events: (1.580919ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38472]
I0111 23:42:20.118122  120899 factory.go:1166] Attempting to bind preemptor-pod to node1
I0111 23:42:20.120554  120899 wrap.go:47] POST /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/events: (1.543092ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38516]
I0111 23:42:20.122549  120899 wrap.go:47] POST /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods: (6.559951ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38508]
I0111 23:42:20.123152  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-7: (4.517743ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38512]
I0111 23:42:20.123439  120899 wrap.go:47] POST /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/preemptor-pod/binding: (4.325064ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38514]
I0111 23:42:20.124169  120899 wrap.go:47] PUT /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-7/status: (6.265073ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38472]
I0111 23:42:20.125186  120899 scheduler.go:569] pod preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/preemptor-pod is bound successfully on node node1, 1 nodes evaluated, 1 nodes were found feasible
I0111 23:42:20.126614  120899 wrap.go:47] POST /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods: (3.476058ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38516]
I0111 23:42:20.128147  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-7: (1.665783ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38512]
I0111 23:42:20.128538  120899 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0111 23:42:20.129014  120899 wrap.go:47] POST /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods: (1.856185ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38516]
I0111 23:42:20.129127  120899 scheduling_queue.go:821] About to try and schedule pod preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-6
I0111 23:42:20.129424  120899 scheduler.go:454] Attempting to schedule pod: preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-6
I0111 23:42:20.129535  120899 factory.go:1070] Unable to schedule preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-6: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0111 23:42:20.129587  120899 factory.go:1175] Updating pod condition for preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-6 to (PodScheduled==False, Reason=Unschedulable)
I0111 23:42:20.132040  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-6: (1.848698ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38518]
I0111 23:42:20.132441  120899 wrap.go:47] POST /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods: (2.651357ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38516]
I0111 23:42:20.132840  120899 wrap.go:47] POST /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/events: (1.726213ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38510]
I0111 23:42:20.134609  120899 wrap.go:47] POST /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/events: (1.418549ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38510]
I0111 23:42:20.135177  120899 wrap.go:47] POST /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods: (2.225833ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38516]
I0111 23:42:20.135304  120899 wrap.go:47] PUT /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-6/status: (4.875708ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38512]
I0111 23:42:20.157849  120899 wrap.go:47] POST /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods: (3.675722ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38516]
I0111 23:42:20.160080  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-6: (5.884977ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38510]
I0111 23:42:20.160398  120899 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0111 23:42:20.160534  120899 scheduling_queue.go:821] About to try and schedule pod preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-9
I0111 23:42:20.160544  120899 scheduler.go:454] Attempting to schedule pod: preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-9
I0111 23:42:20.160617  120899 factory.go:1070] Unable to schedule preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-9: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0111 23:42:20.160655  120899 factory.go:1175] Updating pod condition for preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-9 to (PodScheduled==False, Reason=Unschedulable)
I0111 23:42:20.162523  120899 wrap.go:47] POST /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods: (4.115274ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38516]
I0111 23:42:20.162880  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-9: (1.861896ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38518]
I0111 23:42:20.164805  120899 wrap.go:47] POST /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods: (1.890533ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38516]
I0111 23:42:20.165063  120899 wrap.go:47] POST /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/events: (1.482569ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38554]
I0111 23:42:20.167154  120899 wrap.go:47] POST /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods: (1.976038ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38516]
I0111 23:42:20.169612  120899 wrap.go:47] PUT /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-9/status: (6.253866ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38510]
I0111 23:42:20.170095  120899 wrap.go:47] POST /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods: (2.47634ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38554]
I0111 23:42:20.172496  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-9: (1.239853ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38510]
I0111 23:42:20.172841  120899 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0111 23:42:20.173337  120899 wrap.go:47] POST /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods: (2.174509ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38554]
I0111 23:42:20.174066  120899 scheduling_queue.go:821] About to try and schedule pod preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-13
I0111 23:42:20.174089  120899 scheduler.go:454] Attempting to schedule pod: preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-13
I0111 23:42:20.174214  120899 factory.go:1070] Unable to schedule preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-13: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0111 23:42:20.174399  120899 factory.go:1175] Updating pod condition for preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-13 to (PodScheduled==False, Reason=Unschedulable)
I0111 23:42:20.177551  120899 wrap.go:47] POST /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/events: (1.90223ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38560]
I0111 23:42:20.177579  120899 wrap.go:47] POST /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods: (3.574647ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38510]
I0111 23:42:20.178761  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-13: (2.280886ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38558]
I0111 23:42:20.179118  120899 wrap.go:47] PUT /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-13/status: (3.493949ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38518]
I0111 23:42:20.180990  120899 wrap.go:47] POST /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods: (2.900622ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38560]
I0111 23:42:20.181043  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-13: (1.363486ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38558]
I0111 23:42:20.181329  120899 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0111 23:42:20.181493  120899 scheduling_queue.go:821] About to try and schedule pod preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-18
I0111 23:42:20.181653  120899 scheduler.go:454] Attempting to schedule pod: preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-18
I0111 23:42:20.181852  120899 factory.go:1070] Unable to schedule preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-18: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0111 23:42:20.181944  120899 factory.go:1175] Updating pod condition for preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-18 to (PodScheduled==False, Reason=Unschedulable)
I0111 23:42:20.184575  120899 wrap.go:47] POST /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/events: (1.87348ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38566]
I0111 23:42:20.193729  120899 wrap.go:47] POST /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods: (9.497378ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38560]
I0111 23:42:20.194835  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-18: (9.863045ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38566]
I0111 23:42:20.201551  120899 wrap.go:47] PUT /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-18/status: (19.378343ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38510]
I0111 23:42:20.202063  120899 wrap.go:47] POST /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods: (3.326435ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38566]
I0111 23:42:20.208171  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-18: (4.994861ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38566]
I0111 23:42:20.208332  120899 wrap.go:47] POST /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods: (4.204051ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38564]
I0111 23:42:20.208635  120899 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0111 23:42:20.208917  120899 scheduling_queue.go:821] About to try and schedule pod preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-13
I0111 23:42:20.208963  120899 scheduler.go:454] Attempting to schedule pod: preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-13
I0111 23:42:20.209072  120899 factory.go:1070] Unable to schedule preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-13: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0111 23:42:20.209128  120899 factory.go:1175] Updating pod condition for preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-13 to (PodScheduled==False, Reason=Unschedulable)
I0111 23:42:20.212918  120899 wrap.go:47] POST /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods: (3.836824ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38566]
I0111 23:42:20.213124  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-13: (1.710923ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38594]
I0111 23:42:20.213174  120899 wrap.go:47] PUT /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-13/status: (2.781929ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38564]
I0111 23:42:20.214998  120899 wrap.go:47] PATCH /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/events/ppod-13.1578efce07c47867: (4.275872ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38596]
I0111 23:42:20.215580  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-13: (1.390379ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38564]
I0111 23:42:20.216232  120899 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0111 23:42:20.216384  120899 scheduling_queue.go:821] About to try and schedule pod preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-22
I0111 23:42:20.216420  120899 scheduler.go:454] Attempting to schedule pod: preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-22
I0111 23:42:20.216542  120899 factory.go:1070] Unable to schedule preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-22: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0111 23:42:20.216660  120899 factory.go:1175] Updating pod condition for preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-22 to (PodScheduled==False, Reason=Unschedulable)
I0111 23:42:20.218535  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-22: (1.607817ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38564]
I0111 23:42:20.219823  120899 wrap.go:47] PUT /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-22/status: (2.57178ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38594]
I0111 23:42:20.220201  120899 wrap.go:47] POST /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/events: (1.639125ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38602]
I0111 23:42:20.220504  120899 wrap.go:47] POST /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods: (5.112707ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38566]
I0111 23:42:20.222357  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-22: (1.139076ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38564]
I0111 23:42:20.222671  120899 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0111 23:42:20.222876  120899 scheduling_queue.go:821] About to try and schedule pod preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-24
I0111 23:42:20.222901  120899 scheduler.go:454] Attempting to schedule pod: preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-24
I0111 23:42:20.222980  120899 factory.go:1070] Unable to schedule preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-24: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0111 23:42:20.223024  120899 wrap.go:47] POST /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods: (1.490546ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38566]
I0111 23:42:20.223028  120899 factory.go:1175] Updating pod condition for preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-24 to (PodScheduled==False, Reason=Unschedulable)
I0111 23:42:20.226192  120899 wrap.go:47] POST /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/events: (1.910054ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38606]
I0111 23:42:20.226731  120899 wrap.go:47] PUT /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-24/status: (3.128515ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38564]
I0111 23:42:20.227202  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-24: (3.317611ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38594]
I0111 23:42:20.229063  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-24: (1.349215ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38564]
I0111 23:42:20.229096  120899 wrap.go:47] POST /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods: (2.416841ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38606]
I0111 23:42:20.229623  120899 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0111 23:42:20.229805  120899 scheduling_queue.go:821] About to try and schedule pod preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-25
I0111 23:42:20.229831  120899 scheduler.go:454] Attempting to schedule pod: preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-25
I0111 23:42:20.229913  120899 factory.go:1070] Unable to schedule preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-25: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0111 23:42:20.229961  120899 factory.go:1175] Updating pod condition for preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-25 to (PodScheduled==False, Reason=Unschedulable)
I0111 23:42:20.233595  120899 wrap.go:47] POST /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/events: (2.942585ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38608]
I0111 23:42:20.236221  120899 wrap.go:47] PUT /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-25/status: (4.122532ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38564]
I0111 23:42:20.236226  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-25: (4.402483ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38594]
I0111 23:42:20.236609  120899 wrap.go:47] POST /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods: (3.932722ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38606]
I0111 23:42:20.239363  120899 wrap.go:47] POST /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods: (2.240148ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38606]
I0111 23:42:20.239450  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-25: (1.575027ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38564]
I0111 23:42:20.239730  120899 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0111 23:42:20.240556  120899 scheduling_queue.go:821] About to try and schedule pod preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-27
I0111 23:42:20.240569  120899 scheduler.go:454] Attempting to schedule pod: preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-27
I0111 23:42:20.240655  120899 factory.go:1070] Unable to schedule preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-27: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0111 23:42:20.240691  120899 factory.go:1175] Updating pod condition for preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-27 to (PodScheduled==False, Reason=Unschedulable)
I0111 23:42:20.242842  120899 wrap.go:47] POST /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods: (2.954998ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38606]
I0111 23:42:20.248694  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-27: (6.961733ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38610]
I0111 23:42:20.248859  120899 cacher.go:598] cacher (*core.Pod): 1 objects queued in incoming channel.
I0111 23:42:20.249165  120899 wrap.go:47] POST /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods: (5.917473ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38606]
I0111 23:42:20.252260  120899 wrap.go:47] PUT /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-27/status: (11.30279ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38608]
I0111 23:42:20.255601  120899 wrap.go:47] POST /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/events: (14.011563ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38612]
I0111 23:42:20.255727  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-27: (2.755096ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38608]
I0111 23:42:20.256081  120899 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0111 23:42:20.256270  120899 scheduling_queue.go:821] About to try and schedule pod preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-28
I0111 23:42:20.256329  120899 scheduler.go:454] Attempting to schedule pod: preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-28
I0111 23:42:20.256439  120899 factory.go:1070] Unable to schedule preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-28: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0111 23:42:20.256500  120899 factory.go:1175] Updating pod condition for preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-28 to (PodScheduled==False, Reason=Unschedulable)
I0111 23:42:20.259016  120899 wrap.go:47] POST /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/events: (1.685206ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38610]
I0111 23:42:20.259786  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-28: (1.388666ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38644]
I0111 23:42:20.260905  120899 wrap.go:47] POST /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods: (10.82316ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38606]
I0111 23:42:20.267107  120899 wrap.go:47] PUT /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-28/status: (9.578988ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38612]
I0111 23:42:20.268146  120899 wrap.go:47] POST /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods: (6.833634ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38644]
I0111 23:42:20.271170  120899 wrap.go:47] POST /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods: (1.699267ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38644]
I0111 23:42:20.271574  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-28: (2.540129ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38612]
I0111 23:42:20.272037  120899 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0111 23:42:20.272485  120899 scheduling_queue.go:821] About to try and schedule pod preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-27
I0111 23:42:20.272516  120899 scheduler.go:454] Attempting to schedule pod: preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-27
I0111 23:42:20.272624  120899 factory.go:1070] Unable to schedule preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-27: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0111 23:42:20.272678  120899 factory.go:1175] Updating pod condition for preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-27 to (PodScheduled==False, Reason=Unschedulable)
I0111 23:42:20.276080  120899 wrap.go:47] PATCH /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/events/ppod-27.1578efce0bb816a4: (2.244992ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38670]
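(Note the switch from POST to PATCH for events: the first occurrence of an event is POSTed (201 above), while a repeat of the same event for ppod-27 is PATCHed (200) against the existing object name ppod-27.1578efce0bb816a4, bumping its count instead of creating a duplicate. A sketch of what such a dedup patch body might look like; the payload shape and the helper name eventDedupPatch are illustrative, not the recorder's actual wire format.)

package main

import (
	"encoding/json"
	"fmt"
	"time"
)

// eventDedupPatch builds a merge-patch-style body that increments an
// existing event's count and refreshes its lastTimestamp.
func eventDedupPatch(newCount int32) ([]byte, error) {
	return json.Marshal(map[string]interface{}{
		"count":         newCount,
		"lastTimestamp": time.Now().UTC().Format(time.RFC3339),
	})
}

func main() {
	patch, _ := eventDedupPatch(2)
	fmt.Println(string(patch))
}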
I0111 23:42:20.277560  120899 wrap.go:47] PUT /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-27/status: (3.969933ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38610]
I0111 23:42:20.277917  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-27: (4.489685ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38668]
I0111 23:42:20.278418  120899 wrap.go:47] POST /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods: (5.913414ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38612]
I0111 23:42:20.280582  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-27: (2.60345ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38610]
I0111 23:42:20.280606  120899 wrap.go:47] POST /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods: (1.800277ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38668]
I0111 23:42:20.280842  120899 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0111 23:42:20.281058  120899 scheduling_queue.go:821] About to try and schedule pod preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-32
I0111 23:42:20.281072  120899 scheduler.go:454] Attempting to schedule pod: preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-32
I0111 23:42:20.281141  120899 factory.go:1070] Unable to schedule preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-32: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0111 23:42:20.281190  120899 factory.go:1175] Updating pod condition for preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-32 to (PodScheduled==False, Reason=Unschedulable)
I0111 23:42:20.282828  120899 wrap.go:47] POST /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods: (1.776409ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38610]
I0111 23:42:20.283066  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-32: (1.346041ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38670]
I0111 23:42:20.285201  120899 cacher.go:598] cacher (*core.Pod): 2 objects queued in incoming channel.
I0111 23:42:20.285234  120899 cacher.go:598] cacher (*core.Pod): 3 objects queued in incoming channel.
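(The cacher.go:598 lines report backlog in the watch cache's incoming channel; the climb from 1 to 3 queued objects shows pod writes briefly outpacing the cache's dispatch loop. A generic sketch of that producer/consumer pattern, not the apiserver's code; the channel size and names are illustrative.)

package main

import (
	"fmt"
	"time"
)

func main() {
	incoming := make(chan string, 100)
	go func() {
		for i := 0; i < 5; i++ {
			incoming <- fmt.Sprintf("pod-%d", i)
			// Log whenever the consumer falls behind the producer.
			if n := len(incoming); n > 1 {
				fmt.Printf("cacher: %d objects queued in incoming channel\n", n)
			}
		}
		close(incoming)
	}()
	for ev := range incoming {
		time.Sleep(time.Millisecond) // slow consumer makes the backlog visible
		_ = ev
	}
}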
I0111 23:42:20.285373  120899 wrap.go:47] POST /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/events: (2.090201ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38610]
I0111 23:42:20.285423  120899 wrap.go:47] POST /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods: (1.973713ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38670]
I0111 23:42:20.285838  120899 wrap.go:47] PUT /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-32/status: (3.494583ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38672]
I0111 23:42:20.297787  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-32: (11.575675ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38672]
I0111 23:42:20.297925  120899 wrap.go:47] POST /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods: (12.003637ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38610]
I0111 23:42:20.298115  120899 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0111 23:42:20.299343  120899 scheduling_queue.go:821] About to try and schedule pod preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-28
I0111 23:42:20.299357  120899 scheduler.go:454] Attempting to schedule pod: preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-28
I0111 23:42:20.299466  120899 factory.go:1070] Unable to schedule preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-28: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0111 23:42:20.299504  120899 factory.go:1175] Updating pod condition for preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-28 to (PodScheduled==False, Reason=Unschedulable)
I0111 23:42:20.304408  120899 wrap.go:47] PATCH /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/events/ppod-28.1578efce0ca949b4: (3.958835ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38680]
I0111 23:42:20.307651  120899 wrap.go:47] POST /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods: (9.040271ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38674]
I0111 23:42:20.311647  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-28: (11.083798ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38678]
I0111 23:42:20.319201  120899 wrap.go:47] POST /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods: (10.71074ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38674]
I0111 23:42:20.319807  120899 wrap.go:47] PUT /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-28/status: (20.074303ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38676]
I0111 23:42:20.325093  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-28: (1.959402ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38680]
I0111 23:42:20.325494  120899 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0111 23:42:20.326206  120899 scheduling_queue.go:821] About to try and schedule pod preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-39
I0111 23:42:20.326221  120899 scheduler.go:454] Attempting to schedule pod: preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-39
I0111 23:42:20.326329  120899 factory.go:1070] Unable to schedule preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-39: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0111 23:42:20.326372  120899 factory.go:1175] Updating pod condition for preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-39 to (PodScheduled==False, Reason=Unschedulable)
I0111 23:42:20.325515  120899 wrap.go:47] POST /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods: (2.307658ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38678]
I0111 23:42:20.330193  120899 wrap.go:47] POST /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods: (2.490611ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38678]
I0111 23:42:20.330340  120899 wrap.go:47] PUT /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-39/status: (3.699846ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38680]
I0111 23:42:20.332102  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-39: (4.066957ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38706]
I0111 23:42:20.332693  120899 wrap.go:47] POST /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/events: (5.371448ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38708]
I0111 23:42:20.332957  120899 wrap.go:47] POST /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods: (2.008839ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38678]
I0111 23:42:20.333335  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-39: (2.608086ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38680]
I0111 23:42:20.333573  120899 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0111 23:42:20.333831  120899 scheduling_queue.go:821] About to try and schedule pod preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-42
I0111 23:42:20.333844  120899 scheduler.go:454] Attempting to schedule pod: preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-42
I0111 23:42:20.333923  120899 factory.go:1070] Unable to schedule preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-42: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0111 23:42:20.333961  120899 factory.go:1175] Updating pod condition for preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-42 to (PodScheduled==False, Reason=Unschedulable)
I0111 23:42:20.335892  120899 wrap.go:47] POST /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods: (2.366436ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38678]
I0111 23:42:20.343272  120899 wrap.go:47] POST /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/events: (8.019533ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38716]
I0111 23:42:20.343388  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-42: (8.358445ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38708]
I0111 23:42:20.343521  120899 wrap.go:47] POST /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods: (6.985019ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38678]
I0111 23:42:20.343870  120899 wrap.go:47] PUT /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-42/status: (8.07617ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38706]
I0111 23:42:20.345778  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-42: (1.537169ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38716]
I0111 23:42:20.346008  120899 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0111 23:42:20.346124  120899 scheduling_queue.go:821] About to try and schedule pod preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-43
I0111 23:42:20.346146  120899 scheduler.go:454] Attempting to schedule pod: preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-43
I0111 23:42:20.346221  120899 factory.go:1070] Unable to schedule preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-43: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0111 23:42:20.346256  120899 factory.go:1175] Updating pod condition for preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-43 to (PodScheduled==False, Reason=Unschedulable)
I0111 23:42:20.348700  120899 wrap.go:47] POST /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/events: (1.542852ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38730]
I0111 23:42:20.351259  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-43: (4.134309ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38728]
I0111 23:42:20.351984  120899 wrap.go:47] POST /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods: (6.644484ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38714]
I0111 23:42:20.352077  120899 wrap.go:47] PUT /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-43/status: (5.587302ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38716]
I0111 23:42:20.368873  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-43: (16.32147ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38730]
I0111 23:42:20.368873  120899 wrap.go:47] POST /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods: (16.430573ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38728]
I0111 23:42:20.369730  120899 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0111 23:42:20.369929  120899 scheduling_queue.go:821] About to try and schedule pod preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-46
I0111 23:42:20.369940  120899 scheduler.go:454] Attempting to schedule pod: preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-46
I0111 23:42:20.370057  120899 factory.go:1070] Unable to schedule preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-46: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0111 23:42:20.370096  120899 factory.go:1175] Updating pod condition for preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-46 to (PodScheduled==False, Reason=Unschedulable)
I0111 23:42:20.372481  120899 wrap.go:47] POST /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/events: (1.622953ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38750]
I0111 23:42:20.372799  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-46: (1.448332ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38730]
I0111 23:42:20.373641  120899 wrap.go:47] PUT /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-46/status: (2.014917ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38748]
I0111 23:42:20.375164  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-46: (1.121004ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38730]
I0111 23:42:20.375462  120899 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0111 23:42:20.376152  120899 wrap.go:47] POST /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods: (6.107234ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38728]
I0111 23:42:20.376717  120899 scheduling_queue.go:821] About to try and schedule pod preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-48
I0111 23:42:20.376732  120899 scheduler.go:454] Attempting to schedule pod: preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-48
I0111 23:42:20.376847  120899 factory.go:1070] Unable to schedule preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-48: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0111 23:42:20.376887  120899 factory.go:1175] Updating pod condition for preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-48 to (PodScheduled==False, Reason=Unschedulable)
I0111 23:42:20.380038  120899 wrap.go:47] POST /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/events: (2.419305ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38754]
I0111 23:42:20.381794  120899 wrap.go:47] PUT /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-48/status: (3.348148ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38730]
I0111 23:42:20.382250  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-48: (4.081295ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38750]
I0111 23:42:20.384812  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-48: (1.54991ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38750]
I0111 23:42:20.385032  120899 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0111 23:42:20.385151  120899 scheduling_queue.go:821] About to try and schedule pod preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-49
I0111 23:42:20.385161  120899 scheduler.go:454] Attempting to schedule pod: preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-49
I0111 23:42:20.385226  120899 factory.go:1070] Unable to schedule preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-49: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0111 23:42:20.385262  120899 factory.go:1175] Updating pod condition for preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-49 to (PodScheduled==False, Reason=Unschedulable)
I0111 23:42:20.388463  120899 wrap.go:47] POST /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/events: (2.322724ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38760]
I0111 23:42:20.390169  120899 wrap.go:47] PUT /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-49/status: (3.166999ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38750]
I0111 23:42:20.390472  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-49: (3.725108ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38754]
I0111 23:42:20.392301  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-49: (1.100884ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38754]
I0111 23:42:20.392561  120899 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0111 23:42:20.392703  120899 scheduling_queue.go:821] About to try and schedule pod preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-46
I0111 23:42:20.392775  120899 scheduler.go:454] Attempting to schedule pod: preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-46
I0111 23:42:20.392889  120899 factory.go:1070] Unable to schedule preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-46: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0111 23:42:20.392956  120899 factory.go:1175] Updating pod condition for preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-46 to (PodScheduled==False, Reason=Unschedulable)
I0111 23:42:20.394913  120899 wrap.go:47] PUT /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-46/status: (1.699664ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38754]
I0111 23:42:20.395252  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-46: (1.401281ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38760]
I0111 23:42:20.397963  120899 wrap.go:47] PATCH /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/events/ppod-46.1578efce136ea118: (2.592009ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38754]
I0111 23:42:20.398725  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-46: (3.163285ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38764]
I0111 23:42:20.399013  120899 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0111 23:42:20.399177  120899 scheduling_queue.go:821] About to try and schedule pod preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-49
I0111 23:42:20.399228  120899 scheduler.go:454] Attempting to schedule pod: preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-49
I0111 23:42:20.399416  120899 factory.go:1070] Unable to schedule preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-49: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0111 23:42:20.399497  120899 factory.go:1175] Updating pod condition for preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-49 to (PodScheduled==False, Reason=Unschedulable)
I0111 23:42:20.401133  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-49: (1.212984ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38760]
I0111 23:42:20.403130  120899 wrap.go:47] PUT /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-49/status: (2.978619ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38754]
I0111 23:42:20.403254  120899 wrap.go:47] PATCH /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/events/ppod-49.1578efce145610c6: (2.982085ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38766]
I0111 23:42:20.404809  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-49: (1.292524ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38754]
I0111 23:42:20.405084  120899 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0111 23:42:20.405246  120899 scheduling_queue.go:821] About to try and schedule pod preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-47
I0111 23:42:20.405265  120899 scheduler.go:454] Attempting to schedule pod: preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-47
I0111 23:42:20.405419  120899 factory.go:1070] Unable to schedule preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-47: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0111 23:42:20.405530  120899 factory.go:1175] Updating pod condition for preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-47 to (PodScheduled==False, Reason=Unschedulable)
I0111 23:42:20.407580  120899 wrap.go:47] POST /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/events: (1.7346ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38760]
I0111 23:42:20.408503  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-47: (2.180776ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38768]
I0111 23:42:20.409650  120899 wrap.go:47] PUT /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-47/status: (3.803393ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38754]
I0111 23:42:20.411498  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-47: (1.472457ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38768]
I0111 23:42:20.411812  120899 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0111 23:42:20.412004  120899 scheduling_queue.go:821] About to try and schedule pod preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-43
I0111 23:42:20.412029  120899 scheduler.go:454] Attempting to schedule pod: preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-43
I0111 23:42:20.412106  120899 factory.go:1070] Unable to schedule preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-43: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0111 23:42:20.412157  120899 factory.go:1175] Updating pod condition for preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-43 to (PodScheduled==False, Reason=Unschedulable)
I0111 23:42:20.415936  120899 wrap.go:47] PUT /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-43/status: (3.44668ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38760]
I0111 23:42:20.416371  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-43: (3.555181ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38768]
I0111 23:42:20.416714  120899 wrap.go:47] PATCH /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/events/ppod-43.1578efce1202e072: (2.983266ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38776]
I0111 23:42:20.417558  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-43: (1.06517ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38760]
I0111 23:42:20.419087  120899 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0111 23:42:20.419317  120899 scheduling_queue.go:821] About to try and schedule pod preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-47
I0111 23:42:20.419348  120899 scheduler.go:454] Attempting to schedule pod: preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-47
I0111 23:42:20.419455  120899 factory.go:1070] Unable to schedule preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-47: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0111 23:42:20.419510  120899 factory.go:1175] Updating pod condition for preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-47 to (PodScheduled==False, Reason=Unschedulable)
I0111 23:42:20.422701  120899 wrap.go:47] PUT /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-47/status: (2.226656ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38768]
I0111 23:42:20.423093  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-47: (3.115634ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38776]
I0111 23:42:20.425442  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-47: (1.669067ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38768]
I0111 23:42:20.426197  120899 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0111 23:42:20.426391  120899 scheduling_queue.go:821] About to try and schedule pod preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-45
I0111 23:42:20.426418  120899 scheduler.go:454] Attempting to schedule pod: preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-45
I0111 23:42:20.426513  120899 factory.go:1070] Unable to schedule preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-45: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0111 23:42:20.426561  120899 factory.go:1175] Updating pod condition for preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-45 to (PodScheduled==False, Reason=Unschedulable)
I0111 23:42:20.427860  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-45: (1.032896ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38780]
I0111 23:42:20.429542  120899 wrap.go:47] PUT /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-45/status: (2.7455ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38768]
I0111 23:42:20.431400  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-45: (1.200502ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38768]
I0111 23:42:20.431671  120899 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0111 23:42:20.431874  120899 scheduling_queue.go:821] About to try and schedule pod preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-44
I0111 23:42:20.431888  120899 scheduler.go:454] Attempting to schedule pod: preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-44
I0111 23:42:20.431986  120899 factory.go:1070] Unable to schedule preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-44: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0111 23:42:20.432029  120899 factory.go:1175] Updating pod condition for preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-44 to (PodScheduled==False, Reason=Unschedulable)
I0111 23:42:20.433274  120899 wrap.go:47] PATCH /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/events/ppod-47.1578efce158b2855: (9.466464ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38776]
I0111 23:42:20.433768  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-44: (1.32559ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38780]
I0111 23:42:20.436476  120899 wrap.go:47] PUT /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-44/status: (4.198892ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38768]
I0111 23:42:20.437266  120899 wrap.go:47] POST /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/events: (3.564198ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38776]
I0111 23:42:20.438138  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-44: (1.168166ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38768]
I0111 23:42:20.438409  120899 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0111 23:42:20.438550  120899 scheduling_queue.go:821] About to try and schedule pod preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-45
I0111 23:42:20.438570  120899 scheduler.go:454] Attempting to schedule pod: preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-45
I0111 23:42:20.438639  120899 factory.go:1070] Unable to schedule preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-45: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0111 23:42:20.438696  120899 factory.go:1175] Updating pod condition for preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-45 to (PodScheduled==False, Reason=Unschedulable)
I0111 23:42:20.440687  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-45: (1.709907ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38780]
I0111 23:42:20.440736  120899 wrap.go:47] PUT /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-45/status: (1.671449ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38768]
I0111 23:42:20.441661  120899 wrap.go:47] POST /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/events: (3.747758ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38776]
I0111 23:42:20.447859  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-45: (1.489729ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38768]
I0111 23:42:20.448202  120899 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0111 23:42:20.448440  120899 scheduling_queue.go:821] About to try and schedule pod preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-44
I0111 23:42:20.448492  120899 scheduler.go:454] Attempting to schedule pod: preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-44
I0111 23:42:20.448642  120899 factory.go:1070] Unable to schedule preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-44: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0111 23:42:20.448776  120899 factory.go:1175] Updating pod condition for preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-44 to (PodScheduled==False, Reason=Unschedulable)
I0111 23:42:20.450158  120899 wrap.go:47] PATCH /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/events/ppod-45.1578efce16cc3a50: (5.089966ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38780]
I0111 23:42:20.451416  120899 wrap.go:47] PUT /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-44/status: (2.183114ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38768]
I0111 23:42:20.451712  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-44: (2.297113ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38782]
I0111 23:42:20.453022  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-44: (1.094815ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38768]
I0111 23:42:20.453377  120899 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0111 23:42:20.453626  120899 scheduling_queue.go:821] About to try and schedule pod preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-39
I0111 23:42:20.453666  120899 scheduler.go:454] Attempting to schedule pod: preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-39
I0111 23:42:20.453816  120899 factory.go:1070] Unable to schedule preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-39: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0111 23:42:20.453889  120899 factory.go:1175] Updating pod condition for preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-39 to (PodScheduled==False, Reason=Unschedulable)
I0111 23:42:20.455569  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-39: (1.083053ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38782]
I0111 23:42:20.456605  120899 wrap.go:47] PUT /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-39/status: (2.126516ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38768]
I0111 23:42:20.458194  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-39: (1.126995ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38768]
I0111 23:42:20.458607  120899 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0111 23:42:20.458812  120899 scheduling_queue.go:821] About to try and schedule pod preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-41
I0111 23:42:20.458858  120899 scheduler.go:454] Attempting to schedule pod: preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-41
I0111 23:42:20.458974  120899 factory.go:1070] Unable to schedule preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-41: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0111 23:42:20.459041  120899 factory.go:1175] Updating pod condition for preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-41 to (PodScheduled==False, Reason=Unschedulable)
I0111 23:42:20.460843  120899 wrap.go:47] PATCH /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/events/ppod-44.1578efce171fa58e: (9.720103ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38780]
I0111 23:42:20.462370  120899 wrap.go:47] PUT /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-41/status: (2.347105ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38782]
I0111 23:42:20.463262  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-41: (2.885703ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38768]
I0111 23:42:20.466025  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-41: (1.936164ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38782]
I0111 23:42:20.467355  120899 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0111 23:42:20.467500  120899 scheduling_queue.go:821] About to try and schedule pod preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-40
I0111 23:42:20.467529  120899 scheduler.go:454] Attempting to schedule pod: preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-40
I0111 23:42:20.467616  120899 factory.go:1070] Unable to schedule preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-40: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0111 23:42:20.467655  120899 factory.go:1175] Updating pod condition for preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-40 to (PodScheduled==False, Reason=Unschedulable)
I0111 23:42:20.470061  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-40: (1.956208ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38768]
I0111 23:42:20.471943  120899 wrap.go:47] PUT /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-40/status: (4.027838ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38782]
I0111 23:42:20.471961  120899 wrap.go:47] PATCH /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/events/ppod-39.1578efce10d37299: (7.960718ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38780]
I0111 23:42:20.475562  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-40: (2.954569ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38782]
I0111 23:42:20.475865  120899 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0111 23:42:20.476121  120899 scheduling_queue.go:821] About to try and schedule pod preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-38
I0111 23:42:20.476200  120899 scheduler.go:454] Attempting to schedule pod: preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-38
I0111 23:42:20.476339  120899 factory.go:1070] Unable to schedule preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-38: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0111 23:42:20.476441  120899 factory.go:1175] Updating pod condition for preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-38 to (PodScheduled==False, Reason=Unschedulable)
I0111 23:42:20.479122  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/preemptor-pod: (2.213342ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38782]
I0111 23:42:20.479529  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-38: (2.089279ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38768]
I0111 23:42:20.482655  120899 wrap.go:47] PUT /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-38/status: (2.58373ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38768]
I0111 23:42:20.484655  120899 wrap.go:47] POST /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/events: (11.641686ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38780]
I0111 23:42:20.485383  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-38: (2.166013ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38768]
I0111 23:42:20.486064  120899 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0111 23:42:20.486436  120899 scheduling_queue.go:821] About to try and schedule pod preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-37
I0111 23:42:20.486481  120899 scheduler.go:454] Attempting to schedule pod: preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-37
I0111 23:42:20.487676  120899 wrap.go:47] POST /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/events: (1.710507ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38780]
I0111 23:42:20.488917  120899 factory.go:1070] Unable to schedule preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-37: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0111 23:42:20.492221  120899 wrap.go:47] POST /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/events: (3.432017ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38768]
I0111 23:42:20.492856  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-37: (1.833706ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38782]
I0111 23:42:20.496771  120899 wrap.go:47] POST /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/events: (4.01019ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38768]
I0111 23:42:20.517169  120899 factory.go:1175] Updating pod condition for preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-37 to (PodScheduled==False, Reason=Unschedulable)
I0111 23:42:20.521391  120899 wrap.go:47] PUT /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-37/status: (3.63988ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38768]
I0111 23:42:20.526585  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-37: (2.150795ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38768]
I0111 23:42:20.526926  120899 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0111 23:42:20.527359  120899 scheduling_queue.go:821] About to try and schedule pod preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-38
I0111 23:42:20.527375  120899 scheduler.go:454] Attempting to schedule pod: preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-38
I0111 23:42:20.527469  120899 factory.go:1070] Unable to schedule preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-38: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0111 23:42:20.527516  120899 factory.go:1175] Updating pod condition for preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-38 to (PodScheduled==False, Reason=Unschedulable)
I0111 23:42:20.531641  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-38: (3.736136ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38768]
I0111 23:42:20.532374  120899 wrap.go:47] PATCH /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/events/ppod-38.1578efce19c54589: (3.324484ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38782]
I0111 23:42:20.535239  120899 wrap.go:47] PUT /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-38/status: (1.907628ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38782]
I0111 23:42:20.538321  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-38: (1.335338ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38782]
I0111 23:42:20.539404  120899 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0111 23:42:20.539601  120899 scheduling_queue.go:821] About to try and schedule pod preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-37
I0111 23:42:20.539641  120899 scheduler.go:454] Attempting to schedule pod: preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-37
I0111 23:42:20.539804  120899 factory.go:1070] Unable to schedule preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-37: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0111 23:42:20.539882  120899 factory.go:1175] Updating pod condition for preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-37 to (PodScheduled==False, Reason=Unschedulable)
I0111 23:42:20.543038  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-37: (2.49035ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38768]
I0111 23:42:20.543395  120899 wrap.go:47] PUT /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-37/status: (2.819318ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38782]
I0111 23:42:20.545409  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-37: (1.65717ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38782]
I0111 23:42:20.545658  120899 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0111 23:42:20.545878  120899 scheduling_queue.go:821] About to try and schedule pod preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-36
I0111 23:42:20.545921  120899 scheduler.go:454] Attempting to schedule pod: preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-36
I0111 23:42:20.546098  120899 factory.go:1070] Unable to schedule preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-36: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0111 23:42:20.546896  120899 factory.go:1175] Updating pod condition for preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-36 to (PodScheduled==False, Reason=Unschedulable)
I0111 23:42:20.548714  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-36: (2.237898ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38782]
I0111 23:42:20.550358  120899 wrap.go:47] PUT /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-36/status: (3.196037ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38768]
I0111 23:42:20.553078  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-36: (2.330084ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38768]
I0111 23:42:20.553435  120899 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0111 23:42:20.553614  120899 scheduling_queue.go:821] About to try and schedule pod preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-35
I0111 23:42:20.553654  120899 scheduler.go:454] Attempting to schedule pod: preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-35
I0111 23:42:20.553910  120899 factory.go:1070] Unable to schedule preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-35: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0111 23:42:20.553978  120899 factory.go:1175] Updating pod condition for preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-35 to (PodScheduled==False, Reason=Unschedulable)
I0111 23:42:20.556607  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-35: (2.275678ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38768]
I0111 23:42:20.557698  120899 wrap.go:47] PUT /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-35/status: (2.595673ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38782]
I0111 23:42:20.560224  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-35: (1.499061ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38782]
I0111 23:42:20.560658  120899 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0111 23:42:20.561038  120899 scheduling_queue.go:821] About to try and schedule pod preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-36
I0111 23:42:20.561062  120899 scheduler.go:454] Attempting to schedule pod: preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-36
I0111 23:42:20.561162  120899 factory.go:1070] Unable to schedule preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-36: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0111 23:42:20.561242  120899 factory.go:1175] Updating pod condition for preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-36 to (PodScheduled==False, Reason=Unschedulable)
I0111 23:42:20.563457  120899 wrap.go:47] PATCH /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/events/ppod-37.1578efce1a85616f: (21.080015ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38826]
I0111 23:42:20.565412  120899 wrap.go:47] PUT /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-36/status: (3.7259ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38782]
I0111 23:42:20.565552  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-36: (3.903733ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38768]
I0111 23:42:20.566951  120899 wrap.go:47] POST /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/events: (2.894685ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38826]
I0111 23:42:20.567562  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-36: (1.776533ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38768]
I0111 23:42:20.567998  120899 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0111 23:42:20.568136  120899 scheduling_queue.go:821] About to try and schedule pod preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-35
I0111 23:42:20.568156  120899 scheduler.go:454] Attempting to schedule pod: preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-35
I0111 23:42:20.568233  120899 factory.go:1070] Unable to schedule preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-35: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0111 23:42:20.568308  120899 factory.go:1175] Updating pod condition for preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-35 to (PodScheduled==False, Reason=Unschedulable)
I0111 23:42:20.577814  120899 wrap.go:47] PUT /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-35/status: (9.095257ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38826]
I0111 23:42:20.578006  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-35: (9.290532ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38782]
I0111 23:42:20.578465  120899 wrap.go:47] POST /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/events: (9.287624ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38858]
I0111 23:42:20.581008  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-35: (1.943149ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38782]
I0111 23:42:20.581358  120899 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0111 23:42:20.581530  120899 scheduling_queue.go:821] About to try and schedule pod preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-34
I0111 23:42:20.581551  120899 scheduler.go:454] Attempting to schedule pod: preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-34
I0111 23:42:20.581637  120899 factory.go:1070] Unable to schedule preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-34: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0111 23:42:20.581687  120899 factory.go:1175] Updating pod condition for preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-34 to (PodScheduled==False, Reason=Unschedulable)
I0111 23:42:20.581835  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/preemptor-pod: (1.268262ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38866]
I0111 23:42:20.582328  120899 preemption_test.go:583] Check unschedulable pods still exists and were never scheduled...
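(preemption_test.go:583 marks the point where the test stops creating pods and starts verifying its invariant: every ppod-N must still exist and must never have been scheduled. The GETs of ppod-0, ppod-1, ... that follow are that sweep, interleaved with the scheduler's last retries. A minimal sketch of the per-pod check, assuming the k8s.io/api module; the helper name neverScheduled is illustrative, not the test's actual code.)

package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
)

// neverScheduled reports whether the pod was never bound to a node and
// its PodScheduled condition (if present) is False.
func neverScheduled(pod *v1.Pod) bool {
	if pod.Spec.NodeName != "" {
		return false // was bound to a node at some point
	}
	for _, c := range pod.Status.Conditions {
		if c.Type == v1.PodScheduled && c.Status != v1.ConditionFalse {
			return false
		}
	}
	return true
}

func main() {
	pod := &v1.Pod{}
	pod.Status.Conditions = []v1.PodCondition{{Type: v1.PodScheduled, Status: v1.ConditionFalse}}
	fmt.Println(neverScheduled(pod)) // true
}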
I0111 23:42:20.586211  120899 wrap.go:47] PUT /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-34/status: (3.924823ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38782]
I0111 23:42:20.586656  120899 wrap.go:47] PATCH /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/events/ppod-36.1578efce1ded8826: (7.542127ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38826]
I0111 23:42:20.588530  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-0: (5.896953ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38870]
I0111 23:42:20.588983  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-34: (2.309318ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38782]
I0111 23:42:20.589377  120899 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0111 23:42:20.589530  120899 scheduling_queue.go:821] About to try and schedule pod preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-33
I0111 23:42:20.589567  120899 scheduler.go:454] Attempting to schedule pod: preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-33
I0111 23:42:20.589666  120899 factory.go:1070] Unable to schedule preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-33: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0111 23:42:20.589718  120899 factory.go:1175] Updating pod condition for preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-33 to (PodScheduled==False, Reason=Unschedulable)
I0111 23:42:20.590457  120899 wrap.go:47] PATCH /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/events/ppod-35.1578efce1e646ebf: (3.054527ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38826]
I0111 23:42:20.590871  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-33: (960.68µs) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38782]
I0111 23:42:20.591874  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-34: (9.444245ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38866]
I0111 23:42:20.592787  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-1: (3.885326ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38870]
I0111 23:42:20.593196  120899 wrap.go:47] POST /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/events: (2.292394ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38826]
I0111 23:42:20.594158  120899 wrap.go:47] PUT /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-33/status: (4.157957ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38868]
I0111 23:42:20.597665  120899 wrap.go:47] POST /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/events: (3.99005ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38826]
I0111 23:42:20.598980  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-33: (4.333342ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38868]
I0111 23:42:20.599460  120899 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0111 23:42:20.599632  120899 scheduling_queue.go:821] About to try and schedule pod preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-34
I0111 23:42:20.599671  120899 scheduler.go:454] Attempting to schedule pod: preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-34
I0111 23:42:20.599785  120899 factory.go:1070] Unable to schedule preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-34: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0111 23:42:20.599876  120899 factory.go:1175] Updating pod condition for preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-34 to (PodScheduled==False, Reason=Unschedulable)
I0111 23:42:20.602510  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-2: (8.943057ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38866]
I0111 23:42:20.604595  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-3: (1.331591ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38882]
I0111 23:42:20.605057  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-34: (4.199557ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38782]
I0111 23:42:20.605523  120899 wrap.go:47] PUT /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-34/status: (4.279269ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38826]
I0111 23:42:20.607028  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-4: (2.000904ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38882]
I0111 23:42:20.607500  120899 wrap.go:47] PATCH /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/events/ppod-34.1578efce200b36c3: (4.434418ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38866]
I0111 23:42:20.608452  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-34: (1.734236ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38782]
I0111 23:42:20.608803  120899 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0111 23:42:20.608963  120899 scheduling_queue.go:821] About to try and schedule pod preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-33
I0111 23:42:20.608987  120899 scheduler.go:454] Attempting to schedule pod: preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-33
I0111 23:42:20.609083  120899 factory.go:1070] Unable to schedule preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-33: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0111 23:42:20.609145  120899 factory.go:1175] Updating pod condition for preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-33 to (PodScheduled==False, Reason=Unschedulable)
I0111 23:42:20.612556  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-5: (4.591491ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38882]
I0111 23:42:20.613474  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-33: (2.544581ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38880]
I0111 23:42:20.613890  120899 wrap.go:47] PUT /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-33/status: (2.717013ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38866]
I0111 23:42:20.615515  120899 wrap.go:47] PATCH /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/events/ppod-33.1578efce2085c529: (5.004512ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38884]
I0111 23:42:20.640353  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-33: (23.311382ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38866]
I0111 23:42:20.640711  120899 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0111 23:42:20.640893  120899 scheduling_queue.go:821] About to try and schedule pod preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-32
I0111 23:42:20.640918  120899 scheduler.go:454] Attempting to schedule pod: preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-32
I0111 23:42:20.641016  120899 factory.go:1070] Unable to schedule preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-32: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0111 23:42:20.641071  120899 factory.go:1175] Updating pod condition for preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-32 to (PodScheduled==False, Reason=Unschedulable)
I0111 23:42:20.642372  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-6: (25.738624ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38882]
I0111 23:42:20.645888  120899 wrap.go:47] PUT /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-32/status: (2.282396ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38884]
I0111 23:42:20.646989  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-32: (3.725005ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38880]
I0111 23:42:20.647944  120899 wrap.go:47] PATCH /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/events/ppod-32.1578efce0e220263: (5.256808ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38900]
I0111 23:42:20.650973  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-32: (2.906974ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38884]
I0111 23:42:20.651463  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-7: (2.322138ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38900]
I0111 23:42:20.651973  120899 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0111 23:42:20.652523  120899 scheduling_queue.go:821] About to try and schedule pod preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-31
I0111 23:42:20.652580  120899 scheduler.go:454] Attempting to schedule pod: preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-31
I0111 23:42:20.652717  120899 factory.go:1070] Unable to schedule preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-31: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0111 23:42:20.652827  120899 factory.go:1175] Updating pod condition for preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-31 to (PodScheduled==False, Reason=Unschedulable)
I0111 23:42:20.653850  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-8: (1.696153ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38900]
I0111 23:42:20.654372  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-31: (1.274938ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38884]
I0111 23:42:20.673985  120899 wrap.go:47] POST /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/events: (19.361063ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38906]
I0111 23:42:20.674245  120899 wrap.go:47] PUT /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-31/status: (20.690114ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38880]
I0111 23:42:20.674562  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-9: (19.106814ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38900]
I0111 23:42:20.678085  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-10: (1.536642ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38900]
I0111 23:42:20.678644  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-31: (2.591204ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38880]
I0111 23:42:20.679189  120899 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0111 23:42:20.679570  120899 scheduling_queue.go:821] About to try and schedule pod preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-30
I0111 23:42:20.679597  120899 scheduler.go:454] Attempting to schedule pod: preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-30
I0111 23:42:20.679694  120899 factory.go:1070] Unable to schedule preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-30: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0111 23:42:20.679756  120899 factory.go:1175] Updating pod condition for preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-30 to (PodScheduled==False, Reason=Unschedulable)
I0111 23:42:20.680333  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-11: (1.108136ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38906]
I0111 23:42:20.683172  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-12: (1.716093ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38926]
I0111 23:42:20.683466  120899 wrap.go:47] PUT /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-30/status: (2.9466ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38884]
I0111 23:42:20.683876  120899 wrap.go:47] POST /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/events: (3.337286ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38928]
I0111 23:42:20.684143  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-30: (2.6579ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38906]
I0111 23:42:20.685972  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-13: (1.613537ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38926]
I0111 23:42:20.686248  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-30: (1.729704ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38928]
I0111 23:42:20.686562  120899 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0111 23:42:20.686850  120899 scheduling_queue.go:821] About to try and schedule pod preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-31
I0111 23:42:20.686864  120899 scheduler.go:454] Attempting to schedule pod: preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-31
I0111 23:42:20.686945  120899 factory.go:1070] Unable to schedule preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-31: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0111 23:42:20.686980  120899 factory.go:1175] Updating pod condition for preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-31 to (PodScheduled==False, Reason=Unschedulable)
I0111 23:42:20.688476  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-14: (1.900849ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38906]
I0111 23:42:20.690453  120899 wrap.go:47] PUT /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-31/status: (1.810533ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38930]
I0111 23:42:20.690466  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-31: (1.932595ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38932]
I0111 23:42:20.690878  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-15: (1.809451ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38906]
I0111 23:42:20.692732  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-31: (1.163943ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38932]
I0111 23:42:20.693148  120899 wrap.go:47] PATCH /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/events/ppod-31.1578efce2448b250: (5.057105ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38934]
I0111 23:42:20.693150  120899 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0111 23:42:20.693549  120899 scheduling_queue.go:821] About to try and schedule pod preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-30
I0111 23:42:20.693563  120899 scheduler.go:454] Attempting to schedule pod: preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-30
I0111 23:42:20.693665  120899 factory.go:1070] Unable to schedule preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-30: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0111 23:42:20.693762  120899 factory.go:1175] Updating pod condition for preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-30 to (PodScheduled==False, Reason=Unschedulable)
I0111 23:42:20.697337  120899 wrap.go:47] PATCH /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/events/ppod-30.1578efce25e36772: (2.722211ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38936]
I0111 23:42:20.698682  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-16: (6.873904ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38906]
I0111 23:42:20.699727  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-30: (5.72604ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38932]
I0111 23:42:20.699807  120899 wrap.go:47] PUT /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-30/status: (5.197844ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38930]
I0111 23:42:20.700908  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-17: (1.848343ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38906]
I0111 23:42:20.704789  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-18: (2.094251ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38930]
I0111 23:42:20.706490  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-19: (1.230208ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38930]
I0111 23:42:20.708048  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-20: (1.131501ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38930]
I0111 23:42:20.708421  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-30: (5.546263ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38936]
I0111 23:42:20.708919  120899 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0111 23:42:20.709355  120899 scheduling_queue.go:821] About to try and schedule pod preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-29
I0111 23:42:20.709396  120899 scheduler.go:454] Attempting to schedule pod: preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-29
I0111 23:42:20.709923  120899 factory.go:1070] Unable to schedule preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-29: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0111 23:42:20.709806  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-21: (1.278764ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38930]
I0111 23:42:20.710004  120899 factory.go:1175] Updating pod condition for preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-29 to (PodScheduled==False, Reason=Unschedulable)
I0111 23:42:20.711910  120899 wrap.go:47] POST /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/events: (1.534595ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38936]
I0111 23:42:20.713924  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-22: (3.307092ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38940]
I0111 23:42:20.713945  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-29: (1.610532ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38930]
I0111 23:42:20.715146  120899 wrap.go:47] PUT /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-29/status: (3.591293ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38942]
I0111 23:42:20.715632  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-23: (1.035228ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38940]
I0111 23:42:20.717980  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-29: (2.168705ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38930]
I0111 23:42:20.719930  120899 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0111 23:42:20.720400  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-24: (3.187626ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38940]
I0111 23:42:20.720488  120899 scheduling_queue.go:821] About to try and schedule pod preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-26
I0111 23:42:20.720654  120899 scheduler.go:454] Attempting to schedule pod: preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-26
I0111 23:42:20.720944  120899 factory.go:1070] Unable to schedule preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-26: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0111 23:42:20.721024  120899 factory.go:1175] Updating pod condition for preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-26 to (PodScheduled==False, Reason=Unschedulable)
I0111 23:42:20.723761  120899 wrap.go:47] POST /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/events: (1.613077ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38944]
I0111 23:42:20.724531  120899 wrap.go:47] PUT /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-26/status: (1.906251ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38946]
I0111 23:42:20.725156  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-25: (3.770589ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38936]
I0111 23:42:20.725438  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-26: (3.810166ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38930]
I0111 23:42:20.726934  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-26: (1.577209ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38946]
I0111 23:42:20.727236  120899 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0111 23:42:20.727453  120899 scheduling_queue.go:821] About to try and schedule pod preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-29
I0111 23:42:20.727487  120899 scheduler.go:454] Attempting to schedule pod: preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-29
I0111 23:42:20.727621  120899 factory.go:1070] Unable to schedule preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-29: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0111 23:42:20.727674  120899 factory.go:1175] Updating pod condition for preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-29 to (PodScheduled==False, Reason=Unschedulable)
I0111 23:42:20.729697  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-26: (3.180487ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38936]
I0111 23:42:20.730315  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-29: (2.203044ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38930]
I0111 23:42:20.732580  120899 wrap.go:47] PATCH /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/events/ppod-29.1578efce27b12eee: (3.916957ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38948]
I0111 23:42:20.732609  120899 wrap.go:47] PUT /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-29/status: (4.494437ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38944]
I0111 23:42:20.735169  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-29: (2.051928ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38948]
I0111 23:42:20.735343  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-27: (4.218287ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38936]
I0111 23:42:20.735803  120899 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0111 23:42:20.736013  120899 scheduling_queue.go:821] About to try and schedule pod preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-26
I0111 23:42:20.736050  120899 scheduler.go:454] Attempting to schedule pod: preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-26
I0111 23:42:20.736202  120899 factory.go:1070] Unable to schedule preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-26: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0111 23:42:20.736478  120899 factory.go:1175] Updating pod condition for preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-26 to (PodScheduled==False, Reason=Unschedulable)
I0111 23:42:20.739712  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-28: (2.037887ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38950]
I0111 23:42:20.740401  120899 wrap.go:47] PUT /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-26/status: (3.195427ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38930]
I0111 23:42:20.740934  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-26: (4.200665ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38948]
I0111 23:42:20.741618  120899 wrap.go:47] PATCH /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/events/ppod-26.1578efce28595005: (3.781383ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38952]
I0111 23:42:20.742394  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-26: (1.559888ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38930]
I0111 23:42:20.742567  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-29: (2.385383ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38950]
I0111 23:42:20.743035  120899 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0111 23:42:20.743317  120899 scheduling_queue.go:821] About to try and schedule pod preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-22
I0111 23:42:20.743368  120899 scheduler.go:454] Attempting to schedule pod: preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-22
I0111 23:42:20.743517  120899 factory.go:1070] Unable to schedule preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-22: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0111 23:42:20.743597  120899 factory.go:1175] Updating pod condition for preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-22 to (PodScheduled==False, Reason=Unschedulable)
I0111 23:42:20.744390  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-30: (1.189322ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38952]
I0111 23:42:20.746098  120899 wrap.go:47] PUT /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-22/status: (2.169762ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38948]
I0111 23:42:20.747259  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-22: (2.03213ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38952]
I0111 23:42:20.748502  120899 wrap.go:47] PATCH /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/events/ppod-22.1578efce0a49537f: (3.855277ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38954]
I0111 23:42:20.749714  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-31: (4.156822ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38956]
I0111 23:42:20.751584  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-22: (4.985025ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38948]
I0111 23:42:20.752334  120899 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0111 23:42:20.752623  120899 scheduling_queue.go:821] About to try and schedule pod preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-23
I0111 23:42:20.752660  120899 scheduler.go:454] Attempting to schedule pod: preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-23
I0111 23:42:20.752686  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-32: (2.112614ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38958]
I0111 23:42:20.752845  120899 factory.go:1070] Unable to schedule preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-23: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0111 23:42:20.752938  120899 factory.go:1175] Updating pod condition for preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-23 to (PodScheduled==False, Reason=Unschedulable)
I0111 23:42:20.755721  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-23: (2.546369ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38954]
I0111 23:42:20.759766  120899 wrap.go:47] PUT /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-23/status: (2.88427ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38954]
I0111 23:42:20.760771  120899 wrap.go:47] POST /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/events: (4.8887ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38962]
I0111 23:42:20.761954  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-23: (1.820588ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38964]
I0111 23:42:20.762211  120899 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0111 23:42:20.762477  120899 scheduling_queue.go:821] About to try and schedule pod preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-21
I0111 23:42:20.762491  120899 scheduler.go:454] Attempting to schedule pod: preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-21
I0111 23:42:20.762602  120899 factory.go:1070] Unable to schedule preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-21: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0111 23:42:20.762646  120899 factory.go:1175] Updating pod condition for preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-21 to (PodScheduled==False, Reason=Unschedulable)
I0111 23:42:20.764674  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-21: (1.314137ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38954]
I0111 23:42:20.764888  120899 wrap.go:47] POST /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/events: (1.458192ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38966]
I0111 23:42:20.765080  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-33: (2.13204ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38952]
I0111 23:42:20.766998  120899 wrap.go:47] PUT /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-21/status: (3.54634ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38962]
I0111 23:42:20.767990  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-34: (1.531275ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38966]
I0111 23:42:20.769872  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-35: (1.520202ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38966]
I0111 23:42:20.770267  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-21: (2.289873ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38954]
I0111 23:42:20.771009  120899 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0111 23:42:20.771320  120899 scheduling_queue.go:821] About to try and schedule pod preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-23
I0111 23:42:20.771343  120899 scheduler.go:454] Attempting to schedule pod: preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-23
I0111 23:42:20.771426  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-36: (1.230726ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38966]
I0111 23:42:20.771501  120899 factory.go:1070] Unable to schedule preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-23: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0111 23:42:20.771539  120899 factory.go:1175] Updating pod condition for preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-23 to (PodScheduled==False, Reason=Unschedulable)
E0111 23:42:20.774151  120899 rest.go:216] >>>> caught error : &errors.StatusError{ErrStatus:v1.Status{TypeMeta:v1.TypeMeta{Kind:"", APIVersion:""}, ListMeta:v1.ListMeta{SelfLink:"", ResourceVersion:"", Continue:""}, Status:"Failure", Message:"Operation cannot be fulfilled on pods \"ppod-23\": the object has been modified; please apply your changes to the latest version and try again", Reason:"Conflict", Details:(*v1.StatusDetails)(0xc010029ce0), Code:409}}
I0111 23:42:20.774201  120899 update.go:183] Unable to store in database : &errors.StatusError{ErrStatus:v1.Status{TypeMeta:v1.TypeMeta{Kind:"", APIVersion:""}, ListMeta:v1.ListMeta{SelfLink:"", ResourceVersion:"", Continue:""}, Status:"Failure", Message:"Operation cannot be fulfilled on pods \"ppod-23\": the object has been modified; please apply your changes to the latest version and try again", Reason:"Conflict", Details:(*v1.StatusDetails)(0xc010029ce0), Code:409}}
I0111 23:42:20.774317  120899 trace.go:84] Trace[582023322]: "Update /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-23/status" (started: 2019-01-11 23:42:20.772162422 +0000 UTC m=+64.467756219) (total time: 2.096176ms):
Trace[582023322]: [56.549µs] [56.549µs] About to convert to expected version
Trace[582023322]: [178.114µs] [121.565µs] Conversion done
Trace[582023322]: [182.62µs] [4.506µs] About to store object in database
Trace[582023322]: [2.096176ms] [1.913556ms] END
I0111 23:42:20.774328  120899 update.go:53] >>>> UpdateResource bad count : &http.Request{Method:"PUT", URL:(*url.URL)(0xc00b6a1f80), Proto:"HTTP/1.1", ProtoMajor:1, ProtoMinor:1, Header:http.Header{"User-Agent":[]string{"scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format"}, "Content-Length":[]string{"1091"}, "Accept":[]string{"application/json, */*"}, "Content-Type":[]string{"application/json"}, "Accept-Encoding":[]string{"gzip"}}, Body:(*http.body)(0xc00f6eacc0), GetBody:(func() (io.ReadCloser, error))(nil), ContentLength:1091, TransferEncoding:[]string(nil), Close:false, Host:"127.0.0.1:46609", Form:url.Values(nil), PostForm:url.Values(nil), MultipartForm:(*multipart.Form)(nil), Trailer:http.Header(nil), RemoteAddr:"127.0.0.1:38954", RequestURI:"/api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-23/status", TLS:(*tls.ConnectionState)(nil), Cancel:(<-chan struct {})(nil), Response:(*http.Response)(nil), ctx:(*context.valueCtx)(0xc00fe6ea80)}
I0111 23:42:20.774427  120899 wrap.go:47] PUT /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-23/status: (2.353301ms) 409 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38954]
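This is the notable event in the run: the status PUT for ppod-23 returns 409 Conflict because the object was modified between the scheduler's read and its write. The ">>>> caught error" and ">>>> UpdateResource bad count" lines are not upstream log messages; given the PR title, they appear to be this WIP PR's added instrumentation dumping the StatusError and the raw *http.Request. The Trace[...] block above them is the apiserver's request trace; a minimal sketch of that utility as used around this release (step names copied from the log, threshold illustrative). Note that the trace printed despite a ~2ms total, which normally LogIfLong would suppress, consistent with the PR forcing trace output while debugging:

```go
package sketch

import (
	"time"

	utiltrace "k8s.io/apiserver/pkg/util/trace"
)

// traceUpdate shows how a handler produces the Trace[...] output above:
// record named steps, then dump them all if the request ran long. The
// wrapped store() stands in for the actual etcd write.
func traceUpdate(store func() error) error {
	t := utiltrace.New("Update /api/v1/namespaces/.../pods/ppod-23/status")
	defer t.LogIfLong(250 * time.Millisecond) // threshold is illustrative

	t.Step("About to convert to expected version")
	// ... version conversion ...
	t.Step("Conversion done")
	t.Step("About to store object in database")
	return store()
}
```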
I0111 23:42:20.774836  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-37: (3.00722ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38952]
I0111 23:42:20.775184  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-23: (2.541259ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38968]
I0111 23:42:20.776589  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-23: (1.870064ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38954]
I0111 23:42:20.776850  120899 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0111 23:42:20.777112  120899 scheduling_queue.go:821] About to try and schedule pod preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-21
I0111 23:42:20.777140  120899 scheduler.go:454] Attempting to schedule pod: preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-21
I0111 23:42:20.777185  120899 wrap.go:47] PATCH /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/events/ppod-23.1578efce2a40497a: (4.580615ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38970]
I0111 23:42:20.777235  120899 factory.go:1070] Unable to schedule preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-21: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0111 23:42:20.777296  120899 factory.go:1175] Updating pod condition for preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-21 to (PodScheduled==False, Reason=Unschedulable)
I0111 23:42:20.779026  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-38: (3.740853ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38952]
E0111 23:42:20.779354  120899 rest.go:216] >>>> caught error : &errors.StatusError{ErrStatus:v1.Status{TypeMeta:v1.TypeMeta{Kind:"", APIVersion:""}, ListMeta:v1.ListMeta{SelfLink:"", ResourceVersion:"", Continue:""}, Status:"Failure", Message:"Operation cannot be fulfilled on pods \"ppod-21\": the object has been modified; please apply your changes to the latest version and try again", Reason:"Conflict", Details:(*v1.StatusDetails)(0xc010308420), Code:409}}
I0111 23:42:20.779556  120899 update.go:183] Unable to store in database : &errors.StatusError{ErrStatus:v1.Status{TypeMeta:v1.TypeMeta{Kind:"", APIVersion:""}, ListMeta:v1.ListMeta{SelfLink:"", ResourceVersion:"", Continue:""}, Status:"Failure", Message:"Operation cannot be fulfilled on pods \"ppod-21\": the object has been modified; please apply your changes to the latest version and try again", Reason:"Conflict", Details:(*v1.StatusDetails)(0xc010308420), Code:409}}
I0111 23:42:20.779707  120899 trace.go:84] Trace[678570414]: "Update /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-21/status" (started: 2019-01-11 23:42:20.777640831 +0000 UTC m=+64.473234626) (total time: 2.012117ms):
Trace[678570414]: [56.297µs] [56.297µs] About to convert to expected version
Trace[678570414]: [188.776µs] [132.479µs] Conversion done
Trace[678570414]: [192.726µs] [3.95µs] About to store object in database
Trace[678570414]: [2.012117ms] [1.819391ms] END
I0111 23:42:20.779757  120899 update.go:53] >>>> UpdateResource bad count : &http.Request{Method:"PUT", URL:(*url.URL)(0xc00b7c0500), Proto:"HTTP/1.1", ProtoMajor:1, ProtoMinor:1, Header:http.Header{"Content-Type":[]string{"application/json"}, "Accept-Encoding":[]string{"gzip"}, "User-Agent":[]string{"scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format"}, "Content-Length":[]string{"1091"}, "Accept":[]string{"application/json, */*"}}, Body:(*http.body)(0xc00f1efa00), GetBody:(func() (io.ReadCloser, error))(nil), ContentLength:1091, TransferEncoding:[]string(nil), Close:false, Host:"127.0.0.1:46609", Form:url.Values(nil), PostForm:url.Values(nil), MultipartForm:(*multipart.Form)(nil), Trailer:http.Header(nil), RemoteAddr:"127.0.0.1:38954", RequestURI:"/api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-21/status", TLS:(*tls.ConnectionState)(nil), Cancel:(<-chan struct {})(nil), Response:(*http.Response)(nil), ctx:(*context.valueCtx)(0xc00fc5cbd0)}
I0111 23:42:20.779915  120899 wrap.go:47] PUT /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-21/status: (2.405626ms) 409 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38954]
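The same Conflict repeats for ppod-21 a few milliseconds later. The test tolerates these because a 409 on an optimistic-concurrency write only means the writer's copy was stale; the standard client-side remedy is to re-read and retry, e.g. with client-go's retry helper. A sketch against a plain clientset, not what factory.go literally does:

```go
package sketch

import (
	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/util/retry"
)

// updateStatusWithRetry re-reads the pod and reapplies the change on every
// 409 Conflict, the standard optimistic-concurrency retry loop.
func updateStatusWithRetry(cs kubernetes.Interface, ns, name string, mutate func(*v1.Pod)) error {
	return retry.RetryOnConflict(retry.DefaultRetry, func() error {
		pod, err := cs.CoreV1().Pods(ns).Get(name, metav1.GetOptions{})
		if err != nil {
			return err
		}
		mutate(pod)
		_, err = cs.CoreV1().Pods(ns).UpdateStatus(pod)
		return err
	})
}
```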
I0111 23:42:20.780152  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-21: (1.98796ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38968]
I0111 23:42:20.782722  120899 wrap.go:47] PATCH /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/events/ppod-21.1578efce2ad4720f: (4.538233ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38972]
I0111 23:42:20.783416  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-39: (3.948058ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38952]
I0111 23:42:20.783626  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-21: (3.238757ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38954]
I0111 23:42:20.784023  120899 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0111 23:42:20.784245  120899 scheduling_queue.go:821] About to try and schedule pod preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-18
I0111 23:42:20.784298  120899 scheduler.go:454] Attempting to schedule pod: preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-18
I0111 23:42:20.784400  120899 factory.go:1070] Unable to schedule preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-18: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0111 23:42:20.784452  120899 factory.go:1175] Updating pod condition for preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-18 to (PodScheduled==False, Reason=Unschedulable)
I0111 23:42:20.788577  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-40: (4.761702ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38972]
I0111 23:42:20.788666  120899 wrap.go:47] PUT /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-18/status: (2.549341ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38954]
I0111 23:42:20.788934  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-18: (3.119767ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38968]
I0111 23:42:20.790906  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-41: (1.938888ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38954]
I0111 23:42:20.790930  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-18: (1.855277ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38972]
I0111 23:42:20.791174  120899 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0111 23:42:20.791365  120899 scheduling_queue.go:821] About to try and schedule pod preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-20
I0111 23:42:20.791376  120899 scheduler.go:454] Attempting to schedule pod: preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-20
I0111 23:42:20.791454  120899 factory.go:1070] Unable to schedule preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-20: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0111 23:42:20.791493  120899 factory.go:1175] Updating pod condition for preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-20 to (PodScheduled==False, Reason=Unschedulable)
I0111 23:42:20.792997  120899 wrap.go:47] PATCH /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/events/ppod-18.1578efce08379fe7: (7.598353ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38974]
I0111 23:42:20.794145  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-42: (2.725374ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38972]
I0111 23:42:20.794270  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-20: (2.154457ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38976]
I0111 23:42:20.794846  120899 wrap.go:47] PUT /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-20/status: (3.017261ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38968]
I0111 23:42:20.796457  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-43: (1.082078ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38972]
I0111 23:42:20.797601  120899 wrap.go:47] POST /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/events: (3.214234ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38974]
I0111 23:42:20.798617  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-20: (1.496688ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38968]
I0111 23:42:20.799073  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-44: (2.11042ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38976]
I0111 23:42:20.799444  120899 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0111 23:42:20.800144  120899 scheduling_queue.go:821] About to try and schedule pod preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-19
I0111 23:42:20.800193  120899 scheduler.go:454] Attempting to schedule pod: preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-19
I0111 23:42:20.800363  120899 factory.go:1070] Unable to schedule preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-19: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0111 23:42:20.800435  120899 factory.go:1175] Updating pod condition for preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-19 to (PodScheduled==False, Reason=Unschedulable)
I0111 23:42:20.800928  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-45: (1.227899ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38968]
I0111 23:42:20.803237  120899 wrap.go:47] POST /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/events: (1.875359ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38980]
I0111 23:42:20.803341  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-19: (1.54616ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38968]
I0111 23:42:20.803537  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-46: (2.059032ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38978]
I0111 23:42:20.803766  120899 wrap.go:47] PUT /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-19/status: (2.567265ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38974]
I0111 23:42:20.804982  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-47: (1.087822ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38980]
I0111 23:42:20.806812  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-19: (1.581161ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38974]
I0111 23:42:20.807039  120899 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0111 23:42:20.807191  120899 scheduling_queue.go:821] About to try and schedule pod preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-20
I0111 23:42:20.807213  120899 scheduler.go:454] Attempting to schedule pod: preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-20
I0111 23:42:20.807224  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-48: (1.270033ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38980]
I0111 23:42:20.807344  120899 factory.go:1070] Unable to schedule preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-20: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0111 23:42:20.807395  120899 factory.go:1175] Updating pod condition for preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-20 to (PodScheduled==False, Reason=Unschedulable)
I0111 23:42:20.809931  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-20: (2.248155ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38982]
I0111 23:42:20.810510  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-49: (2.509755ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38984]
I0111 23:42:20.810905  120899 preemption_test.go:598] Cleaning up all pods...
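preemption_test.go:598 marks the start of teardown: the DELETE calls for ppod-0, ppod-1, ... below run while the scheduler is still draining its queue, which is why scheduling attempts keep interleaving with the deletes. A sketch of typical integration-test cleanup (force-delete, then poll until the namespace is empty), assuming a plain clientset rather than the exact test helper:

```go
package sketch

import (
	"time"

	"k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/wait"
	"k8s.io/client-go/kubernetes"
)

// cleanupPods force-deletes every named pod, then polls until none remain.
func cleanupPods(cs kubernetes.Interface, ns string, names []string) error {
	for _, name := range names {
		err := cs.CoreV1().Pods(ns).Delete(name, metav1.NewDeleteOptions(0))
		if err != nil && !errors.IsNotFound(err) {
			return err
		}
	}
	return wait.Poll(100*time.Millisecond, 30*time.Second, func() (bool, error) {
		pods, err := cs.CoreV1().Pods(ns).List(metav1.ListOptions{})
		if err != nil {
			return false, err
		}
		return len(pods.Items) == 0, nil
	})
}
```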
I0111 23:42:20.811460  120899 wrap.go:47] PUT /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-20/status: (3.660183ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38974]
I0111 23:42:20.813566  120899 wrap.go:47] PATCH /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/events/ppod-20.1578efce2c8ca659: (5.464625ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38986]
I0111 23:42:20.814100  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-20: (1.186996ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38974]
I0111 23:42:20.814403  120899 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0111 23:42:20.814575  120899 scheduling_queue.go:821] About to try and schedule pod preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-17
I0111 23:42:20.814607  120899 scheduler.go:454] Attempting to schedule pod: preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-17
I0111 23:42:20.814707  120899 factory.go:1070] Unable to schedule preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-17: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0111 23:42:20.814825  120899 factory.go:1175] Updating pod condition for preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-17 to (PodScheduled==False, Reason=Unschedulable)
I0111 23:42:20.816554  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-17: (1.465142ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38984]
I0111 23:42:20.817195  120899 wrap.go:47] DELETE /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-0: (6.103759ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38982]
I0111 23:42:20.817731  120899 wrap.go:47] PUT /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-17/status: (2.616905ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38986]
I0111 23:42:20.820733  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-17: (1.209475ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38986]
I0111 23:42:20.821085  120899 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0111 23:42:20.821346  120899 scheduling_queue.go:821] About to try and schedule pod preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-16
I0111 23:42:20.821394  120899 scheduler.go:454] Attempting to schedule pod: preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-16
I0111 23:42:20.821894  120899 wrap.go:47] POST /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/events: (6.476085ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38988]
I0111 23:42:20.822002  120899 factory.go:1070] Unable to schedule preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-16: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0111 23:42:20.822104  120899 factory.go:1175] Updating pod condition for preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-16 to (PodScheduled==False, Reason=Unschedulable)
I0111 23:42:20.825442  120899 wrap.go:47] POST /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/events: (2.229218ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38988]
I0111 23:42:20.825975  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-16: (3.117733ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38984]
I0111 23:42:20.826351  120899 wrap.go:47] PUT /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-16/status: (2.635521ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38986]
I0111 23:42:20.828269  120899 wrap.go:47] DELETE /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-1: (10.646292ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38982]
I0111 23:42:20.829114  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-16: (1.464238ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38986]
I0111 23:42:20.830412  120899 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0111 23:42:20.831491  120899 scheduling_queue.go:821] About to try and schedule pod preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-17
I0111 23:42:20.831545  120899 scheduler.go:454] Attempting to schedule pod: preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-17
I0111 23:42:20.831643  120899 factory.go:1070] Unable to schedule preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-17: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0111 23:42:20.834324  120899 factory.go:1175] Updating pod condition for preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-17 to (PodScheduled==False, Reason=Unschedulable)
I0111 23:42:20.835416  120899 wrap.go:47] DELETE /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-2: (5.578805ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38988]
I0111 23:42:20.838582  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-17: (3.878664ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38986]
I0111 23:42:20.838831  120899 wrap.go:47] PUT /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-17/status: (2.395665ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38984]
I0111 23:42:20.840698  120899 wrap.go:47] PATCH /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/events/ppod-17.1578efce2df09668: (2.518848ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38990]
I0111 23:42:20.841986  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-17: (2.443025ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38984]
I0111 23:42:20.842345  120899 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0111 23:42:20.842544  120899 scheduling_queue.go:821] About to try and schedule pod preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-9
I0111 23:42:20.842600  120899 scheduler.go:454] Attempting to schedule pod: preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-9
I0111 23:42:20.842692  120899 factory.go:1070] Unable to schedule preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-9: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0111 23:42:20.842781  120899 factory.go:1175] Updating pod condition for preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-9 to (PodScheduled==False, Reason=Unschedulable)
I0111 23:42:20.846712  120899 wrap.go:47] PATCH /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/events/ppod-9.1578efce06f2d041: (3.08188ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38992]
I0111 23:42:20.847469  120899 wrap.go:47] DELETE /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-3: (8.38026ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38988]
I0111 23:42:20.847558  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-9: (4.383117ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38986]
I0111 23:42:20.848107  120899 wrap.go:47] PUT /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-9/status: (4.914187ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38990]
I0111 23:42:20.852359  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-9: (3.742056ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38990]
I0111 23:42:20.854965  120899 wrap.go:47] DELETE /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-4: (6.993379ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38988]
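
The block above is one complete pass of the scheduler's unschedulable-pod flow: scheduling_queue.go pops a pod, generic_scheduler.go finds no fit on node1 but flags it as a preemption candidate, and factory.go writes the pod condition back, which is the PUT .../pods/<name>/status that follows each failure. A minimal Go sketch of the condition those PUTs carry; the field values mirror the log messages above, and the helper name is illustrative, not the factory.go source:

package sketch

import (
	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// unschedulableCondition shows what the PUT .../pods/<name>/status requests
// above carry for each pod the scheduler could not place (sketch only).
func unschedulableCondition() corev1.PodCondition {
	return corev1.PodCondition{
		Type:               corev1.PodScheduled,
		Status:             corev1.ConditionFalse,
		Reason:             corev1.PodReasonUnschedulable, // "Unschedulable", as logged by factory.go:1175
		Message:            "0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.",
		LastTransitionTime: metav1.Now(),
	}
}
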
I0111 23:42:20.855987  120899 reflector.go:215] k8s.io/client-go/informers/factory.go:132: forcing resync
I0111 23:42:20.856695  120899 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0111 23:42:20.862049  120899 wrap.go:47] DELETE /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-5: (6.638392ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38990]
I0111 23:42:20.862299  120899 reflector.go:215] k8s.io/client-go/informers/factory.go:132: forcing resync
I0111 23:42:20.863125  120899 reflector.go:215] k8s.io/client-go/informers/factory.go:132: forcing resync
I0111 23:42:20.863149  120899 reflector.go:215] k8s.io/client-go/informers/factory.go:132: forcing resync
I0111 23:42:20.863403  120899 reflector.go:215] k8s.io/client-go/informers/factory.go:132: forcing resync
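
The five reflector.go:215 "forcing resync" lines are the shared informer caches hitting their periodic resync, one line per informer; they are unrelated to the scheduling traffic interleaved around them. For reference, a sketch of how such a factory is typically constructed in a test harness; the 30-second period is an assumption, not read from this test:

package sketch

import (
	"time"

	"k8s.io/client-go/informers"
	"k8s.io/client-go/kubernetes"
)

// newInformers builds a shared informer factory; each "forcing resync" line
// above is one informer's resync timer firing on a period like this one.
func newInformers(cs kubernetes.Interface) informers.SharedInformerFactory {
	return informers.NewSharedInformerFactory(cs, 30*time.Second) // period assumed
}
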
I0111 23:42:20.864388  120899 scheduling_queue.go:821] About to try and schedule pod preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-12
I0111 23:42:20.865560  120899 scheduler.go:454] Attempting to schedule pod: preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-12
I0111 23:42:20.867514  120899 factory.go:1070] Unable to schedule preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-12: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0111 23:42:20.867761  120899 factory.go:1175] Updating pod condition for preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-12 to (PodScheduled==False, Reason=Unschedulable)
I0111 23:42:20.871822  120899 wrap.go:47] POST /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/events: (2.345807ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38996]
I0111 23:42:20.872861  120899 wrap.go:47] PUT /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-12/status: (3.396167ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38994]
I0111 23:42:20.874211  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-12: (5.9722ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38992]
I0111 23:42:20.875443  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-12: (2.231648ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38994]
I0111 23:42:20.875913  120899 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0111 23:42:20.876158  120899 scheduling_queue.go:821] About to try and schedule pod preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-7
I0111 23:42:20.876182  120899 scheduler.go:454] Attempting to schedule pod: preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-7
I0111 23:42:20.876266  120899 factory.go:1070] Unable to schedule preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-7: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0111 23:42:20.876343  120899 factory.go:1175] Updating pod condition for preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-7 to (PodScheduled==False, Reason=Unschedulable)
I0111 23:42:20.879036  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-7: (1.774166ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38996]
I0111 23:42:20.879539  120899 wrap.go:47] PUT /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-7/status: (1.851115ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38994]
I0111 23:42:20.879772  120899 wrap.go:47] DELETE /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-6: (16.000806ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38990]
I0111 23:42:20.883343  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-7: (2.773147ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38996]
I0111 23:42:20.883693  120899 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0111 23:42:20.883886  120899 scheduling_queue.go:821] About to try and schedule pod preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-12
I0111 23:42:20.883899  120899 scheduler.go:454] Attempting to schedule pod: preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-12
I0111 23:42:20.883983  120899 factory.go:1070] Unable to schedule preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-12: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0111 23:42:20.884093  120899 factory.go:1175] Updating pod condition for preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-12 to (PodScheduled==False, Reason=Unschedulable)
I0111 23:42:20.885966  120899 wrap.go:47] PATCH /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/events/ppod-7.1578efce045c0aad: (8.632529ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39000]
I0111 23:42:20.886615  120899 wrap.go:47] PUT /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-12/status: (2.182688ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38992]
I0111 23:42:20.887051  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-12: (2.529512ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38994]
I0111 23:42:20.888004  120899 wrap.go:47] DELETE /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-7: (7.102824ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38990]
I0111 23:42:20.890428  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-12: (2.842884ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38992]
I0111 23:42:20.890855  120899 wrap.go:47] PATCH /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/events/ppod-12.1578efce31180186: (3.805661ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39000]
I0111 23:42:20.890987  120899 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0111 23:42:20.891255  120899 scheduling_queue.go:821] About to try and schedule pod preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-10
I0111 23:42:20.891333  120899 scheduler.go:454] Attempting to schedule pod: preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-10
I0111 23:42:20.892580  120899 factory.go:1070] Unable to schedule preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-10: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0111 23:42:20.892695  120899 factory.go:1175] Updating pod condition for preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-10 to (PodScheduled==False, Reason=Unschedulable)
I0111 23:42:20.895950  120899 wrap.go:47] POST /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/events: (2.041114ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39002]
I0111 23:42:20.896217  120899 wrap.go:47] DELETE /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-8: (7.780882ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38990]
I0111 23:42:20.896257  120899 wrap.go:47] PUT /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-10/status: (2.874525ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38994]
I0111 23:42:20.896977  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-10: (3.879307ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38992]
I0111 23:42:20.898619  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-10: (1.883089ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38994]
I0111 23:42:20.898914  120899 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0111 23:42:20.899105  120899 scheduling_queue.go:821] About to try and schedule pod preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-10
I0111 23:42:20.899128  120899 scheduler.go:454] Attempting to schedule pod: preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-10
I0111 23:42:20.899208  120899 factory.go:1070] Unable to schedule preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-10: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0111 23:42:20.899265  120899 factory.go:1175] Updating pod condition for preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-10 to (PodScheduled==False, Reason=Unschedulable)
I0111 23:42:20.902373  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-10: (2.705754ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38992]
I0111 23:42:20.902795  120899 wrap.go:47] PATCH /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/events/ppod-10.1578efce3294c8f1: (2.595695ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39004]
I0111 23:42:20.904048  120899 wrap.go:47] PUT /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-10/status: (4.368913ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38994]
I0111 23:42:20.905119  120899 wrap.go:47] DELETE /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-9: (7.855195ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39002]
I0111 23:42:20.922960  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-10: (18.389733ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39004]
I0111 23:42:20.932090  120899 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0111 23:42:20.932265  120899 scheduling_queue.go:821] About to try and schedule pod preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-11
I0111 23:42:20.932298  120899 scheduler.go:454] Attempting to schedule pod: preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-11
I0111 23:42:20.932391  120899 factory.go:1070] Unable to schedule preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-11: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0111 23:42:20.932447  120899 factory.go:1175] Updating pod condition for preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-11 to (PodScheduled==False, Reason=Unschedulable)
I0111 23:42:20.937664  120899 wrap.go:47] PUT /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-11/status: (4.194968ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39004]
I0111 23:42:20.938245  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-11: (4.805631ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38992]
I0111 23:42:20.938407  120899 wrap.go:47] POST /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/events: (4.246633ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39008]
I0111 23:42:20.938816  120899 wrap.go:47] DELETE /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-10: (33.209661ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39002]
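
Note the two shapes of event traffic in this stretch: the first FailedScheduling for a pod is a POST to .../events answered 201, while repeats for the same pod are a PATCH to .../events/ppod-N.<hash> answered 200, which is client-go's event recorder bumping the count on the existing Event instead of creating a new one. A sketch of the standard recorder wiring that produces this pattern; the component name is assumed:

package sketch

import (
	corev1 "k8s.io/api/core/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/kubernetes/scheme"
	typedcorev1 "k8s.io/client-go/kubernetes/typed/core/v1"
	"k8s.io/client-go/tools/record"
)

// newRecorder wires an EventRecorder to the apiserver; repeated, correlated
// events are aggregated, which is why retries surface as PATCHes above.
func newRecorder(cs kubernetes.Interface) record.EventRecorder {
	b := record.NewBroadcaster()
	b.StartRecordingToSink(&typedcorev1.EventSinkImpl{Interface: cs.CoreV1().Events("")})
	return b.NewRecorder(scheme.Scheme, corev1.EventSource{Component: "default-scheduler"})
}
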
I0111 23:42:20.942114  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-11: (1.487756ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39004]
I0111 23:42:20.942596  120899 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0111 23:42:20.942789  120899 scheduling_queue.go:821] About to try and schedule pod preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-14
I0111 23:42:20.942843  120899 scheduler.go:454] Attempting to schedule pod: preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-14
I0111 23:42:20.942996  120899 factory.go:1070] Unable to schedule preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-14: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0111 23:42:20.943091  120899 factory.go:1175] Updating pod condition for preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-14 to (PodScheduled==False, Reason=Unschedulable)
I0111 23:42:20.945514  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-14: (1.525703ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39008]
I0111 23:42:20.947250  120899 wrap.go:47] PUT /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-14/status: (3.181171ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39004]
I0111 23:42:20.948706  120899 wrap.go:47] DELETE /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-11: (8.880719ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38992]
I0111 23:42:20.949469  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-14: (1.866489ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39004]
I0111 23:42:20.949855  120899 wrap.go:47] POST /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/events: (4.034837ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39010]
I0111 23:42:20.950363  120899 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0111 23:42:20.950574  120899 scheduling_queue.go:821] About to try and schedule pod preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-14
I0111 23:42:20.950673  120899 scheduler.go:454] Attempting to schedule pod: preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-14
I0111 23:42:20.950859  120899 factory.go:1070] Unable to schedule preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-14: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0111 23:42:20.950949  120899 factory.go:1175] Updating pod condition for preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-14 to (PodScheduled==False, Reason=Unschedulable)
I0111 23:42:20.953126  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-14: (1.729932ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39008]
I0111 23:42:20.954692  120899 wrap.go:47] PUT /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-14/status: (3.294531ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39004]
I0111 23:42:20.958619  120899 wrap.go:47] PATCH /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/events/ppod-14.1578efce3595cd78: (4.692832ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39008]
I0111 23:42:20.959111  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-14: (2.152459ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39004]
I0111 23:42:20.959504  120899 wrap.go:47] DELETE /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-12: (9.17824ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:38992]
I0111 23:42:20.960144  120899 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0111 23:42:20.960341  120899 scheduling_queue.go:821] About to try and schedule pod preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-16
I0111 23:42:20.960394  120899 scheduler.go:454] Attempting to schedule pod: preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-16
I0111 23:42:20.960512  120899 factory.go:1070] Unable to schedule preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-16: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0111 23:42:20.960582  120899 factory.go:1175] Updating pod condition for preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-16 to (PodScheduled==False, Reason=Unschedulable)
I0111 23:42:20.962825  120899 wrap.go:47] PUT /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-16/status: (1.970526ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39004]
I0111 23:42:20.964367  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-16: (2.447835ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39014]
I0111 23:42:20.965651  120899 wrap.go:47] PATCH /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/events/ppod-16.1578efce2e5fab8d: (3.861785ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39016]
I0111 23:42:20.966458  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-16: (1.514514ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39014]
I0111 23:42:20.966922  120899 wrap.go:47] DELETE /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-13: (6.41912ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39008]
I0111 23:42:20.966925  120899 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0111 23:42:20.967311  120899 scheduling_queue.go:821] About to try and schedule pod preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-15
I0111 23:42:20.967335  120899 scheduler.go:454] Attempting to schedule pod: preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-15
I0111 23:42:20.967421  120899 factory.go:1070] Unable to schedule preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-15: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0111 23:42:20.967458  120899 factory.go:1175] Updating pod condition for preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-15 to (PodScheduled==False, Reason=Unschedulable)
I0111 23:42:20.971536  120899 wrap.go:47] POST /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/events: (3.168542ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39020]
I0111 23:42:20.973517  120899 wrap.go:47] PUT /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-15/status: (5.161316ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39004]
I0111 23:42:20.974332  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-15: (5.259279ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39018]
I0111 23:42:20.975533  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-15: (1.507507ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39004]
I0111 23:42:20.975914  120899 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0111 23:42:20.976336  120899 scheduling_queue.go:821] About to try and schedule pod preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-14
I0111 23:42:20.976373  120899 scheduler.go:450] Skip schedule deleting pod: preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-14
I0111 23:42:20.978068  120899 wrap.go:47] DELETE /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-14: (10.22655ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39016]
I0111 23:42:20.979150  120899 wrap.go:47] POST /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/events: (2.020681ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39018]
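
From here through ppod-49 the scheduler only logs "Skip schedule deleting pod": the test is tearing the pods down, so each pod popped from the queue already carries a deletion timestamp and is dropped rather than scheduled, with an event POST alongside each skip. The guard behind scheduler.go:450 amounts to a check like the following; this is a sketch of the condition, not the scheduler source:

package sketch

import corev1 "k8s.io/api/core/v1"

// skipPodSchedule mirrors the condition behind "Skip schedule deleting pod":
// a pod whose deletion timestamp is set is never scheduled (sketch only).
func skipPodSchedule(pod *corev1.Pod) bool {
	return pod.DeletionTimestamp != nil
}
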
I0111 23:42:20.982538  120899 scheduling_queue.go:821] About to try and schedule pod preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-15
I0111 23:42:20.982615  120899 scheduler.go:450] Skip schedule deleting pod: preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-15
I0111 23:42:20.985191  120899 wrap.go:47] POST /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/events: (2.188011ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39018]
I0111 23:42:20.985994  120899 wrap.go:47] DELETE /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-15: (7.464185ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39016]
I0111 23:42:20.989697  120899 scheduling_queue.go:821] About to try and schedule pod preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-16
I0111 23:42:20.989842  120899 scheduler.go:450] Skip schedule deleting pod: preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-16
I0111 23:42:20.991373  120899 wrap.go:47] DELETE /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-16: (4.972303ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39018]
I0111 23:42:20.992165  120899 wrap.go:47] POST /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/events: (1.653003ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39020]
I0111 23:42:20.995726  120899 scheduling_queue.go:821] About to try and schedule pod preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-17
I0111 23:42:20.995841  120899 scheduler.go:450] Skip schedule deleting pod: preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-17
I0111 23:42:20.998555  120899 wrap.go:47] POST /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/events: (2.297478ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39020]
I0111 23:42:21.001518  120899 wrap.go:47] DELETE /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-17: (8.835983ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39018]
I0111 23:42:21.006075  120899 scheduling_queue.go:821] About to try and schedule pod preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-18
I0111 23:42:21.006128  120899 scheduler.go:450] Skip schedule deleting pod: preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-18
I0111 23:42:21.009563  120899 wrap.go:47] DELETE /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-18: (7.163034ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39018]
I0111 23:42:21.011422  120899 wrap.go:47] POST /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/events: (4.880464ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39020]
I0111 23:42:21.013776  120899 scheduling_queue.go:821] About to try and schedule pod preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-19
I0111 23:42:21.013839  120899 scheduler.go:450] Skip schedule deleting pod: preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-19
I0111 23:42:21.016603  120899 wrap.go:47] DELETE /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-19: (6.648006ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39018]
I0111 23:42:21.017507  120899 wrap.go:47] POST /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/events: (3.387266ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39020]
I0111 23:42:21.021533  120899 scheduling_queue.go:821] About to try and schedule pod preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-20
I0111 23:42:21.021627  120899 scheduler.go:450] Skip schedule deleting pod: preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-20
I0111 23:42:21.023991  120899 wrap.go:47] POST /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/events: (1.740902ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39020]
I0111 23:42:21.024203  120899 wrap.go:47] DELETE /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-20: (7.046367ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39018]
I0111 23:42:21.028206  120899 scheduling_queue.go:821] About to try and schedule pod preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-21
I0111 23:42:21.028372  120899 scheduler.go:450] Skip schedule deleting pod: preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-21
I0111 23:42:21.030662  120899 wrap.go:47] POST /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/events: (1.790888ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39020]
I0111 23:42:21.032379  120899 wrap.go:47] DELETE /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-21: (7.640759ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39018]
I0111 23:42:21.037124  120899 scheduling_queue.go:821] About to try and schedule pod preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-22
I0111 23:42:21.037255  120899 scheduler.go:450] Skip schedule deleting pod: preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-22
I0111 23:42:21.049660  120899 wrap.go:47] POST /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/events: (11.989466ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39020]
I0111 23:42:21.051109  120899 wrap.go:47] DELETE /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-22: (18.328677ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39018]
I0111 23:42:21.055507  120899 scheduling_queue.go:821] About to try and schedule pod preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-23
I0111 23:42:21.055589  120899 scheduler.go:450] Skip schedule deleting pod: preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-23
I0111 23:42:21.057459  120899 wrap.go:47] POST /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/events: (1.505574ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39020]
I0111 23:42:21.057633  120899 wrap.go:47] DELETE /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-23: (6.098608ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39018]
I0111 23:42:21.061723  120899 scheduling_queue.go:821] About to try and schedule pod preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-24
I0111 23:42:21.061961  120899 scheduler.go:450] Skip schedule deleting pod: preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-24
I0111 23:42:21.063313  120899 wrap.go:47] DELETE /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-24: (5.130777ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39018]
I0111 23:42:21.064849  120899 wrap.go:47] POST /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/events: (2.006911ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39020]
I0111 23:42:21.067204  120899 scheduling_queue.go:821] About to try and schedule pod preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-25
I0111 23:42:21.067325  120899 scheduler.go:450] Skip schedule deleting pod: preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-25
I0111 23:42:21.069127  120899 wrap.go:47] DELETE /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-25: (5.52683ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39018]
I0111 23:42:21.070389  120899 wrap.go:47] POST /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/events: (2.296363ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39020]
I0111 23:42:21.073712  120899 scheduling_queue.go:821] About to try and schedule pod preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-26
I0111 23:42:21.073826  120899 scheduler.go:450] Skip schedule deleting pod: preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-26
I0111 23:42:21.075452  120899 wrap.go:47] DELETE /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-26: (5.519512ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39018]
I0111 23:42:21.076505  120899 wrap.go:47] POST /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/events: (2.173015ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39020]
I0111 23:42:21.083245  120899 scheduling_queue.go:821] About to try and schedule pod preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-27
I0111 23:42:21.083396  120899 scheduler.go:450] Skip schedule deleting pod: preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-27
I0111 23:42:21.083707  120899 wrap.go:47] DELETE /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-27: (7.19345ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39018]
I0111 23:42:21.087269  120899 wrap.go:47] POST /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/events: (2.88489ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39020]
I0111 23:42:21.088949  120899 scheduling_queue.go:821] About to try and schedule pod preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-28
I0111 23:42:21.089027  120899 scheduler.go:450] Skip schedule deleting pod: preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-28
I0111 23:42:21.089244  120899 wrap.go:47] DELETE /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-28: (4.884524ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39018]
I0111 23:42:21.091101  120899 wrap.go:47] POST /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/events: (1.710334ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39020]
I0111 23:42:21.094215  120899 scheduling_queue.go:821] About to try and schedule pod preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-29
I0111 23:42:21.094317  120899 scheduler.go:450] Skip schedule deleting pod: preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-29
I0111 23:42:21.095220  120899 wrap.go:47] DELETE /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-29: (5.017848ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39018]
I0111 23:42:21.096256  120899 wrap.go:47] POST /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/events: (1.591827ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39020]
I0111 23:42:21.099834  120899 scheduling_queue.go:821] About to try and schedule pod preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-30
I0111 23:42:21.099887  120899 scheduler.go:450] Skip schedule deleting pod: preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-30
I0111 23:42:21.100446  120899 wrap.go:47] DELETE /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-30: (4.872236ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39018]
I0111 23:42:21.104218  120899 scheduling_queue.go:821] About to try and schedule pod preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-31
I0111 23:42:21.104318  120899 scheduler.go:450] Skip schedule deleting pod: preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-31
I0111 23:42:21.104972  120899 wrap.go:47] POST /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/events: (3.269902ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39020]
I0111 23:42:21.107878  120899 wrap.go:47] DELETE /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-31: (7.09122ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39018]
I0111 23:42:21.109184  120899 wrap.go:47] POST /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/events: (3.22341ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39020]
I0111 23:42:21.111393  120899 scheduling_queue.go:821] About to try and schedule pod preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-32
I0111 23:42:21.111489  120899 scheduler.go:450] Skip schedule deleting pod: preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-32
I0111 23:42:21.113644  120899 wrap.go:47] POST /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/events: (1.895486ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39020]
I0111 23:42:21.115586  120899 wrap.go:47] DELETE /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-32: (7.260287ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39018]
I0111 23:42:21.119567  120899 scheduling_queue.go:821] About to try and schedule pod preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-33
I0111 23:42:21.119652  120899 scheduler.go:450] Skip schedule deleting pod: preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-33
I0111 23:42:21.120449  120899 wrap.go:47] DELETE /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-33: (4.42659ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39018]
I0111 23:42:21.122072  120899 wrap.go:47] POST /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/events: (1.939204ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39020]
I0111 23:42:21.123798  120899 scheduling_queue.go:821] About to try and schedule pod preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-34
I0111 23:42:21.123847  120899 scheduler.go:450] Skip schedule deleting pod: preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-34
I0111 23:42:21.126308  120899 wrap.go:47] DELETE /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-34: (5.206392ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39018]
I0111 23:42:21.126447  120899 wrap.go:47] POST /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/events: (2.060289ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39020]
I0111 23:42:21.131120  120899 scheduling_queue.go:821] About to try and schedule pod preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-35
I0111 23:42:21.131511  120899 scheduler.go:450] Skip schedule deleting pod: preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-35
I0111 23:42:21.133240  120899 wrap.go:47] DELETE /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-35: (6.336496ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39020]
I0111 23:42:21.133832  120899 wrap.go:47] POST /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/events: (1.668186ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39018]
I0111 23:42:21.136672  120899 scheduling_queue.go:821] About to try and schedule pod preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-36
I0111 23:42:21.136722  120899 scheduler.go:450] Skip schedule deleting pod: preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-36
I0111 23:42:21.139327  120899 wrap.go:47] DELETE /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-36: (5.668878ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39020]
I0111 23:42:21.140626  120899 wrap.go:47] POST /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/events: (3.591291ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39018]
I0111 23:42:21.143704  120899 scheduling_queue.go:821] About to try and schedule pod preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-37
I0111 23:42:21.143784  120899 scheduler.go:450] Skip schedule deleting pod: preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-37
I0111 23:42:21.145636  120899 wrap.go:47] POST /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/events: (1.547247ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39018]
I0111 23:42:21.146981  120899 wrap.go:47] DELETE /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-37: (6.113579ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39020]
I0111 23:42:21.150072  120899 scheduling_queue.go:821] About to try and schedule pod preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-38
I0111 23:42:21.150166  120899 scheduler.go:450] Skip schedule deleting pod: preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-38
I0111 23:42:21.152075  120899 wrap.go:47] DELETE /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-38: (4.749054ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39020]
I0111 23:42:21.152078  120899 wrap.go:47] POST /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/events: (1.601962ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39018]
I0111 23:42:21.156363  120899 scheduling_queue.go:821] About to try and schedule pod preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-39
I0111 23:42:21.156412  120899 scheduler.go:450] Skip schedule deleting pod: preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-39
I0111 23:42:21.157926  120899 wrap.go:47] DELETE /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-39: (5.294031ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39018]
I0111 23:42:21.158649  120899 wrap.go:47] POST /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/events: (1.921346ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39020]
I0111 23:42:21.163355  120899 scheduling_queue.go:821] About to try and schedule pod preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-40
I0111 23:42:21.163401  120899 scheduler.go:450] Skip schedule deleting pod: preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-40
I0111 23:42:21.166007  120899 wrap.go:47] POST /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/events: (2.163897ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39020]
I0111 23:42:21.168844  120899 wrap.go:47] DELETE /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-40: (9.315167ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39018]
I0111 23:42:21.172940  120899 scheduling_queue.go:821] About to try and schedule pod preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-41
I0111 23:42:21.173070  120899 scheduler.go:450] Skip schedule deleting pod: preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-41
I0111 23:42:21.173894  120899 wrap.go:47] DELETE /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-41: (4.277561ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39018]
I0111 23:42:21.176461  120899 wrap.go:47] POST /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/events: (2.290738ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39020]
I0111 23:42:21.177715  120899 scheduling_queue.go:821] About to try and schedule pod preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-42
I0111 23:42:21.177833  120899 scheduler.go:450] Skip schedule deleting pod: preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-42
I0111 23:42:21.179823  120899 wrap.go:47] POST /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/events: (1.543982ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39020]
I0111 23:42:21.180124  120899 wrap.go:47] DELETE /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-42: (5.667613ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39018]
I0111 23:42:21.183834  120899 scheduling_queue.go:821] About to try and schedule pod preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-43
I0111 23:42:21.183949  120899 scheduler.go:450] Skip schedule deleting pod: preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-43
I0111 23:42:21.185649  120899 wrap.go:47] DELETE /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-43: (5.116409ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39018]
I0111 23:42:21.186777  120899 wrap.go:47] POST /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/events: (2.343579ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39020]
I0111 23:42:21.189355  120899 scheduling_queue.go:821] About to try and schedule pod preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-44
I0111 23:42:21.189461  120899 scheduler.go:450] Skip schedule deleting pod: preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-44
I0111 23:42:21.190372  120899 wrap.go:47] DELETE /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-44: (4.348736ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39018]
I0111 23:42:21.192053  120899 wrap.go:47] POST /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/events: (2.161192ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39020]
I0111 23:42:21.194030  120899 scheduling_queue.go:821] About to try and schedule pod preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-45
I0111 23:42:21.194121  120899 scheduler.go:450] Skip schedule deleting pod: preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-45
I0111 23:42:21.196502  120899 wrap.go:47] DELETE /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-45: (5.496375ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39018]
I0111 23:42:21.198110  120899 wrap.go:47] POST /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/events: (3.268173ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39020]
I0111 23:42:21.204053  120899 scheduling_queue.go:821] About to try and schedule pod preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-46
I0111 23:42:21.204129  120899 wrap.go:47] DELETE /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-46: (7.145337ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39018]
I0111 23:42:21.204446  120899 scheduler.go:450] Skip schedule deleting pod: preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-46
I0111 23:42:21.206589  120899 wrap.go:47] POST /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/events: (1.860047ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39020]
I0111 23:42:21.210397  120899 scheduling_queue.go:821] About to try and schedule pod preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-47
I0111 23:42:21.210448  120899 scheduler.go:450] Skip schedule deleting pod: preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-47
I0111 23:42:21.212597  120899 wrap.go:47] POST /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/events: (1.756912ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39020]
I0111 23:42:21.213124  120899 wrap.go:47] DELETE /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-47: (6.230507ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39018]
I0111 23:42:21.217943  120899 scheduling_queue.go:821] About to try and schedule pod preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-48
I0111 23:42:21.218048  120899 scheduler.go:450] Skip schedule deleting pod: preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-48
I0111 23:42:21.221703  120899 wrap.go:47] DELETE /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-48: (6.965614ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39018]
I0111 23:42:21.221943  120899 wrap.go:47] POST /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/events: (2.318422ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39020]
I0111 23:42:21.228780  120899 scheduling_queue.go:821] About to try and schedule pod preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-49
I0111 23:42:21.228848  120899 scheduler.go:450] Skip schedule deleting pod: preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-49
I0111 23:42:21.231386  120899 wrap.go:47] POST /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/events: (1.766448ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39020]
I0111 23:42:21.233359  120899 wrap.go:47] DELETE /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-49: (8.285397ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39018]
I0111 23:42:21.239000  120899 wrap.go:47] DELETE /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/rpod-0: (4.776504ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39018]
E0111 23:42:21.240455  120899 rest.go:216] >>>> caught error : &errors.StatusError{ErrStatus:v1.Status{TypeMeta:v1.TypeMeta{Kind:"", APIVersion:""}, ListMeta:v1.ListMeta{SelfLink:"", ResourceVersion:"", Continue:""}, Status:"Failure", Message:"pods \"rpod-1\" not found", Reason:"NotFound", Details:(*v1.StatusDetails)(0xc00dec2ea0), Code:404}}
I0111 23:42:21.240501  120899 delete.go:145] Unable to delete from database : &errors.StatusError{ErrStatus:v1.Status{TypeMeta:v1.TypeMeta{Kind:"", APIVersion:""}, ListMeta:v1.ListMeta{SelfLink:"", ResourceVersion:"", Continue:""}, Status:"Failure", Message:"pods \"rpod-1\" not found", Reason:"NotFound", Details:(*v1.StatusDetails)(0xc00dec2ea0), Code:404}}
I0111 23:42:21.240619  120899 wrap.go:47] DELETE /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/rpod-1: (1.240404ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39018]
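
Cleanup then moves to the race pods: rpod-0 deletes cleanly, but rpod-1 is already gone, so its DELETE returns 404 NotFound. The ">>>> caught error" (rest.go:216) and "Unable to delete from database" (delete.go:145) lines appear to be the extra request/error logging this WIP PR adds; the NotFound itself is harmless when cleanup tolerates it, for example via a helper like this (name illustrative):

package sketch

import apierrors "k8s.io/apimachinery/pkg/api/errors"

// ignoreNotFound is the usual way cleanup code tolerates the 404 above:
// a pod that is already gone counts as successfully deleted.
func ignoreNotFound(err error) error {
	if apierrors.IsNotFound(err) {
		return nil
	}
	return err
}
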
I0111 23:42:21.246298  120899 wrap.go:47] DELETE /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/preemptor-pod: (5.27713ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39018]
I0111 23:42:21.249214  120899 get.go:70] Unable to get resource : &errors.StatusError{ErrStatus:v1.Status{TypeMeta:v1.TypeMeta{Kind:"", APIVersion:""}, ListMeta:v1.ListMeta{SelfLink:"", ResourceVersion:"", Continue:""}, Status:"Failure", Message:"pods \"ppod-0\" not found", Reason:"NotFound", Details:(*v1.StatusDetails)(0xc00e2ddb60), Code:404}}
I0111 23:42:21.249483  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-0: (1.485075ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39018]
I0111 23:42:21.252270  120899 get.go:70] Unable to get resource : &errors.StatusError{ErrStatus:v1.Status{TypeMeta:v1.TypeMeta{Kind:"", APIVersion:""}, ListMeta:v1.ListMeta{SelfLink:"", ResourceVersion:"", Continue:""}, Status:"Failure", Message:"pods \"ppod-1\" not found", Reason:"NotFound", Details:(*v1.StatusDetails)(0xc00e4918c0), Code:404}}
I0111 23:42:21.252546  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-1: (1.216347ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39018]
I0111 23:42:21.255451  120899 get.go:70] Unable to get resource : &errors.StatusError{ErrStatus:v1.Status{TypeMeta:v1.TypeMeta{Kind:"", APIVersion:""}, ListMeta:v1.ListMeta{SelfLink:"", ResourceVersion:"", Continue:""}, Status:"Failure", Message:"pods \"ppod-2\" not found", Reason:"NotFound", Details:(*v1.StatusDetails)(0xc00e491e00), Code:404}}
I0111 23:42:21.255694  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-2: (1.485375ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39018]
I0111 23:42:21.258535  120899 get.go:70] Unable to get resource : &errors.StatusError{ErrStatus:v1.Status{TypeMeta:v1.TypeMeta{Kind:"", APIVersion:""}, ListMeta:v1.ListMeta{SelfLink:"", ResourceVersion:"", Continue:""}, Status:"Failure", Message:"pods \"ppod-3\" not found", Reason:"NotFound", Details:(*v1.StatusDetails)(0xc00df30180), Code:404}}
I0111 23:42:21.258711  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-3: (1.275366ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39018]
I0111 23:42:21.261662  120899 get.go:70] Unable to get resource : &errors.StatusError{ErrStatus:v1.Status{TypeMeta:v1.TypeMeta{Kind:"", APIVersion:""}, ListMeta:v1.ListMeta{SelfLink:"", ResourceVersion:"", Continue:""}, Status:"Failure", Message:"pods \"ppod-4\" not found", Reason:"NotFound", Details:(*v1.StatusDetails)(0xc00df4a360), Code:404}}
I0111 23:42:21.261848  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-4: (1.402617ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39018]
I0111 23:42:21.264673  120899 get.go:70] Unable to get resource : &errors.StatusError{ErrStatus:v1.Status{TypeMeta:v1.TypeMeta{Kind:"", APIVersion:""}, ListMeta:v1.ListMeta{SelfLink:"", ResourceVersion:"", Continue:""}, Status:"Failure", Message:"pods \"ppod-5\" not found", Reason:"NotFound", Details:(*v1.StatusDetails)(0xc00df4a480), Code:404}}
I0111 23:42:21.265177  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-5: (1.568314ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39018]
I0111 23:42:21.272181  120899 get.go:70] Unable to get resource : &errors.StatusError{ErrStatus:v1.Status{TypeMeta:v1.TypeMeta{Kind:"", APIVersion:""}, ListMeta:v1.ListMeta{SelfLink:"", ResourceVersion:"", Continue:""}, Status:"Failure", Message:"pods \"ppod-6\" not found", Reason:"NotFound", Details:(*v1.StatusDetails)(0xc00df30840), Code:404}}
I0111 23:42:21.272403  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-6: (5.523457ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39018]
I0111 23:42:21.275659  120899 get.go:70] Unable to get resource : &errors.StatusError{ErrStatus:v1.Status{TypeMeta:v1.TypeMeta{Kind:"", APIVersion:""}, ListMeta:v1.ListMeta{SelfLink:"", ResourceVersion:"", Continue:""}, Status:"Failure", Message:"pods \"ppod-7\" not found", Reason:"NotFound", Details:(*v1.StatusDetails)(0xc00df30f00), Code:404}}
I0111 23:42:21.275846  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-7: (1.491131ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39018]
I0111 23:42:21.279003  120899 get.go:70] Unable to get resource : &errors.StatusError{ErrStatus:v1.Status{TypeMeta:v1.TypeMeta{Kind:"", APIVersion:""}, ListMeta:v1.ListMeta{SelfLink:"", ResourceVersion:"", Continue:""}, Status:"Failure", Message:"pods \"ppod-8\" not found", Reason:"NotFound", Details:(*v1.StatusDetails)(0xc00df56060), Code:404}}
I0111 23:42:21.279366  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-8: (1.809049ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39018]
I0111 23:42:21.282226  120899 get.go:70] Unable to get resource : &errors.StatusError{ErrStatus:v1.Status{TypeMeta:v1.TypeMeta{Kind:"", APIVersion:""}, ListMeta:v1.ListMeta{SelfLink:"", ResourceVersion:"", Continue:""}, Status:"Failure", Message:"pods \"ppod-9\" not found", Reason:"NotFound", Details:(*v1.StatusDetails)(0xc00df56180), Code:404}}
I0111 23:42:21.282413  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-9: (1.404702ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39018]
I0111 23:42:21.285219  120899 get.go:70] Unable to get resource : &errors.StatusError{ErrStatus:v1.Status{TypeMeta:v1.TypeMeta{Kind:"", APIVersion:""}, ListMeta:v1.ListMeta{SelfLink:"", ResourceVersion:"", Continue:""}, Status:"Failure", Message:"pods \"ppod-10\" not found", Reason:"NotFound", Details:(*v1.StatusDetails)(0xc00df4ab40), Code:404}}
I0111 23:42:21.285395  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-10: (1.258241ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39018]
I0111 23:42:21.288454  120899 get.go:70] Unable to get resource : &errors.StatusError{ErrStatus:v1.Status{TypeMeta:v1.TypeMeta{Kind:"", APIVersion:""}, ListMeta:v1.ListMeta{SelfLink:"", ResourceVersion:"", Continue:""}, Status:"Failure", Message:"pods \"ppod-11\" not found", Reason:"NotFound", Details:(*v1.StatusDetails)(0xc00df4b0e0), Code:404}}
I0111 23:42:21.288706  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-11: (1.442316ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39018]
I0111 23:42:21.291818  120899 get.go:70] Unable to get resource : &errors.StatusError{ErrStatus:v1.Status{TypeMeta:v1.TypeMeta{Kind:"", APIVersion:""}, ListMeta:v1.ListMeta{SelfLink:"", ResourceVersion:"", Continue:""}, Status:"Failure", Message:"pods \"ppod-12\" not found", Reason:"NotFound", Details:(*v1.StatusDetails)(0xc00e4db500), Code:404}}
I0111 23:42:21.292100  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-12: (1.661847ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39018]
I0111 23:42:21.295018  120899 get.go:70] Unable to get resource : &errors.StatusError{ErrStatus:v1.Status{TypeMeta:v1.TypeMeta{Kind:"", APIVersion:""}, ListMeta:v1.ListMeta{SelfLink:"", ResourceVersion:"", Continue:""}, Status:"Failure", Message:"pods \"ppod-13\" not found", Reason:"NotFound", Details:(*v1.StatusDetails)(0xc00df56960), Code:404}}
I0111 23:42:21.295308  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-13: (1.459289ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39018]
I0111 23:42:21.298204  120899 get.go:70] Unable to get resource : &errors.StatusError{ErrStatus:v1.Status{TypeMeta:v1.TypeMeta{Kind:"", APIVersion:""}, ListMeta:v1.ListMeta{SelfLink:"", ResourceVersion:"", Continue:""}, Status:"Failure", Message:"pods \"ppod-14\" not found", Reason:"NotFound", Details:(*v1.StatusDetails)(0xc00e4db680), Code:404}}
I0111 23:42:21.298384  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-14: (1.441006ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39018]
I0111 23:42:21.301248  120899 get.go:70] Unable to get resource : &errors.StatusError{ErrStatus:v1.Status{TypeMeta:v1.TypeMeta{Kind:"", APIVersion:""}, ListMeta:v1.ListMeta{SelfLink:"", ResourceVersion:"", Continue:""}, Status:"Failure", Message:"pods \"ppod-15\" not found", Reason:"NotFound", Details:(*v1.StatusDetails)(0xc00df56e40), Code:404}}
I0111 23:42:21.301507  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-15: (1.361526ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39018]
I0111 23:42:21.304457  120899 get.go:70] Unable to get resource : &errors.StatusError{ErrStatus:v1.Status{TypeMeta:v1.TypeMeta{Kind:"", APIVersion:""}, ListMeta:v1.ListMeta{SelfLink:"", ResourceVersion:"", Continue:""}, Status:"Failure", Message:"pods \"ppod-16\" not found", Reason:"NotFound", Details:(*v1.StatusDetails)(0xc00df56f60), Code:404}}
I0111 23:42:21.304606  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-16: (1.503205ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39018]
I0111 23:42:21.307478  120899 get.go:70] Unable to get resource : &errors.StatusError{ErrStatus:v1.Status{TypeMeta:v1.TypeMeta{Kind:"", APIVersion:""}, ListMeta:v1.ListMeta{SelfLink:"", ResourceVersion:"", Continue:""}, Status:"Failure", Message:"pods \"ppod-17\" not found", Reason:"NotFound", Details:(*v1.StatusDetails)(0xc00df31980), Code:404}}
I0111 23:42:21.307691  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-17: (1.55398ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39018]
I0111 23:42:21.310425  120899 get.go:70] Unable to get resource : &errors.StatusError{ErrStatus:v1.Status{TypeMeta:v1.TypeMeta{Kind:"", APIVersion:""}, ListMeta:v1.ListMeta{SelfLink:"", ResourceVersion:"", Continue:""}, Status:"Failure", Message:"pods \"ppod-18\" not found", Reason:"NotFound", Details:(*v1.StatusDetails)(0xc00df57440), Code:404}}
I0111 23:42:21.310560  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-18: (1.103665ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39018]
I0111 23:42:21.315052  120899 get.go:70] Unable to get resource : &errors.StatusError{ErrStatus:v1.Status{TypeMeta:v1.TypeMeta{Kind:"", APIVersion:""}, ListMeta:v1.ListMeta{SelfLink:"", ResourceVersion:"", Continue:""}, Status:"Failure", Message:"pods \"ppod-19\" not found", Reason:"NotFound", Details:(*v1.StatusDetails)(0xc00df31ec0), Code:404}}
I0111 23:42:21.315348  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-19: (1.462978ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39018]
I0111 23:42:21.318008  120899 get.go:70] Unable to get resource : &errors.StatusError{ErrStatus:v1.Status{TypeMeta:v1.TypeMeta{Kind:"", APIVersion:""}, ListMeta:v1.ListMeta{SelfLink:"", ResourceVersion:"", Continue:""}, Status:"Failure", Message:"pods \"ppod-20\" not found", Reason:"NotFound", Details:(*v1.StatusDetails)(0xc00df57740), Code:404}}
I0111 23:42:21.318163  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-20: (1.140878ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39018]
I0111 23:42:21.321327  120899 get.go:70] Unable to get resource : &errors.StatusError{ErrStatus:v1.Status{TypeMeta:v1.TypeMeta{Kind:"", APIVersion:""}, ListMeta:v1.ListMeta{SelfLink:"", ResourceVersion:"", Continue:""}, Status:"Failure", Message:"pods \"ppod-21\" not found", Reason:"NotFound", Details:(*v1.StatusDetails)(0xc00e4dbd40), Code:404}}
I0111 23:42:21.321471  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-21: (1.128803ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39018]
I0111 23:42:21.323949  120899 get.go:70] Unable to get resource : &errors.StatusError{ErrStatus:v1.Status{TypeMeta:v1.TypeMeta{Kind:"", APIVersion:""}, ListMeta:v1.ListMeta{SelfLink:"", ResourceVersion:"", Continue:""}, Status:"Failure", Message:"pods \"ppod-22\" not found", Reason:"NotFound", Details:(*v1.StatusDetails)(0xc00dfc2b40), Code:404}}
I0111 23:42:21.324155  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-22: (1.080787ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39018]
I0111 23:42:21.327062  120899 get.go:70] Unable to get resource : &errors.StatusError{ErrStatus:v1.Status{TypeMeta:v1.TypeMeta{Kind:"", APIVersion:""}, ListMeta:v1.ListMeta{SelfLink:"", ResourceVersion:"", Continue:""}, Status:"Failure", Message:"pods \"ppod-23\" not found", Reason:"NotFound", Details:(*v1.StatusDetails)(0xc00dfc2c60), Code:404}}
I0111 23:42:21.327258  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-23: (1.404591ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39018]
I0111 23:42:21.329990  120899 get.go:70] Unable to get resource : &errors.StatusError{ErrStatus:v1.Status{TypeMeta:v1.TypeMeta{Kind:"", APIVersion:""}, ListMeta:v1.ListMeta{SelfLink:"", ResourceVersion:"", Continue:""}, Status:"Failure", Message:"pods \"ppod-24\" not found", Reason:"NotFound", Details:(*v1.StatusDetails)(0xc00dffc000), Code:404}}
I0111 23:42:21.330118  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-24: (1.079424ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39018]
I0111 23:42:21.332757  120899 get.go:70] Unable to get resource : &errors.StatusError{ErrStatus:v1.Status{TypeMeta:v1.TypeMeta{Kind:"", APIVersion:""}, ListMeta:v1.ListMeta{SelfLink:"", ResourceVersion:"", Continue:""}, Status:"Failure", Message:"pods \"ppod-25\" not found", Reason:"NotFound", Details:(*v1.StatusDetails)(0xc00dfd4780), Code:404}}
I0111 23:42:21.332977  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-25: (1.183076ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39018]
I0111 23:42:21.336813  120899 get.go:70] Unable to get resource : &errors.StatusError{ErrStatus:v1.Status{TypeMeta:v1.TypeMeta{Kind:"", APIVersion:""}, ListMeta:v1.ListMeta{SelfLink:"", ResourceVersion:"", Continue:""}, Status:"Failure", Message:"pods \"ppod-26\" not found", Reason:"NotFound", Details:(*v1.StatusDetails)(0xc00dfc3860), Code:404}}
I0111 23:42:21.337028  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-26: (1.171949ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39018]
I0111 23:42:21.339513  120899 get.go:70] Unable to get resource : &errors.StatusError{ErrStatus:v1.Status{TypeMeta:v1.TypeMeta{Kind:"", APIVersion:""}, ListMeta:v1.ListMeta{SelfLink:"", ResourceVersion:"", Continue:""}, Status:"Failure", Message:"pods \"ppod-27\" not found", Reason:"NotFound", Details:(*v1.StatusDetails)(0xc00dffa060), Code:404}}
I0111 23:42:21.339643  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-27: (1.012469ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39018]
I0111 23:42:21.342374  120899 get.go:70] Unable to get resource : &errors.StatusError{ErrStatus:v1.Status{TypeMeta:v1.TypeMeta{Kind:"", APIVersion:""}, ListMeta:v1.ListMeta{SelfLink:"", ResourceVersion:"", Continue:""}, Status:"Failure", Message:"pods \"ppod-28\" not found", Reason:"NotFound", Details:(*v1.StatusDetails)(0xc00dfd4cc0), Code:404}}
I0111 23:42:21.342567  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-28: (1.136807ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39018]
I0111 23:42:21.345180  120899 get.go:70] Unable to get resource : &errors.StatusError{ErrStatus:v1.Status{TypeMeta:v1.TypeMeta{Kind:"", APIVersion:""}, ListMeta:v1.ListMeta{SelfLink:"", ResourceVersion:"", Continue:""}, Status:"Failure", Message:"pods \"ppod-29\" not found", Reason:"NotFound", Details:(*v1.StatusDetails)(0xc00df4b7a0), Code:404}}
I0111 23:42:21.345352  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-29: (1.213041ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39018]
I0111 23:42:21.348068  120899 get.go:70] Unable to get resource : &errors.StatusError{ErrStatus:v1.Status{TypeMeta:v1.TypeMeta{Kind:"", APIVersion:""}, ListMeta:v1.ListMeta{SelfLink:"", ResourceVersion:"", Continue:""}, Status:"Failure", Message:"pods \"ppod-30\" not found", Reason:"NotFound", Details:(*v1.StatusDetails)(0xc00e024240), Code:404}}
I0111 23:42:21.348302  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-30: (1.352301ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39018]
I0111 23:42:21.351085  120899 get.go:70] Unable to get resource : &errors.StatusError{ErrStatus:v1.Status{TypeMeta:v1.TypeMeta{Kind:"", APIVersion:""}, ListMeta:v1.ListMeta{SelfLink:"", ResourceVersion:"", Continue:""}, Status:"Failure", Message:"pods \"ppod-31\" not found", Reason:"NotFound", Details:(*v1.StatusDetails)(0xc00e0244e0), Code:404}}
I0111 23:42:21.351409  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-31: (1.442566ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39018]
I0111 23:42:21.354236  120899 get.go:70] Unable to get resource : &errors.StatusError{ErrStatus:v1.Status{TypeMeta:v1.TypeMeta{Kind:"", APIVersion:""}, ListMeta:v1.ListMeta{SelfLink:"", ResourceVersion:"", Continue:""}, Status:"Failure", Message:"pods \"ppod-32\" not found", Reason:"NotFound", Details:(*v1.StatusDetails)(0xc00dfd5740), Code:404}}
I0111 23:42:21.354490  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-32: (1.427112ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39018]
I0111 23:42:21.357250  120899 get.go:70] Unable to get resource : &errors.StatusError{ErrStatus:v1.Status{TypeMeta:v1.TypeMeta{Kind:"", APIVersion:""}, ListMeta:v1.ListMeta{SelfLink:"", ResourceVersion:"", Continue:""}, Status:"Failure", Message:"pods \"ppod-33\" not found", Reason:"NotFound", Details:(*v1.StatusDetails)(0xc00dffcde0), Code:404}}
I0111 23:42:21.357426  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-33: (1.38044ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39018]
I0111 23:42:21.360234  120899 get.go:70] Unable to get resource : &errors.StatusError{ErrStatus:v1.Status{TypeMeta:v1.TypeMeta{Kind:"", APIVersion:""}, ListMeta:v1.ListMeta{SelfLink:"", ResourceVersion:"", Continue:""}, Status:"Failure", Message:"pods \"ppod-34\" not found", Reason:"NotFound", Details:(*v1.StatusDetails)(0xc00dffd0e0), Code:404}}
I0111 23:42:21.360564  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-34: (1.427639ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39018]
I0111 23:42:21.363429  120899 get.go:70] Unable to get resource : &errors.StatusError{ErrStatus:v1.Status{TypeMeta:v1.TypeMeta{Kind:"", APIVersion:""}, ListMeta:v1.ListMeta{SelfLink:"", ResourceVersion:"", Continue:""}, Status:"Failure", Message:"pods \"ppod-35\" not found", Reason:"NotFound", Details:(*v1.StatusDetails)(0xc00df4be00), Code:404}}
I0111 23:42:21.363691  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-35: (1.429967ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39018]
I0111 23:42:21.366553  120899 get.go:70] Unable to get resource : &errors.StatusError{ErrStatus:v1.Status{TypeMeta:v1.TypeMeta{Kind:"", APIVersion:""}, ListMeta:v1.ListMeta{SelfLink:"", ResourceVersion:"", Continue:""}, Status:"Failure", Message:"pods \"ppod-36\" not found", Reason:"NotFound", Details:(*v1.StatusDetails)(0xc00e086300), Code:404}}
I0111 23:42:21.366817  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-36: (1.478739ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39018]
I0111 23:42:21.369779  120899 get.go:70] Unable to get resource : &errors.StatusError{ErrStatus:v1.Status{TypeMeta:v1.TypeMeta{Kind:"", APIVersion:""}, ListMeta:v1.ListMeta{SelfLink:"", ResourceVersion:"", Continue:""}, Status:"Failure", Message:"pods \"ppod-37\" not found", Reason:"NotFound", Details:(*v1.StatusDetails)(0xc00dfd5f20), Code:404}}
I0111 23:42:21.370051  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-37: (1.451593ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39018]
I0111 23:42:21.372909  120899 get.go:70] Unable to get resource : &errors.StatusError{ErrStatus:v1.Status{TypeMeta:v1.TypeMeta{Kind:"", APIVersion:""}, ListMeta:v1.ListMeta{SelfLink:"", ResourceVersion:"", Continue:""}, Status:"Failure", Message:"pods \"ppod-38\" not found", Reason:"NotFound", Details:(*v1.StatusDetails)(0xc00dffd500), Code:404}}
I0111 23:42:21.373220  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-38: (1.519365ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39018]
I0111 23:42:21.376026  120899 get.go:70] Unable to get resource : &errors.StatusError{ErrStatus:v1.Status{TypeMeta:v1.TypeMeta{Kind:"", APIVersion:""}, ListMeta:v1.ListMeta{SelfLink:"", ResourceVersion:"", Continue:""}, Status:"Failure", Message:"pods \"ppod-39\" not found", Reason:"NotFound", Details:(*v1.StatusDetails)(0xc00dffda40), Code:404}}
I0111 23:42:21.376244  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-39: (1.299451ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39018]
I0111 23:42:21.379241  120899 get.go:70] Unable to get resource : &errors.StatusError{ErrStatus:v1.Status{TypeMeta:v1.TypeMeta{Kind:"", APIVersion:""}, ListMeta:v1.ListMeta{SelfLink:"", ResourceVersion:"", Continue:""}, Status:"Failure", Message:"pods \"ppod-40\" not found", Reason:"NotFound", Details:(*v1.StatusDetails)(0xc00e086480), Code:404}}
I0111 23:42:21.379501  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-40: (1.366614ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39018]
I0111 23:42:21.382166  120899 get.go:70] Unable to get resource : &errors.StatusError{ErrStatus:v1.Status{TypeMeta:v1.TypeMeta{Kind:"", APIVersion:""}, ListMeta:v1.ListMeta{SelfLink:"", ResourceVersion:"", Continue:""}, Status:"Failure", Message:"pods \"ppod-41\" not found", Reason:"NotFound", Details:(*v1.StatusDetails)(0xc00e094900), Code:404}}
I0111 23:42:21.382414  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-41: (1.297444ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39018]
I0111 23:42:21.385149  120899 get.go:70] Unable to get resource : &errors.StatusError{ErrStatus:v1.Status{TypeMeta:v1.TypeMeta{Kind:"", APIVersion:""}, ListMeta:v1.ListMeta{SelfLink:"", ResourceVersion:"", Continue:""}, Status:"Failure", Message:"pods \"ppod-42\" not found", Reason:"NotFound", Details:(*v1.StatusDetails)(0xc00e086960), Code:404}}
I0111 23:42:21.385455  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-42: (1.409195ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39018]
I0111 23:42:21.388338  120899 get.go:70] Unable to get resource : &errors.StatusError{ErrStatus:v1.Status{TypeMeta:v1.TypeMeta{Kind:"", APIVersion:""}, ListMeta:v1.ListMeta{SelfLink:"", ResourceVersion:"", Continue:""}, Status:"Failure", Message:"pods \"ppod-43\" not found", Reason:"NotFound", Details:(*v1.StatusDetails)(0xc00e086a80), Code:404}}
I0111 23:42:21.388482  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-43: (1.39913ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39018]
I0111 23:42:21.437816  120899 get.go:70] Unable to get resource : &errors.StatusError{ErrStatus:v1.Status{TypeMeta:v1.TypeMeta{Kind:"", APIVersion:""}, ListMeta:v1.ListMeta{SelfLink:"", ResourceVersion:"", Continue:""}, Status:"Failure", Message:"pods \"ppod-44\" not found", Reason:"NotFound", Details:(*v1.StatusDetails)(0xc00dffa180), Code:404}}
I0111 23:42:21.438013  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-44: (47.735745ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39018]
I0111 23:42:21.444871  120899 get.go:70] Unable to get resource : &errors.StatusError{ErrStatus:v1.Status{TypeMeta:v1.TypeMeta{Kind:"", APIVersion:""}, ListMeta:v1.ListMeta{SelfLink:"", ResourceVersion:"", Continue:""}, Status:"Failure", Message:"pods \"ppod-45\" not found", Reason:"NotFound", Details:(*v1.StatusDetails)(0xc00dffa300), Code:404}}
I0111 23:42:21.445069  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-45: (4.414582ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39018]
I0111 23:42:21.448117  120899 get.go:70] Unable to get resource : &errors.StatusError{ErrStatus:v1.Status{TypeMeta:v1.TypeMeta{Kind:"", APIVersion:""}, ListMeta:v1.ListMeta{SelfLink:"", ResourceVersion:"", Continue:""}, Status:"Failure", Message:"pods \"ppod-46\" not found", Reason:"NotFound", Details:(*v1.StatusDetails)(0xc00e0872c0), Code:404}}
I0111 23:42:21.448313  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-46: (1.616569ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39018]
I0111 23:42:21.451844  120899 get.go:70] Unable to get resource : &errors.StatusError{ErrStatus:v1.Status{TypeMeta:v1.TypeMeta{Kind:"", APIVersion:""}, ListMeta:v1.ListMeta{SelfLink:"", ResourceVersion:"", Continue:""}, Status:"Failure", Message:"pods \"ppod-47\" not found", Reason:"NotFound", Details:(*v1.StatusDetails)(0xc00e0873e0), Code:404}}
I0111 23:42:21.452028  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-47: (1.248822ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39018]
I0111 23:42:21.455863  120899 get.go:70] Unable to get resource : &errors.StatusError{ErrStatus:v1.Status{TypeMeta:v1.TypeMeta{Kind:"", APIVersion:""}, ListMeta:v1.ListMeta{SelfLink:"", ResourceVersion:"", Continue:""}, Status:"Failure", Message:"pods \"ppod-48\" not found", Reason:"NotFound", Details:(*v1.StatusDetails)(0xc00e3d2ea0), Code:404}}
I0111 23:42:21.456078  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-48: (2.558999ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39018]
I0111 23:42:21.463645  120899 get.go:70] Unable to get resource : &errors.StatusError{ErrStatus:v1.Status{TypeMeta:v1.TypeMeta{Kind:"", APIVersion:""}, ListMeta:v1.ListMeta{SelfLink:"", ResourceVersion:"", Continue:""}, Status:"Failure", Message:"pods \"ppod-49\" not found", Reason:"NotFound", Details:(*v1.StatusDetails)(0xc00e0b0480), Code:404}}
I0111 23:42:21.463996  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-49: (6.128538ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39018]
I0111 23:42:21.466819  120899 get.go:70] Unable to get resource : &errors.StatusError{ErrStatus:v1.Status{TypeMeta:v1.TypeMeta{Kind:"", APIVersion:""}, ListMeta:v1.ListMeta{SelfLink:"", ResourceVersion:"", Continue:""}, Status:"Failure", Message:"pods \"rpod-0\" not found", Reason:"NotFound", Details:(*v1.StatusDetails)(0xc00e0ea840), Code:404}}
I0111 23:42:21.466967  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/rpod-0: (1.053672ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39018]
I0111 23:42:21.472718  120899 get.go:70] Unable to get resource : &errors.StatusError{ErrStatus:v1.Status{TypeMeta:v1.TypeMeta{Kind:"", APIVersion:""}, ListMeta:v1.ListMeta{SelfLink:"", ResourceVersion:"", Continue:""}, Status:"Failure", Message:"pods \"rpod-1\" not found", Reason:"NotFound", Details:(*v1.StatusDetails)(0xc00dffa8a0), Code:404}}
I0111 23:42:21.472965  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/rpod-1: (3.034118ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39018]
I0111 23:42:21.476843  120899 get.go:70] Unable to get resource : &errors.StatusError{ErrStatus:v1.Status{TypeMeta:v1.TypeMeta{Kind:"", APIVersion:""}, ListMeta:v1.ListMeta{SelfLink:"", ResourceVersion:"", Continue:""}, Status:"Failure", Message:"pods \"preemptor-pod\" not found", Reason:"NotFound", Details:(*v1.StatusDetails)(0xc00e3d36e0), Code:404}}
I0111 23:42:21.477058  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/preemptor-pod: (2.359464ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39018]
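The paired get.go / wrap.go lines above show the test polling each pod (ppod-6 through ppod-49, rpod-0, rpod-1, preemptor-pod) until the apiserver returns NotFound, i.e. waiting for cleanup of the previous preemption-race run to finish. A minimal sketch of how a client-go caller typically distinguishes that 404 from a real failure; the package, function, and names here are assumptions for illustration, not the integration test's actual code (signatures are the context-free ones of this era of client-go):

package podwait

import (
	"fmt"

	apierrors "k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
)

// podGone reports whether the pod has been fully deleted.
func podGone(cs kubernetes.Interface, ns, name string) (bool, error) {
	_, err := cs.CoreV1().Pods(ns).Get(name, metav1.GetOptions{})
	if apierrors.IsNotFound(err) {
		// This is the errors.StatusError{Reason:"NotFound", Code:404}
		// printed by get.go above: deletion has completed.
		return true, nil
	}
	if err != nil {
		return false, fmt.Errorf("unexpected error getting pod %s/%s: %v", ns, name, err)
	}
	return false, nil // pod still exists; keep polling
}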
I0111 23:42:21.483099  120899 wrap.go:47] POST /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods: (5.499741ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39018]
I0111 23:42:21.484831  120899 scheduling_queue.go:821] About to try and schedule pod preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/rpod-0
I0111 23:42:21.484887  120899 scheduler.go:454] Attempting to schedule pod: preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/rpod-0
I0111 23:42:21.485219  120899 scheduler_binder.go:211] AssumePodVolumes for pod "preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/rpod-0", node "node1"
I0111 23:42:21.485928  120899 scheduler_binder.go:221] AssumePodVolumes for pod "preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/rpod-0", node "node1": all PVCs bound and nothing to do
I0111 23:42:21.486060  120899 factory.go:1166] Attempting to bind rpod-0 to node1
I0111 23:42:21.486470  120899 wrap.go:47] POST /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods: (2.824932ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39018]
I0111 23:42:21.487726  120899 scheduling_queue.go:821] About to try and schedule pod preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/rpod-1
I0111 23:42:21.488645  120899 scheduler.go:454] Attempting to schedule pod: preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/rpod-1
I0111 23:42:21.488906  120899 scheduler_binder.go:211] AssumePodVolumes for pod "preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/rpod-1", node "node1"
I0111 23:42:21.488923  120899 scheduler_binder.go:221] AssumePodVolumes for pod "preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/rpod-1", node "node1": all PVCs bound and nothing to do
I0111 23:42:21.489089  120899 factory.go:1166] Attempting to bind rpod-1 to node1
I0111 23:42:21.489249  120899 wrap.go:47] POST /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/rpod-0/binding: (2.263682ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39018]
I0111 23:42:21.489874  120899 scheduler.go:569] pod preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/rpod-0 is bound successfully on node node1, 1 nodes evaluated, 1 nodes were found feasible
I0111 23:42:21.497528  120899 wrap.go:47] POST /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/events: (3.308019ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39018]
I0111 23:42:21.503182  120899 wrap.go:47] POST /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/rpod-1/binding: (13.701931ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39020]
I0111 23:42:21.504166  120899 scheduler.go:569] pod preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/rpod-1 is bound successfully on node node1, 1 nodes evaluated, 1 nodes were found feasible
I0111 23:42:21.508543  120899 wrap.go:47] POST /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/events: (4.04082ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39020]
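The "Attempting to bind rpod-0 to node1" line followed by the POST to .../pods/rpod-0/binding is the scheduler creating the pods/binding subresource, which is what flips a pod to its assigned node. A sketch of that call under this era's context-free client-go signatures; names are placeholders:

package bindsketch

import (
	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
)

// bindPodToNode issues the Binding create that assigns pod to node,
// mirroring the POST .../pods/<pod>/binding requests logged above.
func bindPodToNode(cs kubernetes.Interface, ns, pod, node string) error {
	return cs.CoreV1().Pods(ns).Bind(&v1.Binding{
		ObjectMeta: metav1.ObjectMeta{Namespace: ns, Name: pod},
		Target:     v1.ObjectReference{Kind: "Node", Name: node},
	})
}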
I0111 23:42:21.589169  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/rpod-0: (1.953564ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39020]
I0111 23:42:21.692129  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/rpod-1: (1.797899ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39020]
I0111 23:42:21.692472  120899 preemption_test.go:561] Creating the preemptor pod...
I0111 23:42:21.696820  120899 wrap.go:47] POST /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods: (4.030552ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39020]
I0111 23:42:21.697101  120899 preemption_test.go:567] Creating additional pods...
I0111 23:42:21.697352  120899 scheduling_queue.go:821] About to try and schedule pod preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/preemptor-pod
I0111 23:42:21.697380  120899 scheduler.go:454] Attempting to schedule pod: preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/preemptor-pod
I0111 23:42:21.697498  120899 factory.go:1070] Unable to schedule preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/preemptor-pod: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0111 23:42:21.697567  120899 factory.go:1175] Updating pod condition for preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/preemptor-pod to (PodScheduled==False, Reason=Unschedulable)
I0111 23:42:21.701652  120899 wrap.go:47] POST /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/events: (2.769825ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39040]
I0111 23:42:21.707857  120899 wrap.go:47] POST /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods: (10.487619ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39020]
I0111 23:42:21.707865  120899 wrap.go:47] PUT /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/preemptor-pod/status: (9.472507ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39018]
I0111 23:42:21.708364  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/preemptor-pod: (10.073431ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39038]
I0111 23:42:21.711318  120899 wrap.go:47] POST /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods: (2.454305ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39020]
I0111 23:42:21.711808  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/preemptor-pod: (2.733341ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39018]
I0111 23:42:21.712550  120899 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0111 23:42:21.716548  120899 wrap.go:47] PUT /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/preemptor-pod/status: (3.529401ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39038]
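The sequence above is the preemption setup: preemptor-pod fails the fit predicates ("0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory"), is marked Unschedulable, and generic_scheduler then flags node1 as a potential preemption target because evicting lower-priority pods could free the resources. A hypothetical construction of such a preemptor pod; the priority class name, image, and request quantities are assumptions, not the test's actual values:

package preemptsketch

import (
	v1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// preemptorPod builds a pod whose priority is high enough to preempt and
// whose requests are large enough to be infeasible without evictions.
func preemptorPod(ns string) *v1.Pod {
	return &v1.Pod{
		ObjectMeta: metav1.ObjectMeta{Namespace: ns, Name: "preemptor-pod"},
		Spec: v1.PodSpec{
			PriorityClassName: "high-priority", // assumed class; must exist in the cluster
			Containers: []v1.Container{{
				Name:  "pause",
				Image: "k8s.gcr.io/pause:3.1",
				Resources: v1.ResourceRequirements{
					Requests: v1.ResourceList{
						v1.ResourceCPU:    resource.MustParse("500m"),
						v1.ResourceMemory: resource.MustParse("500Mi"),
					},
				},
			}},
		},
	}
}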
I0111 23:42:21.722230  120899 wrap.go:47] POST /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods: (9.144817ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39040]
I0111 23:42:21.725820  120899 wrap.go:47] POST /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods: (3.013355ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39040]
I0111 23:42:21.727921  120899 wrap.go:47] DELETE /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/rpod-1: (6.854507ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39038]
I0111 23:42:21.729378  120899 scheduling_queue.go:821] About to try and schedule pod preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-0
I0111 23:42:21.729400  120899 scheduler.go:454] Attempting to schedule pod: preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-0
I0111 23:42:21.729535  120899 factory.go:1070] Unable to schedule preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-0: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0111 23:42:21.729578  120899 factory.go:1175] Updating pod condition for preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-0 to (PodScheduled==False, Reason=Unschedulable)
I0111 23:42:21.731480  120899 wrap.go:47] POST /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods: (4.695364ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39040]
I0111 23:42:21.733763  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-0: (3.4849ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39044]
I0111 23:42:21.733781  120899 wrap.go:47] PUT /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-0/status: (3.605225ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39042]
I0111 23:42:21.734659  120899 wrap.go:47] POST /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/events: (5.464277ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39038]
I0111 23:42:21.738188  120899 wrap.go:47] POST /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods: (4.84998ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39040]
I0111 23:42:21.738249  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-0: (3.549523ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39042]
I0111 23:42:21.738641  120899 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0111 23:42:21.738831  120899 scheduling_queue.go:821] About to try and schedule pod preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-4
I0111 23:42:21.738876  120899 scheduler.go:454] Attempting to schedule pod: preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-4
I0111 23:42:21.739005  120899 factory.go:1070] Unable to schedule preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-4: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0111 23:42:21.739090  120899 factory.go:1175] Updating pod condition for preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-4 to (PodScheduled==False, Reason=Unschedulable)
I0111 23:42:21.741153  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-4: (1.707745ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39044]
I0111 23:42:21.741579  120899 wrap.go:47] POST /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods: (2.972694ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39040]
I0111 23:42:21.743271  120899 wrap.go:47] POST /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/events: (8.121616ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39038]
I0111 23:42:21.744419  120899 wrap.go:47] PUT /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-4/status: (2.641491ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39046]
I0111 23:42:21.744832  120899 wrap.go:47] POST /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods: (2.748492ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39040]
I0111 23:42:21.746087  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-4: (1.269642ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39046]
I0111 23:42:21.746402  120899 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0111 23:42:21.746567  120899 scheduling_queue.go:821] About to try and schedule pod preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-5
I0111 23:42:21.746593  120899 scheduler.go:454] Attempting to schedule pod: preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-5
I0111 23:42:21.746706  120899 factory.go:1070] Unable to schedule preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-5: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0111 23:42:21.746779  120899 factory.go:1175] Updating pod condition for preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-5 to (PodScheduled==False, Reason=Unschedulable)
I0111 23:42:21.747913  120899 wrap.go:47] POST /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/events: (4.09536ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39038]
I0111 23:42:21.748483  120899 wrap.go:47] POST /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods: (2.952922ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39040]
I0111 23:42:21.749488  120899 wrap.go:47] PUT /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-5/status: (2.476474ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39046]
I0111 23:42:21.749691  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-5: (1.043925ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39044]
I0111 23:42:21.750797  120899 wrap.go:47] POST /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/events: (1.75965ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39038]
I0111 23:42:21.751485  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-5: (1.127344ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39044]
I0111 23:42:21.751827  120899 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0111 23:42:21.752043  120899 scheduling_queue.go:821] About to try and schedule pod preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-7
I0111 23:42:21.752058  120899 scheduler.go:454] Attempting to schedule pod: preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-7
I0111 23:42:21.752163  120899 factory.go:1070] Unable to schedule preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-7: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0111 23:42:21.752223  120899 factory.go:1175] Updating pod condition for preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-7 to (PodScheduled==False, Reason=Unschedulable)
I0111 23:42:21.755260  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-7: (2.300584ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39046]
I0111 23:42:21.757657  120899 wrap.go:47] PUT /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-7/status: (4.592132ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39038]
I0111 23:42:21.757690  120899 wrap.go:47] POST /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods: (2.242281ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39050]
I0111 23:42:21.760597  120899 wrap.go:47] POST /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods: (2.400846ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39046]
I0111 23:42:21.760772  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-7: (1.943941ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39038]
I0111 23:42:21.761200  120899 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0111 23:42:21.761810  120899 wrap.go:47] POST /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/events: (8.319109ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39048]
I0111 23:42:21.761903  120899 scheduling_queue.go:821] About to try and schedule pod preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-8
I0111 23:42:21.764640  120899 scheduler.go:454] Attempting to schedule pod: preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-8
I0111 23:42:21.764791  120899 factory.go:1070] Unable to schedule preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-8: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0111 23:42:21.764847  120899 factory.go:1175] Updating pod condition for preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-8 to (PodScheduled==False, Reason=Unschedulable)
I0111 23:42:21.763554  120899 wrap.go:47] POST /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods: (2.376304ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39038]
I0111 23:42:21.768220  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-8: (2.607639ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39046]
I0111 23:42:21.768571  120899 wrap.go:47] POST /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods: (2.541086ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39038]
I0111 23:42:21.768845  120899 wrap.go:47] PUT /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-8/status: (2.622341ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39048]
I0111 23:42:21.769848  120899 wrap.go:47] POST /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/events: (1.927713ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39052]
I0111 23:42:21.771501  120899 wrap.go:47] POST /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods: (1.770993ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39038]
I0111 23:42:21.774792  120899 wrap.go:47] POST /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods: (2.681846ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39052]
I0111 23:42:21.776339  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-8: (2.461042ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39048]
I0111 23:42:21.778576  120899 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0111 23:42:21.778920  120899 scheduling_queue.go:821] About to try and schedule pod preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-9
I0111 23:42:21.778963  120899 scheduler.go:454] Attempting to schedule pod: preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-9
I0111 23:42:21.779534  120899 factory.go:1070] Unable to schedule preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-9: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0111 23:42:21.779672  120899 factory.go:1175] Updating pod condition for preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-9 to (PodScheduled==False, Reason=Unschedulable)
I0111 23:42:21.782173  120899 wrap.go:47] POST /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods: (5.895256ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39052]
I0111 23:42:21.782728  120899 wrap.go:47] POST /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/events: (2.039149ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39054]
I0111 23:42:21.783020  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-9: (2.8486ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39046]
I0111 23:42:21.785170  120899 wrap.go:47] POST /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods: (2.484998ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39052]
I0111 23:42:21.785199  120899 wrap.go:47] PUT /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-9/status: (4.593121ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39048]
I0111 23:42:21.790464  120899 wrap.go:47] POST /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods: (4.351642ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39046]
I0111 23:42:21.799505  120899 wrap.go:47] POST /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods: (8.478312ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39046]
I0111 23:42:21.799626  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-9: (7.566996ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39054]
I0111 23:42:21.800494  120899 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0111 23:42:21.800733  120899 scheduling_queue.go:821] About to try and schedule pod preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-14
I0111 23:42:21.800809  120899 scheduler.go:454] Attempting to schedule pod: preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-14
I0111 23:42:21.800964  120899 factory.go:1070] Unable to schedule preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-14: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0111 23:42:21.801059  120899 factory.go:1175] Updating pod condition for preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-14 to (PodScheduled==False, Reason=Unschedulable)
I0111 23:42:21.803601  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-14: (1.814872ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39062]
I0111 23:42:21.804538  120899 wrap.go:47] PUT /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-14/status: (3.085722ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39054]
I0111 23:42:21.805028  120899 wrap.go:47] POST /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/events: (1.641404ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39064]
I0111 23:42:21.806610  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-14: (1.598088ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39054]
I0111 23:42:21.807674  120899 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0111 23:42:21.808354  120899 scheduling_queue.go:821] About to try and schedule pod preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-16
I0111 23:42:21.808382  120899 scheduler.go:454] Attempting to schedule pod: preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-16
I0111 23:42:21.808359  120899 wrap.go:47] POST /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods: (2.871105ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39046]
I0111 23:42:21.808589  120899 factory.go:1070] Unable to schedule preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-16: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0111 23:42:21.808690  120899 factory.go:1175] Updating pod condition for preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-16 to (PodScheduled==False, Reason=Unschedulable)
I0111 23:42:21.811436  120899 wrap.go:47] POST /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods: (2.272869ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39062]
I0111 23:42:21.811480  120899 wrap.go:47] PUT /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-16/status: (2.327256ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39064]
I0111 23:42:21.813760  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-16: (4.048054ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39066]
I0111 23:42:21.815464  120899 wrap.go:47] POST /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods: (2.931692ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39062]
I0111 23:42:21.816846  120899 wrap.go:47] POST /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/events: (5.958453ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39068]
I0111 23:42:21.817857  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-16: (5.68975ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39064]
I0111 23:42:21.818006  120899 wrap.go:47] POST /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods: (1.848875ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39062]
I0111 23:42:21.818255  120899 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0111 23:42:21.818502  120899 scheduling_queue.go:821] About to try and schedule pod preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-14
I0111 23:42:21.818526  120899 scheduler.go:454] Attempting to schedule pod: preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-14
I0111 23:42:21.818706  120899 factory.go:1070] Unable to schedule preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-14: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0111 23:42:21.818816  120899 factory.go:1175] Updating pod condition for preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-14 to (PodScheduled==False, Reason=Unschedulable)
I0111 23:42:21.821612  120899 wrap.go:47] POST /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods: (2.794901ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39068]
E0111 23:42:21.821853  120899 rest.go:216] >>>> caught error : &errors.StatusError{ErrStatus:v1.Status{TypeMeta:v1.TypeMeta{Kind:"", APIVersion:""}, ListMeta:v1.ListMeta{SelfLink:"", ResourceVersion:"", Continue:""}, Status:"Failure", Message:"Operation cannot be fulfilled on pods \"ppod-14\": the object has been modified; please apply your changes to the latest version and try again", Reason:"Conflict", Details:(*v1.StatusDetails)(0xc011aeed20), Code:409}}
I0111 23:42:21.821932  120899 update.go:183] Unable to store in database : &errors.StatusError{ErrStatus:v1.Status{TypeMeta:v1.TypeMeta{Kind:"", APIVersion:""}, ListMeta:v1.ListMeta{SelfLink:"", ResourceVersion:"", Continue:""}, Status:"Failure", Message:"Operation cannot be fulfilled on pods \"ppod-14\": the object has been modified; please apply your changes to the latest version and try again", Reason:"Conflict", Details:(*v1.StatusDetails)(0xc011aeed20), Code:409}}
I0111 23:42:21.822089  120899 trace.go:84] Trace[1833340671]: "Update /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-14/status" (started: 2019-01-11 23:42:21.820101323 +0000 UTC m=+65.515695120) (total time: 1.957042ms):
Trace[1833340671]: [126.633µs] [126.633µs] About to convert to expected version
Trace[1833340671]: [254.486µs] [127.853µs] Conversion done
Trace[1833340671]: [259.973µs] [5.487µs] About to store object in database
Trace[1833340671]: [1.957042ms] [1.697069ms] END
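The Trace[1833340671] block above, with its "About to convert to expected version" / "Conversion done" / "About to store object in database" steps, comes from the apiserver's trace utility in the Update handler, which this WIP PR instruments. A minimal sketch of the usual pattern, assuming the trace package as vendored in the apiserver of this era (it later moved to k8s.io/utils/trace with a different New signature):

package tracesketch

import (
	"time"

	utiltrace "k8s.io/apiserver/pkg/util/trace"
)

// handleUpdate shows the trace lifecycle behind the block above:
// name the trace, record steps, and emit it only if it ran long.
func handleUpdate(path string) {
	trace := utiltrace.New("Update " + path)
	// LogIfLong prints the accumulated steps only past the threshold;
	// the 500ms value here is an assumed example, not the PR's setting.
	defer trace.LogIfLong(500 * time.Millisecond)

	trace.Step("About to convert to expected version")
	// ... conversion ...
	trace.Step("Conversion done")
	trace.Step("About to store object in database")
	// ... storage write; a Conflict here surfaces as the StatusError above ...
}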
I0111 23:42:21.822171  120899 update.go:53] >>>> UpdateResource bad count : &http.Request{Method:"PUT", URL:(*url.URL)(0xc011306880), Proto:"HTTP/1.1", ProtoMajor:1, ProtoMinor:1, Header:http.Header{"Accept-Encoding":[]string{"gzip"}, "User-Agent":[]string{"scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format"}, "Content-Length":[]string{"1091"}, "Accept":[]string{"application/json, */*"}, "Content-Type":[]string{"application/json"}}, Body:(*http.body)(0xc0118718c0), GetBody:(func() (io.ReadCloser, error))(nil), ContentLength:1091, TransferEncoding:[]string(nil), Close:false, Host:"127.0.0.1:46609", Form:url.Values(nil), PostForm:url.Values(nil), MultipartForm:(*multipart.Form)(nil), Trailer:http.Header(nil), RemoteAddr:"127.0.0.1:39066", RequestURI:"/api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-14/status", TLS:(*tls.ConnectionState)(nil), Cancel:(<-chan struct {})(nil), Response:(*http.Response)(nil), ctx:(*context.valueCtx)(0xc011ae8510)}
I0111 23:42:21.822616  120899 wrap.go:47] PUT /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-14/status: (2.827552ms) 409 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39066]
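The 409 above ("the object has been modified; please apply your changes to the latest version and try again") is the standard optimistic-concurrency conflict on a status PUT: the scheduler raced with another writer of ppod-14. The usual client-side answer is to re-read and retry; a sketch with client-go's retry helper, with placeholder names and this era's context-free signatures:

package conflictsketch

import (
	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/util/retry"
)

// markUnschedulable re-reads the pod and retries the status update
// whenever the apiserver answers 409 Conflict, as logged above.
func markUnschedulable(cs kubernetes.Interface, ns, name string) error {
	return retry.RetryOnConflict(retry.DefaultRetry, func() error {
		pod, err := cs.CoreV1().Pods(ns).Get(name, metav1.GetOptions{})
		if err != nil {
			return err
		}
		// Simplified: a real updater would replace an existing
		// PodScheduled condition rather than blindly append one.
		pod.Status.Conditions = append(pod.Status.Conditions, v1.PodCondition{
			Type:   v1.PodScheduled,
			Status: v1.ConditionFalse,
			Reason: "Unschedulable",
		})
		_, err = cs.CoreV1().Pods(ns).UpdateStatus(pod)
		return err // a Conflict here triggers another Get-and-update attempt
	})
}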
I0111 23:42:21.823950  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-14: (3.586519ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39072]
I0111 23:42:21.824613  120899 wrap.go:47] POST /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods: (2.467573ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39068]
I0111 23:42:21.825068  120899 wrap.go:47] PATCH /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/events/ppod-14.1578efce68b935e0: (3.350043ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39074]
I0111 23:42:21.825149  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-14: (2.110338ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39066]
I0111 23:42:21.825490  120899 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0111 23:42:21.825972  120899 scheduling_queue.go:821] About to try and schedule pod preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-15
I0111 23:42:21.826022  120899 scheduler.go:454] Attempting to schedule pod: preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-15
I0111 23:42:21.826202  120899 factory.go:1070] Unable to schedule preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-15: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0111 23:42:21.826325  120899 factory.go:1175] Updating pod condition for preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-15 to (PodScheduled==False, Reason=Unschedulable)
I0111 23:42:21.828734  120899 wrap.go:47] POST /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/events: (1.449261ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39076]
I0111 23:42:21.828843  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-15: (1.751356ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39072]
I0111 23:42:21.829955  120899 wrap.go:47] PUT /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-15/status: (2.824695ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39074]
I0111 23:42:21.830489  120899 wrap.go:47] POST /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods: (3.357865ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39068]
I0111 23:42:21.836972  120899 wrap.go:47] POST /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods: (2.641733ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39068]
I0111 23:42:21.837428  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-15: (2.095962ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39072]
I0111 23:42:21.837674  120899 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0111 23:42:21.837915  120899 scheduling_queue.go:821] About to try and schedule pod preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-9
I0111 23:42:21.837940  120899 scheduler.go:454] Attempting to schedule pod: preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-9
I0111 23:42:21.838065  120899 factory.go:1070] Unable to schedule preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-9: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0111 23:42:21.838118  120899 factory.go:1175] Updating pod condition for preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-9 to (PodScheduled==False, Reason=Unschedulable)
I0111 23:42:21.840231  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-9: (1.785542ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39076]
I0111 23:42:21.842143  120899 wrap.go:47] PUT /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-9/status: (3.594209ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39072]
I0111 23:42:21.842609  120899 wrap.go:47] POST /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods: (4.934736ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39068]
I0111 23:42:21.843160  120899 wrap.go:47] PATCH /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/events/ppod-9.1578efce67725d1e: (3.945905ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39078]
I0111 23:42:21.844971  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-9: (1.718943ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39072]
I0111 23:42:21.845250  120899 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0111 23:42:21.845524  120899 scheduling_queue.go:821] About to try and schedule pod preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-26
I0111 23:42:21.845561  120899 scheduler.go:454] Attempting to schedule pod: preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-26
I0111 23:42:21.845659  120899 factory.go:1070] Unable to schedule preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-26: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0111 23:42:21.845776  120899 factory.go:1175] Updating pod condition for preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-26 to (PodScheduled==False, Reason=Unschedulable)
I0111 23:42:21.846145  120899 wrap.go:47] POST /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods: (2.442079ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39068]
I0111 23:42:21.849225  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-26: (1.737108ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39072]
I0111 23:42:21.849569  120899 wrap.go:47] PUT /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-26/status: (3.354127ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39076]
I0111 23:42:21.850703  120899 wrap.go:47] POST /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods: (4.056793ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39068]
I0111 23:42:21.852224  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-26: (2.069462ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39076]
I0111 23:42:21.856164  120899 reflector.go:215] k8s.io/client-go/informers/factory.go:132: forcing resync
I0111 23:42:21.857257  120899 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0111 23:42:21.857555  120899 scheduling_queue.go:821] About to try and schedule pod preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-27
I0111 23:42:21.857610  120899 scheduler.go:454] Attempting to schedule pod: preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-27
I0111 23:42:21.857680  120899 wrap.go:47] POST /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods: (5.668789ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39068]
I0111 23:42:21.857835  120899 wrap.go:47] POST /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/events: (9.05506ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39080]
I0111 23:42:21.858491  120899 factory.go:1070] Unable to schedule preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-27: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0111 23:42:21.858583  120899 factory.go:1175] Updating pod condition for preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-27 to (PodScheduled==False, Reason=Unschedulable)
I0111 23:42:21.861440  120899 wrap.go:47] POST /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/events: (1.813898ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39098]
I0111 23:42:21.861725  120899 wrap.go:47] POST /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods: (3.409929ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39076]
I0111 23:42:21.862148  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-27: (3.244826ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39072]
I0111 23:42:21.862435  120899 reflector.go:215] k8s.io/client-go/informers/factory.go:132: forcing resync
I0111 23:42:21.863259  120899 reflector.go:215] k8s.io/client-go/informers/factory.go:132: forcing resync
I0111 23:42:21.863307  120899 reflector.go:215] k8s.io/client-go/informers/factory.go:132: forcing resync
I0111 23:42:21.863651  120899 reflector.go:215] k8s.io/client-go/informers/factory.go:132: forcing resync
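
The reflector.go:215 "forcing resync" lines come from the shared informer machinery: every resync period each reflector replays its full cache to the registered handlers, which is part of what keeps requeueing the unschedulable ppods here. A sketch of the client-go setup that produces these lines, with an illustrative 1s period and a placeholder endpoint rather than the test's actual settings:

    package main

    import (
        "time"

        "k8s.io/client-go/informers"
        "k8s.io/client-go/kubernetes"
        "k8s.io/client-go/rest"
    )

    func main() {
        // Placeholder apiserver address; the test wires its own client.
        client := kubernetes.NewForConfigOrDie(&rest.Config{Host: "http://127.0.0.1:8080"})

        // Every defaultResync interval, each started reflector logs
        // "forcing resync" and redelivers its cached objects.
        factory := informers.NewSharedInformerFactory(client, 1*time.Second)
        podInformer := factory.Core().V1().Pods().Informer()
        _ = podInformer // handlers added here would see a full replay every resync

        stop := make(chan struct{})
        defer close(stop)
        factory.Start(stop)
        factory.WaitForCacheSync(stop)
    }
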
I0111 23:42:21.867030  120899 wrap.go:47] POST /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods: (4.863948ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39098]
I0111 23:42:21.868154  120899 wrap.go:47] PUT /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-27/status: (7.047104ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39102]
I0111 23:42:21.869609  120899 wrap.go:47] POST /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods: (1.99123ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39072]
I0111 23:42:21.874772  120899 wrap.go:47] POST /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods: (4.38219ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39072]
I0111 23:42:21.874982  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-27: (6.275951ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39102]
I0111 23:42:21.875428  120899 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0111 23:42:21.875602  120899 scheduling_queue.go:821] About to try and schedule pod preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-9
I0111 23:42:21.875623  120899 scheduler.go:454] Attempting to schedule pod: preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-9
I0111 23:42:21.875760  120899 factory.go:1070] Unable to schedule preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-9: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0111 23:42:21.875918  120899 factory.go:1175] Updating pod condition for preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-9 to (PodScheduled==False, Reason=Unschedulable)
I0111 23:42:21.879479  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-9: (2.163022ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39120]
I0111 23:42:21.880137  120899 wrap.go:47] POST /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods: (2.858674ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39072]
I0111 23:42:21.880511  120899 wrap.go:47] PUT /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-9/status: (3.291441ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39076]
I0111 23:42:21.880540  120899 backoff_utils.go:79] Backing off 2s
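
backoff_utils.go:79 "Backing off 2s" is the scheduler's per-pod exponential backoff: each failed attempt roughly doubles the wait before the pod is retried, so the retry loop visible above stays bounded. A sketch of the same doubling schedule using apimachinery's wait package; the exact parameters (initial 1s, factor 2) are assumptions, not values read from this test:

    package main

    import (
        "fmt"
        "time"

        "k8s.io/apimachinery/pkg/util/wait"
    )

    func main() {
        attempt := 0
        // Waits 1s, 2s, 4s, ... between failed attempts, up to Steps tries.
        _ = wait.ExponentialBackoff(wait.Backoff{
            Duration: 1 * time.Second, // initial wait (assumed)
            Factor:   2,               // doubles each failure
            Steps:    4,
        }, func() (bool, error) {
            attempt++
            fmt.Println("scheduling attempt", attempt)
            return attempt >= 3, nil // pretend the third attempt fits
        })
    }
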
I0111 23:42:21.880879  120899 wrap.go:47] PATCH /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/events/ppod-9.1578efce67725d1e: (3.544708ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39122]
I0111 23:42:21.882827  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-9: (1.645061ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39076]
I0111 23:42:21.883250  120899 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0111 23:42:21.883332  120899 wrap.go:47] POST /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods: (2.491523ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39120]
I0111 23:42:21.883625  120899 scheduling_queue.go:821] About to try and schedule pod preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-34
I0111 23:42:21.883649  120899 scheduler.go:454] Attempting to schedule pod: preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-34
I0111 23:42:21.883794  120899 factory.go:1070] Unable to schedule preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-34: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0111 23:42:21.883847  120899 factory.go:1175] Updating pod condition for preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-34 to (PodScheduled==False, Reason=Unschedulable)
I0111 23:42:21.886102  120899 wrap.go:47] POST /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods: (2.19286ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39076]
I0111 23:42:21.886312  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-34: (2.281213ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39072]
I0111 23:42:21.886963  120899 wrap.go:47] POST /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/events: (2.025154ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39126]
I0111 23:42:21.886980  120899 wrap.go:47] PUT /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-34/status: (2.471323ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39124]
I0111 23:42:21.889225  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-34: (1.742499ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39124]
I0111 23:42:21.889701  120899 wrap.go:47] POST /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods: (2.848482ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39076]
I0111 23:42:21.890543  120899 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0111 23:42:21.890775  120899 scheduling_queue.go:821] About to try and schedule pod preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-36
I0111 23:42:21.890806  120899 scheduler.go:454] Attempting to schedule pod: preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-36
I0111 23:42:21.890976  120899 factory.go:1070] Unable to schedule preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-36: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0111 23:42:21.891040  120899 factory.go:1175] Updating pod condition for preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-36 to (PodScheduled==False, Reason=Unschedulable)
I0111 23:42:21.893722  120899 wrap.go:47] POST /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods: (2.954638ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39076]
I0111 23:42:21.894465  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-36: (2.889408ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39128]
I0111 23:42:21.895471  120899 wrap.go:47] PUT /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-36/status: (4.135813ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39072]
I0111 23:42:21.896678  120899 wrap.go:47] POST /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/events: (1.975929ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39076]
I0111 23:42:21.899449  120899 wrap.go:47] POST /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods: (4.822963ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39130]
I0111 23:42:21.899735  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-36: (1.338923ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39128]
I0111 23:42:21.900546  120899 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0111 23:42:21.900706  120899 scheduling_queue.go:821] About to try and schedule pod preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-38
I0111 23:42:21.900732  120899 scheduler.go:454] Attempting to schedule pod: preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-38
I0111 23:42:21.900850  120899 factory.go:1070] Unable to schedule preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-38: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0111 23:42:21.900909  120899 factory.go:1175] Updating pod condition for preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-38 to (PodScheduled==False, Reason=Unschedulable)
I0111 23:42:21.902918  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-38: (1.278604ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39132]
I0111 23:42:21.906104  120899 wrap.go:47] POST /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods: (6.033812ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39130]
I0111 23:42:21.906127  120899 wrap.go:47] PUT /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-38/status: (4.448653ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39128]
I0111 23:42:21.909730  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-38: (2.388352ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39130]
I0111 23:42:21.910640  120899 wrap.go:47] POST /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/events: (4.151022ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39134]
I0111 23:42:21.911870  120899 wrap.go:47] POST /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods: (4.445091ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39132]
I0111 23:42:21.912078  120899 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0111 23:42:21.912327  120899 scheduling_queue.go:821] About to try and schedule pod preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-40
I0111 23:42:21.912416  120899 scheduler.go:454] Attempting to schedule pod: preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-40
I0111 23:42:21.912564  120899 factory.go:1070] Unable to schedule preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-40: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0111 23:42:21.912648  120899 factory.go:1175] Updating pod condition for preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-40 to (PodScheduled==False, Reason=Unschedulable)
I0111 23:42:21.919410  120899 wrap.go:47] POST /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/events: (6.182184ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39134]
I0111 23:42:21.920700  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-40: (6.621377ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39136]
I0111 23:42:21.921170  120899 wrap.go:47] PUT /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-40/status: (7.768139ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39130]
I0111 23:42:21.922454  120899 wrap.go:47] POST /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods: (2.599066ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39134]
I0111 23:42:21.923471  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-40: (1.343749ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39136]
I0111 23:42:21.923805  120899 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0111 23:42:21.924021  120899 scheduling_queue.go:821] About to try and schedule pod preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-42
I0111 23:42:21.924046  120899 scheduler.go:454] Attempting to schedule pod: preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-42
I0111 23:42:21.924461  120899 factory.go:1070] Unable to schedule preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-42: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0111 23:42:21.924537  120899 factory.go:1175] Updating pod condition for preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-42 to (PodScheduled==False, Reason=Unschedulable)
I0111 23:42:21.926859  120899 wrap.go:47] POST /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods: (3.383116ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39134]
I0111 23:42:21.927505  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-42: (1.357213ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39138]
I0111 23:42:21.927593  120899 wrap.go:47] PUT /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-42/status: (2.699635ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39136]
I0111 23:42:21.929398  120899 wrap.go:47] POST /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods: (1.925533ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39134]
I0111 23:42:21.930668  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-42: (2.607241ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39136]
I0111 23:42:21.931273  120899 wrap.go:47] POST /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/events: (6.12635ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39140]
I0111 23:42:21.931848  120899 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0111 23:42:21.932117  120899 scheduling_queue.go:821] About to try and schedule pod preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-43
I0111 23:42:21.932155  120899 scheduler.go:454] Attempting to schedule pod: preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-43
I0111 23:42:21.932338  120899 factory.go:1070] Unable to schedule preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-43: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0111 23:42:21.932427  120899 factory.go:1175] Updating pod condition for preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-43 to (PodScheduled==False, Reason=Unschedulable)
I0111 23:42:21.933549  120899 wrap.go:47] POST /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods: (2.641139ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39134]
I0111 23:42:21.938035  120899 wrap.go:47] PUT /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-43/status: (2.076188ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39138]
I0111 23:42:21.938184  120899 wrap.go:47] POST /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/events: (2.095305ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39134]
I0111 23:42:21.938551  120899 wrap.go:47] POST /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods: (2.07719ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39144]
I0111 23:42:21.938658  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-43: (3.189041ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39140]
I0111 23:42:21.941138  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-43: (1.75679ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39142]
I0111 23:42:21.941646  120899 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0111 23:42:21.941655  120899 wrap.go:47] POST /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods: (2.609216ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39134]
I0111 23:42:21.941911  120899 scheduling_queue.go:821] About to try and schedule pod preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-45
I0111 23:42:21.941925  120899 scheduler.go:454] Attempting to schedule pod: preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-45
I0111 23:42:21.941998  120899 factory.go:1070] Unable to schedule preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-45: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0111 23:42:21.942048  120899 factory.go:1175] Updating pod condition for preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-45 to (PodScheduled==False, Reason=Unschedulable)
I0111 23:42:21.944094  120899 wrap.go:47] POST /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods: (1.866878ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39142]
I0111 23:42:21.944825  120899 wrap.go:47] POST /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/events: (1.982405ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39148]
I0111 23:42:21.945890  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-45: (1.314764ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39142]
I0111 23:42:21.947983  120899 wrap.go:47] PUT /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-45/status: (5.144116ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39140]
I0111 23:42:21.949901  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-45: (1.427506ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39148]
I0111 23:42:21.950202  120899 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0111 23:42:21.950518  120899 scheduling_queue.go:821] About to try and schedule pod preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-48
I0111 23:42:21.950541  120899 scheduler.go:454] Attempting to schedule pod: preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-48
I0111 23:42:21.950629  120899 factory.go:1070] Unable to schedule preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-48: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0111 23:42:21.950718  120899 factory.go:1175] Updating pod condition for preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-48 to (PodScheduled==False, Reason=Unschedulable)
I0111 23:42:21.954335  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-48: (2.878669ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39146]
I0111 23:42:21.955197  120899 wrap.go:47] POST /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/events: (3.7544ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39150]
I0111 23:42:21.957020  120899 wrap.go:47] PUT /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-48/status: (5.484218ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39148]
I0111 23:42:21.959351  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-48: (1.625617ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39150]
I0111 23:42:21.959670  120899 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0111 23:42:21.960002  120899 scheduling_queue.go:821] About to try and schedule pod preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-45
I0111 23:42:21.960049  120899 scheduler.go:454] Attempting to schedule pod: preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-45
I0111 23:42:21.960221  120899 factory.go:1070] Unable to schedule preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-45: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0111 23:42:21.960413  120899 factory.go:1175] Updating pod condition for preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-45 to (PodScheduled==False, Reason=Unschedulable)
I0111 23:42:21.963800  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-45: (1.902582ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39146]
I0111 23:42:21.964015  120899 wrap.go:47] PUT /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-45/status: (3.233511ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39150]
I0111 23:42:21.965065  120899 wrap.go:47] PATCH /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/events/ppod-45.1578efce71208db4: (3.469747ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39152]
I0111 23:42:21.966975  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-45: (1.698801ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39146]
I0111 23:42:21.967367  120899 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0111 23:42:21.967556  120899 scheduling_queue.go:821] About to try and schedule pod preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-48
I0111 23:42:21.967605  120899 scheduler.go:454] Attempting to schedule pod: preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-48
I0111 23:42:21.967801  120899 factory.go:1070] Unable to schedule preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-48: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0111 23:42:21.967940  120899 factory.go:1175] Updating pod condition for preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-48 to (PodScheduled==False, Reason=Unschedulable)
I0111 23:42:21.969632  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-48: (1.467039ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39152]
I0111 23:42:21.971186  120899 wrap.go:47] PUT /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-48/status: (2.949275ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39150]
I0111 23:42:21.971724  120899 wrap.go:47] PATCH /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/events/ppod-48.1578efce71a4f7a5: (2.90919ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39154]
I0111 23:42:21.973962  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-48: (2.167206ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39150]
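
Note the two shapes of event traffic above: POST /events creates a FailedScheduling Event the first time a (object, reason, message) triple is seen, while PATCH /events/ppod-48.1578efce71a4f7a5 is the event correlator deduplicating a repeat by bumping the count on the existing Event. A sketch of the client-go recorder path behind both, using the pre-context signatures of this vintage; the host and the bare pod object are placeholders:

    package main

    import (
        v1 "k8s.io/api/core/v1"
        "k8s.io/client-go/kubernetes"
        "k8s.io/client-go/kubernetes/scheme"
        typedcorev1 "k8s.io/client-go/kubernetes/typed/core/v1"
        "k8s.io/client-go/rest"
        "k8s.io/client-go/tools/record"
    )

    func main() {
        client := kubernetes.NewForConfigOrDie(&rest.Config{Host: "http://127.0.0.1:8080"})
        ns := "preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002"

        b := record.NewBroadcaster()
        b.StartRecordingToSink(&typedcorev1.EventSinkImpl{Interface: client.CoreV1().Events(ns)})
        recorder := b.NewRecorder(scheme.Scheme, v1.EventSource{Component: "scheduler.test"})

        // First call creates the Event (the POST /events lines); identical
        // repeats are aggregated into a count bump on the existing Event
        // (the PATCH /events/<name> lines).
        pod := &v1.Pod{}
        pod.Name, pod.Namespace = "ppod-48", ns
        recorder.Event(pod, v1.EventTypeWarning, "FailedScheduling",
            "0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.")
    }
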
I0111 23:42:21.974260  120899 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0111 23:42:21.974446  120899 scheduling_queue.go:821] About to try and schedule pod preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-49
I0111 23:42:21.974463  120899 scheduler.go:454] Attempting to schedule pod: preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-49
I0111 23:42:21.974567  120899 factory.go:1070] Unable to schedule preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-49: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0111 23:42:21.974608  120899 factory.go:1175] Updating pod condition for preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-49 to (PodScheduled==False, Reason=Unschedulable)
I0111 23:42:21.988702  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-49: (11.151076ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39152]
I0111 23:42:21.991473  120899 wrap.go:47] PUT /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-49/status: (16.363436ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39154]
I0111 23:42:22.028797  120899 wrap.go:47] POST /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/events: (52.0386ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39168]
I0111 23:42:22.034928  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-49: (4.105785ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39168]
I0111 23:42:22.036220  120899 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0111 23:42:22.036443  120899 scheduling_queue.go:821] About to try and schedule pod preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-47
I0111 23:42:22.036469  120899 scheduler.go:454] Attempting to schedule pod: preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-47
I0111 23:42:22.036595  120899 factory.go:1070] Unable to schedule preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-47: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0111 23:42:22.036652  120899 factory.go:1175] Updating pod condition for preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-47 to (PodScheduled==False, Reason=Unschedulable)
I0111 23:42:22.039493  120899 wrap.go:47] PUT /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-47/status: (2.484547ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39168]
I0111 23:42:22.040895  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-47: (2.558643ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39152]
I0111 23:42:22.041530  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-47: (1.485246ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39168]
I0111 23:42:22.041971  120899 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0111 23:42:22.042123  120899 wrap.go:47] POST /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/events: (4.452039ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39232]
I0111 23:42:22.042192  120899 scheduling_queue.go:821] About to try and schedule pod preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-49
I0111 23:42:22.042218  120899 scheduler.go:454] Attempting to schedule pod: preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-49
I0111 23:42:22.042335  120899 factory.go:1070] Unable to schedule preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-49: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0111 23:42:22.042385  120899 factory.go:1175] Updating pod condition for preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-49 to (PodScheduled==False, Reason=Unschedulable)
I0111 23:42:22.066661  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/preemptor-pod: (21.661239ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39244]
I0111 23:42:22.067354  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-49: (24.464961ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39152]
I0111 23:42:22.067810  120899 wrap.go:47] PUT /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-49/status: (24.538692ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39168]
I0111 23:42:22.072024  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-49: (2.460838ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39168]
I0111 23:42:22.072487  120899 wrap.go:47] PATCH /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/events/ppod-49.1578efce73118afd: (29.214243ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39238]
I0111 23:42:22.073852  120899 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0111 23:42:22.074100  120899 scheduling_queue.go:821] About to try and schedule pod preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-1
I0111 23:42:22.074113  120899 scheduler.go:454] Attempting to schedule pod: preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-1
I0111 23:42:22.074223  120899 factory.go:1070] Unable to schedule preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-1: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0111 23:42:22.074263  120899 factory.go:1175] Updating pod condition for preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-1 to (PodScheduled==False, Reason=Unschedulable)
I0111 23:42:22.076846  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-1: (1.909316ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39152]
I0111 23:42:22.077628  120899 wrap.go:47] POST /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/events: (1.954128ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39278]
I0111 23:42:22.081861  120899 wrap.go:47] PUT /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-1/status: (6.491089ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39244]
I0111 23:42:22.083896  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-1: (1.4222ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39278]
I0111 23:42:22.084132  120899 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0111 23:42:22.084303  120899 scheduling_queue.go:821] About to try and schedule pod preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-46
I0111 23:42:22.084324  120899 scheduler.go:454] Attempting to schedule pod: preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-46
I0111 23:42:22.084520  120899 factory.go:1070] Unable to schedule preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-46: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0111 23:42:22.084620  120899 factory.go:1175] Updating pod condition for preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-46 to (PodScheduled==False, Reason=Unschedulable)
I0111 23:42:22.087327  120899 wrap.go:47] POST /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/events: (1.958927ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39280]
I0111 23:42:22.089168  120899 wrap.go:47] PUT /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-46/status: (3.898727ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39152]
I0111 23:42:22.089694  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-46: (4.856584ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39278]
I0111 23:42:22.094667  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-46: (4.030241ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39278]
I0111 23:42:22.095091  120899 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0111 23:42:22.095254  120899 scheduling_queue.go:821] About to try and schedule pod preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-1
I0111 23:42:22.095309  120899 scheduler.go:454] Attempting to schedule pod: preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-1
I0111 23:42:22.095443  120899 factory.go:1070] Unable to schedule preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-1: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0111 23:42:22.095489  120899 factory.go:1175] Updating pod condition for preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-1 to (PodScheduled==False, Reason=Unschedulable)
E0111 23:42:22.099557  120899 rest.go:216] >>>> caught error : &errors.StatusError{ErrStatus:v1.Status{TypeMeta:v1.TypeMeta{Kind:"", APIVersion:""}, ListMeta:v1.ListMeta{SelfLink:"", ResourceVersion:"", Continue:""}, Status:"Failure", Message:"Operation cannot be fulfilled on pods \"ppod-1\": the object has been modified; please apply your changes to the latest version and try again", Reason:"Conflict", Details:(*v1.StatusDetails)(0xc0125f6cc0), Code:409}}
I0111 23:42:22.099621  120899 update.go:183] Unable to store in database : &errors.StatusError{ErrStatus:v1.Status{TypeMeta:v1.TypeMeta{Kind:"", APIVersion:""}, ListMeta:v1.ListMeta{SelfLink:"", ResourceVersion:"", Continue:""}, Status:"Failure", Message:"Operation cannot be fulfilled on pods \"ppod-1\": the object has been modified; please apply your changes to the latest version and try again", Reason:"Conflict", Details:(*v1.StatusDetails)(0xc0125f6cc0), Code:409}}
I0111 23:42:22.099708  120899 trace.go:84] Trace[1692624391]: "Update /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-1/status" (started: 2019-01-11 23:42:22.0975602 +0000 UTC m=+65.793153995) (total time: 2.11968ms):
Trace[1692624391]: [73.701µs] [73.701µs] About to convert to expected version
Trace[1692624391]: [204.322µs] [130.621µs] Conversion done
Trace[1692624391]: [211.041µs] [6.719µs] About to store object in database
Trace[1692624391]: [2.11968ms] [1.908639ms] END
I0111 23:42:22.099716  120899 update.go:53] >>>> UpdateResource bad count : &http.Request{Method:"PUT", URL:(*url.URL)(0xc0125e6c00), Proto:"HTTP/1.1", ProtoMajor:1, ProtoMinor:1, Header:http.Header{"Accept-Encoding":[]string{"gzip"}, "User-Agent":[]string{"scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format"}, "Content-Length":[]string{"1087"}, "Accept":[]string{"application/json, */*"}, "Content-Type":[]string{"application/json"}}, Body:(*http.body)(0xc0125dec00), GetBody:(func() (io.ReadCloser, error))(nil), ContentLength:1087, TransferEncoding:[]string(nil), Close:false, Host:"127.0.0.1:46609", Form:url.Values(nil), PostForm:url.Values(nil), MultipartForm:(*multipart.Form)(nil), Trailer:http.Header(nil), RemoteAddr:"127.0.0.1:39278", RequestURI:"/api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-1/status", TLS:(*tls.ConnectionState)(nil), Cancel:(<-chan struct {})(nil), Response:(*http.Response)(nil), ctx:(*context.valueCtx)(0xc0125f0ae0)}
I0111 23:42:22.099852  120899 wrap.go:47] PUT /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-1/status: (2.411659ms) 409 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39278]
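
The 409 above is optimistic concurrency at work: the status PUT for ppod-1 carried a stale resourceVersion because another writer got in between, so the etcd-backed store rejects it with Conflict. The ">>>> caught error" and ">>>> UpdateResource bad count" lines are this PR's work-in-progress debug output around that path. The standard client-side remedy is to re-read and retry, for example with client-go's retry helper; a sketch using the pre-context client-go signatures of this vintage, with the host a placeholder and the names taken from the log:

    package main

    import (
        metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
        "k8s.io/client-go/kubernetes"
        "k8s.io/client-go/rest"
        "k8s.io/client-go/util/retry"
    )

    func main() {
        client := kubernetes.NewForConfigOrDie(&rest.Config{Host: "http://127.0.0.1:8080"})
        ns := "preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002"

        // Re-GET to pick up the latest resourceVersion, reapply the change,
        // and retry whenever the update comes back 409 Conflict.
        _ = retry.RetryOnConflict(retry.DefaultRetry, func() error {
            pod, err := client.CoreV1().Pods(ns).Get("ppod-1", metav1.GetOptions{})
            if err != nil {
                return err
            }
            // ...mutate pod.Status conditions here...
            _, err = client.CoreV1().Pods(ns).UpdateStatus(pod)
            return err
        })
    }
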
I0111 23:42:22.100303  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-1: (3.202164ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39280]
I0111 23:42:22.102629  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-1: (1.286593ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39278]
I0111 23:42:22.102922  120899 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0111 23:42:22.103369  120899 wrap.go:47] PATCH /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/events/ppod-1.1578efce79022ce4: (6.916102ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39282]
I0111 23:42:22.103466  120899 scheduling_queue.go:821] About to try and schedule pod preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-44
I0111 23:42:22.103487  120899 scheduler.go:454] Attempting to schedule pod: preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-44
I0111 23:42:22.103588  120899 factory.go:1070] Unable to schedule preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-44: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0111 23:42:22.103622  120899 factory.go:1175] Updating pod condition for preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-44 to (PodScheduled==False, Reason=Unschedulable)
I0111 23:42:22.106814  120899 wrap.go:47] POST /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/events: (1.882669ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39282]
I0111 23:42:22.107048  120899 wrap.go:47] PUT /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-44/status: (2.71163ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39278]
I0111 23:42:22.107520  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-44: (3.488765ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39280]
I0111 23:42:22.110141  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-44: (1.986212ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39278]
I0111 23:42:22.110438  120899 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0111 23:42:22.110633  120899 scheduling_queue.go:821] About to try and schedule pod preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-40
I0111 23:42:22.110653  120899 scheduler.go:454] Attempting to schedule pod: preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-40
I0111 23:42:22.110836  120899 factory.go:1070] Unable to schedule preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-40: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0111 23:42:22.110921  120899 factory.go:1175] Updating pod condition for preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-40 to (PodScheduled==False, Reason=Unschedulable)
I0111 23:42:22.115688  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-40: (4.409315ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39278]
I0111 23:42:22.117353  120899 wrap.go:47] PUT /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-40/status: (2.338952ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39282]
I0111 23:42:22.119040  120899 wrap.go:47] PATCH /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/events/ppod-40.1578efce6f600aa9: (2.905145ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39284]
I0111 23:42:22.119726  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-40: (1.560446ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39282]
I0111 23:42:22.120183  120899 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0111 23:42:22.120410  120899 scheduling_queue.go:821] About to try and schedule pod preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-41
I0111 23:42:22.120464  120899 scheduler.go:454] Attempting to schedule pod: preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-41
I0111 23:42:22.120766  120899 factory.go:1070] Unable to schedule preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-41: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0111 23:42:22.120848  120899 factory.go:1175] Updating pod condition for preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-41 to (PodScheduled==False, Reason=Unschedulable)
I0111 23:42:22.124500  120899 wrap.go:47] POST /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/events: (2.248523ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39286]
I0111 23:42:22.126340  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-41: (4.145441ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39278]
I0111 23:42:22.127577  120899 wrap.go:47] PUT /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-41/status: (6.21611ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39284]
I0111 23:42:22.129933  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-41: (1.860374ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39278]
I0111 23:42:22.130184  120899 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0111 23:42:22.130715  120899 scheduling_queue.go:821] About to try and schedule pod preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-38
I0111 23:42:22.130794  120899 scheduler.go:454] Attempting to schedule pod: preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-38
I0111 23:42:22.130922  120899 factory.go:1070] Unable to schedule preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-38: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0111 23:42:22.130999  120899 factory.go:1175] Updating pod condition for preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-38 to (PodScheduled==False, Reason=Unschedulable)
I0111 23:42:22.137508  120899 wrap.go:47] PUT /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-38/status: (5.900939ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39278]
I0111 23:42:22.137994  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-38: (4.251967ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39286]
I0111 23:42:22.141082  120899 wrap.go:47] PATCH /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/events/ppod-38.1578efce6eacd9c5: (6.195123ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39288]
I0111 23:42:22.142726  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-38: (2.473624ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39278]
I0111 23:42:22.143338  120899 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0111 23:42:22.143655  120899 scheduling_queue.go:821] About to try and schedule pod preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-41
I0111 23:42:22.143682  120899 scheduler.go:454] Attempting to schedule pod: preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-41
I0111 23:42:22.143825  120899 factory.go:1070] Unable to schedule preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-41: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0111 23:42:22.143874  120899 factory.go:1175] Updating pod condition for preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-41 to (PodScheduled==False, Reason=Unschedulable)
I0111 23:42:22.146331  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-41: (1.600325ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39288]
I0111 23:42:22.148181  120899 wrap.go:47] PATCH /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/events/ppod-41.1578efce7bc8fa06: (2.795629ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39290]
E0111 23:42:22.149614  120899 rest.go:216] >>>> caught error : &errors.StatusError{ErrStatus:v1.Status{TypeMeta:v1.TypeMeta{Kind:"", APIVersion:""}, ListMeta:v1.ListMeta{SelfLink:"", ResourceVersion:"", Continue:""}, Status:"Failure", Message:"Operation cannot be fulfilled on pods \"ppod-41\": the object has been modified; please apply your changes to the latest version and try again", Reason:"Conflict", Details:(*v1.StatusDetails)(0xc01267c6c0), Code:409}}
I0111 23:42:22.149660  120899 update.go:183] Unable to store in database : &errors.StatusError{ErrStatus:v1.Status{TypeMeta:v1.TypeMeta{Kind:"", APIVersion:""}, ListMeta:v1.ListMeta{SelfLink:"", ResourceVersion:"", Continue:""}, Status:"Failure", Message:"Operation cannot be fulfilled on pods \"ppod-41\": the object has been modified; please apply your changes to the latest version and try again", Reason:"Conflict", Details:(*v1.StatusDetails)(0xc01267c6c0), Code:409}}
I0111 23:42:22.149801  120899 trace.go:84] Trace[988250538]: "Update /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-41/status" (started: 2019-01-11 23:42:22.148118836 +0000 UTC m=+65.843712637) (total time: 1.616755ms):
Trace[988250538]: [61.63µs] [61.63µs] About to convert to expected version
Trace[988250538]: [239.172µs] [177.542µs] Conversion done
Trace[988250538]: [244.498µs] [5.326µs] About to store object in database
Trace[988250538]: [1.616755ms] [1.372257ms] END
I0111 23:42:22.149812  120899 update.go:53] >>>> UpdateResource bad count : &http.Request{Method:"PUT", URL:(*url.URL)(0xc012723800), Proto:"HTTP/1.1", ProtoMajor:1, ProtoMinor:1, Header:http.Header{"User-Agent":[]string{"scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format"}, "Content-Length":[]string{"1091"}, "Accept":[]string{"application/json, */*"}, "Content-Type":[]string{"application/json"}, "Accept-Encoding":[]string{"gzip"}}, Body:(*http.body)(0xc012763640), GetBody:(func() (io.ReadCloser, error))(nil), ContentLength:1091, TransferEncoding:[]string(nil), Close:false, Host:"127.0.0.1:46609", Form:url.Values(nil), PostForm:url.Values(nil), MultipartForm:(*multipart.Form)(nil), Trailer:http.Header(nil), RemoteAddr:"127.0.0.1:39286", RequestURI:"/api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-41/status", TLS:(*tls.ConnectionState)(nil), Cancel:(<-chan struct {})(nil), Response:(*http.Response)(nil), ctx:(*context.valueCtx)(0xc012787020)}
I0111 23:42:22.149900  120899 wrap.go:47] PUT /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-41/status: (1.948401ms) 409 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39286]
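
The Trace[...] blocks are the apiserver's request tracing (the utiltrace package, k8s.io/apiserver/pkg/util/trace at the time): the Update handler creates a trace, records named steps, and prints the cumulative and per-step latencies at END. A sketch of that usage; the step names mirror the log, while the 500ms threshold is an assumption, since this build appears to log every update trace regardless of duration:

    package main

    import (
        "time"

        utiltrace "k8s.io/apiserver/pkg/util/trace"
    )

    func main() {
        t := utiltrace.New("Update /api/v1/.../pods/ppod-41/status")
        defer t.LogIfLong(500 * time.Millisecond) // threshold assumed

        t.Step("About to convert to expected version")
        // ...version conversion...
        t.Step("Conversion done")
        t.Step("About to store object in database")
        // ...storage write; the deferred call then prints the step latencies...
    }
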
I0111 23:42:22.153906  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-41: (3.204345ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39290]
I0111 23:42:22.154228  120899 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0111 23:42:22.154442  120899 scheduling_queue.go:821] About to try and schedule pod preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-39
I0111 23:42:22.154488  120899 scheduler.go:454] Attempting to schedule pod: preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-39
I0111 23:42:22.154604  120899 factory.go:1070] Unable to schedule preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-39: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0111 23:42:22.154702  120899 factory.go:1175] Updating pod condition for preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-39 to (PodScheduled==False, Reason=Unschedulable)
I0111 23:42:22.157269  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-39: (1.850231ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39288]
I0111 23:42:22.157640  120899 wrap.go:47] POST /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/events: (1.593338ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39292]
I0111 23:42:22.159759  120899 wrap.go:47] PUT /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-39/status: (4.242438ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39290]
I0111 23:42:22.165179  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-39: (1.832989ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39292]
I0111 23:42:22.165603  120899 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0111 23:42:22.166676  120899 scheduling_queue.go:821] About to try and schedule pod preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-3
I0111 23:42:22.166730  120899 scheduler.go:454] Attempting to schedule pod: preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-3
I0111 23:42:22.166901  120899 factory.go:1070] Unable to schedule preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-3: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0111 23:42:22.166980  120899 factory.go:1175] Updating pod condition for preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-3 to (PodScheduled==False, Reason=Unschedulable)
I0111 23:42:22.169428  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-3: (1.980791ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39292]
I0111 23:42:22.170936  120899 wrap.go:47] POST /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/events: (3.053926ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39294]
I0111 23:42:22.171335  120899 wrap.go:47] PUT /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-3/status: (3.342154ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39288]
I0111 23:42:22.173069  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-3: (1.265323ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39288]
I0111 23:42:22.173542  120899 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0111 23:42:22.175760  120899 scheduling_queue.go:821] About to try and schedule pod preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-10
I0111 23:42:22.175826  120899 scheduler.go:454] Attempting to schedule pod: preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-10
I0111 23:42:22.175948  120899 factory.go:1070] Unable to schedule preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-10: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0111 23:42:22.176040  120899 factory.go:1175] Updating pod condition for preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-10 to (PodScheduled==False, Reason=Unschedulable)
I0111 23:42:22.173582  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/preemptor-pod: (2.12663ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39292]
I0111 23:42:22.179629  120899 wrap.go:47] POST /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/events: (1.745374ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39292]
I0111 23:42:22.181831  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-10: (4.310044ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39294]
I0111 23:42:22.181839  120899 wrap.go:47] PUT /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-10/status: (5.517334ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39288]
I0111 23:42:22.183997  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-10: (1.416063ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39294]
I0111 23:42:22.184314  120899 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0111 23:42:22.184486  120899 scheduling_queue.go:821] About to try and schedule pod preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-19
I0111 23:42:22.184508  120899 scheduler.go:454] Attempting to schedule pod: preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-19
I0111 23:42:22.184612  120899 factory.go:1070] Unable to schedule preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-19: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0111 23:42:22.184662  120899 factory.go:1175] Updating pod condition for preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-19 to (PodScheduled==False, Reason=Unschedulable)
I0111 23:42:22.188671  120899 wrap.go:47] POST /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/events: (2.683222ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39298]
I0111 23:42:22.188901  120899 wrap.go:47] PUT /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-19/status: (3.544143ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39292]
I0111 23:42:22.189491  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-19: (4.114343ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39294]
I0111 23:42:22.191667  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-19: (1.250938ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39292]
I0111 23:42:22.191999  120899 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0111 23:42:22.192182  120899 scheduling_queue.go:821] About to try and schedule pod preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-37
I0111 23:42:22.192310  120899 scheduler.go:454] Attempting to schedule pod: preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-37
I0111 23:42:22.192453  120899 factory.go:1070] Unable to schedule preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-37: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0111 23:42:22.192534  120899 factory.go:1175] Updating pod condition for preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-37 to (PodScheduled==False, Reason=Unschedulable)
I0111 23:42:22.196867  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-37: (1.971478ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39292]
I0111 23:42:22.199574  120899 wrap.go:47] POST /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/events: (2.308313ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39300]
I0111 23:42:22.201207  120899 wrap.go:47] PUT /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-37/status: (3.022849ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39298]
I0111 23:42:22.203391  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-37: (1.489396ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39300]
I0111 23:42:22.203726  120899 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0111 23:42:22.203984  120899 scheduling_queue.go:821] About to try and schedule pod preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-34
I0111 23:42:22.204028  120899 scheduler.go:454] Attempting to schedule pod: preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-34
I0111 23:42:22.204158  120899 factory.go:1070] Unable to schedule preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-34: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0111 23:42:22.204301  120899 factory.go:1175] Updating pod condition for preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-34 to (PodScheduled==False, Reason=Unschedulable)
I0111 23:42:22.224808  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-34: (19.207994ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39292]
I0111 23:42:22.225337  120899 wrap.go:47] PUT /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-34/status: (20.746106ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39300]
I0111 23:42:22.226852  120899 wrap.go:47] PATCH /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/events/ppod-34.1578efce6da89cd6: (21.4593ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39302]
I0111 23:42:22.228306  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-34: (1.463832ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39300]
I0111 23:42:22.228627  120899 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0111 23:42:22.228869  120899 scheduling_queue.go:821] About to try and schedule pod preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-20
I0111 23:42:22.228880  120899 scheduler.go:454] Attempting to schedule pod: preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-20
I0111 23:42:22.228977  120899 factory.go:1070] Unable to schedule preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-20: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0111 23:42:22.229024  120899 factory.go:1175] Updating pod condition for preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-20 to (PodScheduled==False, Reason=Unschedulable)
I0111 23:42:22.251887  120899 wrap.go:47] POST /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/events: (21.684575ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39304]
I0111 23:42:22.251960  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-20: (22.316782ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39292]
I0111 23:42:22.251983  120899 wrap.go:47] PUT /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-20/status: (22.316438ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39302]
I0111 23:42:22.256265  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-20: (3.872356ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39304]
I0111 23:42:22.256732  120899 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0111 23:42:22.257042  120899 scheduling_queue.go:821] About to try and schedule pod preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-11
I0111 23:42:22.257061  120899 scheduler.go:454] Attempting to schedule pod: preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-11
I0111 23:42:22.257203  120899 factory.go:1070] Unable to schedule preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-11: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0111 23:42:22.257267  120899 factory.go:1175] Updating pod condition for preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-11 to (PodScheduled==False, Reason=Unschedulable)
I0111 23:42:22.261774  120899 wrap.go:47] PUT /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-11/status: (3.28013ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39292]
I0111 23:42:22.262220  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-11: (3.787268ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39302]
I0111 23:42:22.262625  120899 wrap.go:47] POST /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/events: (3.583859ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39306]
I0111 23:42:22.264932  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-11: (1.822039ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39302]
I0111 23:42:22.265279  120899 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0111 23:42:22.265487  120899 scheduling_queue.go:821] About to try and schedule pod preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-35
I0111 23:42:22.265506  120899 scheduler.go:454] Attempting to schedule pod: preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-35
I0111 23:42:22.265643  120899 factory.go:1070] Unable to schedule preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-35: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0111 23:42:22.265701  120899 factory.go:1175] Updating pod condition for preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-35 to (PodScheduled==False, Reason=Unschedulable)
I0111 23:42:22.269381  120899 wrap.go:47] PUT /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-35/status: (3.308702ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39306]
I0111 23:42:22.270426  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-35: (1.993127ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39310]
I0111 23:42:22.271680  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/preemptor-pod: (1.433888ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39312]
I0111 23:42:22.272351  120899 wrap.go:47] POST /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/events: (6.262036ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39292]
I0111 23:42:22.274455  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-35: (1.517631ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39312]
I0111 23:42:22.275358  120899 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0111 23:42:22.275863  120899 scheduling_queue.go:821] About to try and schedule pod preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-33
I0111 23:42:22.275914  120899 scheduler.go:454] Attempting to schedule pod: preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-33
I0111 23:42:22.276084  120899 factory.go:1070] Unable to schedule preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-33: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0111 23:42:22.276179  120899 factory.go:1175] Updating pod condition for preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-33 to (PodScheduled==False, Reason=Unschedulable)
I0111 23:42:22.278775  120899 wrap.go:47] POST /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/events: (2.027687ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39310]
I0111 23:42:22.278798  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-33: (2.04998ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39312]
I0111 23:42:22.281773  120899 wrap.go:47] PUT /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-33/status: (2.643945ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39310]
I0111 23:42:22.284146  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-33: (1.650795ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39314]
I0111 23:42:22.284435  120899 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0111 23:42:22.284601  120899 scheduling_queue.go:821] About to try and schedule pod preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-21
I0111 23:42:22.284620  120899 scheduler.go:454] Attempting to schedule pod: preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-21
I0111 23:42:22.284733  120899 factory.go:1070] Unable to schedule preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-21: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0111 23:42:22.284803  120899 factory.go:1175] Updating pod condition for preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-21 to (PodScheduled==False, Reason=Unschedulable)
I0111 23:42:22.288052  120899 wrap.go:47] POST /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/events: (2.510328ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39316]
I0111 23:42:22.288640  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-21: (3.642834ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39314]
I0111 23:42:22.289456  120899 wrap.go:47] PUT /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-21/status: (3.975506ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39312]
I0111 23:42:22.292362  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-21: (1.753894ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39312]
I0111 23:42:22.292851  120899 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0111 23:42:22.293425  120899 scheduling_queue.go:821] About to try and schedule pod preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-22
I0111 23:42:22.293442  120899 scheduler.go:454] Attempting to schedule pod: preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-22
I0111 23:42:22.293553  120899 factory.go:1070] Unable to schedule preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-22: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0111 23:42:22.293595  120899 factory.go:1175] Updating pod condition for preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-22 to (PodScheduled==False, Reason=Unschedulable)
I0111 23:42:22.302072  120899 wrap.go:47] POST /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/events: (7.516643ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39318]
I0111 23:42:22.302921  120899 wrap.go:47] PUT /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-22/status: (8.845275ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39316]
I0111 23:42:22.304496  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-22: (9.293889ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39314]
I0111 23:42:22.357875  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-22: (50.734694ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39316]
I0111 23:42:22.358649  120899 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0111 23:42:22.359363  120899 scheduling_queue.go:821] About to try and schedule pod preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-6
I0111 23:42:22.359391  120899 scheduler.go:454] Attempting to schedule pod: preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-6
I0111 23:42:22.359718  120899 factory.go:1070] Unable to schedule preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-6: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0111 23:42:22.359897  120899 factory.go:1175] Updating pod condition for preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-6 to (PodScheduled==False, Reason=Unschedulable)
I0111 23:42:22.365419  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-6: (3.620623ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39318]
I0111 23:42:22.375579  120899 wrap.go:47] POST /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/events: (11.350992ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39320]
I0111 23:42:22.376002  120899 wrap.go:47] PUT /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-6/status: (13.869533ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39314]
I0111 23:42:22.391828  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-6: (14.179309ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39320]
I0111 23:42:22.392394  120899 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0111 23:42:22.392636  120899 scheduling_queue.go:821] About to try and schedule pod preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-12
I0111 23:42:22.392662  120899 scheduler.go:454] Attempting to schedule pod: preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-12
I0111 23:42:22.392852  120899 factory.go:1070] Unable to schedule preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-12: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0111 23:42:22.392947  120899 factory.go:1175] Updating pod condition for preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-12 to (PodScheduled==False, Reason=Unschedulable)
I0111 23:42:22.397090  120899 wrap.go:47] POST /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/events: (2.835257ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39328]
I0111 23:42:22.397397  120899 wrap.go:47] PUT /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-12/status: (3.741823ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39320]
I0111 23:42:22.406705  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-12: (8.601919ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39320]
I0111 23:42:22.407053  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-12: (11.831914ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39318]
I0111 23:42:22.407262  120899 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0111 23:42:22.409343  120899 scheduling_queue.go:821] About to try and schedule pod preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-23
I0111 23:42:22.409376  120899 scheduler.go:454] Attempting to schedule pod: preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-23
I0111 23:42:22.409634  120899 factory.go:1070] Unable to schedule preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-23: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0111 23:42:22.409714  120899 factory.go:1175] Updating pod condition for preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-23 to (PodScheduled==False, Reason=Unschedulable)
I0111 23:42:22.412782  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-23: (2.435426ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39328]
I0111 23:42:22.413002  120899 wrap.go:47] POST /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/events: (2.058227ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39330]
I0111 23:42:22.414871  120899 wrap.go:47] PUT /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-23/status: (4.547407ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39320]
I0111 23:42:22.418357  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-23: (1.952825ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39330]
I0111 23:42:22.418673  120899 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0111 23:42:22.426672  120899 scheduling_queue.go:821] About to try and schedule pod preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-24
I0111 23:42:22.426698  120899 scheduler.go:454] Attempting to schedule pod: preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-24
I0111 23:42:22.427044  120899 factory.go:1070] Unable to schedule preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-24: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0111 23:42:22.427516  120899 factory.go:1175] Updating pod condition for preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-24 to (PodScheduled==False, Reason=Unschedulable)
I0111 23:42:22.430725  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-24: (2.651462ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39328]
I0111 23:42:22.431867  120899 wrap.go:47] PUT /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-24/status: (3.469055ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39334]
I0111 23:42:22.438579  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/preemptor-pod: (11.954699ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39330]
I0111 23:42:22.440378  120899 wrap.go:47] POST /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/events: (8.81221ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39328]
I0111 23:42:22.441430  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-24: (3.537671ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39332]
I0111 23:42:22.441836  120899 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0111 23:42:22.442064  120899 scheduling_queue.go:821] About to try and schedule pod preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-8
I0111 23:42:22.442106  120899 scheduler.go:454] Attempting to schedule pod: preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-8
I0111 23:42:22.442240  120899 factory.go:1070] Unable to schedule preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-8: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0111 23:42:22.442401  120899 factory.go:1175] Updating pod condition for preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-8 to (PodScheduled==False, Reason=Unschedulable)
I0111 23:42:22.445063  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-8: (1.734088ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39334]
I0111 23:42:22.447311  120899 wrap.go:47] PUT /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-8/status: (4.470131ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39330]
I0111 23:42:22.449183  120899 wrap.go:47] PATCH /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/events/ppod-8.1578efce6690d15e: (5.766765ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39336]
I0111 23:42:22.449712  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-8: (1.30707ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39330]
I0111 23:42:22.450172  120899 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0111 23:42:22.450361  120899 scheduling_queue.go:821] About to try and schedule pod preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-25
I0111 23:42:22.450381  120899 scheduler.go:454] Attempting to schedule pod: preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-25
I0111 23:42:22.450506  120899 factory.go:1070] Unable to schedule preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-25: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0111 23:42:22.450551  120899 factory.go:1175] Updating pod condition for preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-25 to (PodScheduled==False, Reason=Unschedulable)
I0111 23:42:22.461134  120899 wrap.go:47] POST /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/events: (2.561612ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39338]
I0111 23:42:22.461303  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-25: (2.847415ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39334]
I0111 23:42:22.461313  120899 wrap.go:47] PUT /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-25/status: (2.772195ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39336]
I0111 23:42:22.462991  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-25: (1.268809ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39336]
I0111 23:42:22.463336  120899 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0111 23:42:22.463580  120899 scheduling_queue.go:821] About to try and schedule pod preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-32
I0111 23:42:22.463627  120899 scheduler.go:454] Attempting to schedule pod: preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-32
I0111 23:42:22.463779  120899 factory.go:1070] Unable to schedule preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-32: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0111 23:42:22.463862  120899 factory.go:1175] Updating pod condition for preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-32 to (PodScheduled==False, Reason=Unschedulable)
I0111 23:42:22.465388  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-32: (1.261518ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39336]
I0111 23:42:22.466181  120899 wrap.go:47] PUT /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-32/status: (1.819471ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39338]
I0111 23:42:22.467360  120899 wrap.go:47] POST /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/events: (2.347693ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39340]
I0111 23:42:22.467943  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-32: (1.27953ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39338]
I0111 23:42:22.468399  120899 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0111 23:42:22.468655  120899 scheduling_queue.go:821] About to try and schedule pod preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-28
I0111 23:42:22.468689  120899 scheduler.go:454] Attempting to schedule pod: preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-28
I0111 23:42:22.468834  120899 factory.go:1070] Unable to schedule preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-28: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0111 23:42:22.468901  120899 factory.go:1175] Updating pod condition for preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-28 to (PodScheduled==False, Reason=Unschedulable)
I0111 23:42:22.471047  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-28: (1.807278ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39336]
I0111 23:42:22.471052  120899 wrap.go:47] POST /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/events: (1.514473ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39342]
I0111 23:42:22.472523  120899 wrap.go:47] PUT /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-28/status: (3.354525ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39340]
I0111 23:42:22.472677  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/preemptor-pod: (2.217522ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39344]
I0111 23:42:22.474436  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-28: (1.26383ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39342]
I0111 23:42:22.474687  120899 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0111 23:42:22.474924  120899 scheduling_queue.go:821] About to try and schedule pod preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-2
I0111 23:42:22.474956  120899 scheduler.go:454] Attempting to schedule pod: preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-2
I0111 23:42:22.475054  120899 factory.go:1070] Unable to schedule preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-2: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0111 23:42:22.475094  120899 factory.go:1175] Updating pod condition for preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-2 to (PodScheduled==False, Reason=Unschedulable)
I0111 23:42:22.477818  120899 wrap.go:47] POST /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/events: (2.024651ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39346]
I0111 23:42:22.477942  120899 wrap.go:47] PUT /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-2/status: (2.384349ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39342]
I0111 23:42:22.478854  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-2: (2.215823ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39336]
I0111 23:42:22.479584  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-2: (1.095825ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39342]
I0111 23:42:22.479882  120899 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0111 23:42:22.480107  120899 scheduling_queue.go:821] About to try and schedule pod preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-4
I0111 23:42:22.480120  120899 scheduler.go:454] Attempting to schedule pod: preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-4
I0111 23:42:22.480210  120899 factory.go:1070] Unable to schedule preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-4: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0111 23:42:22.480247  120899 factory.go:1175] Updating pod condition for preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-4 to (PodScheduled==False, Reason=Unschedulable)
I0111 23:42:22.482843  120899 wrap.go:47] PUT /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-4/status: (1.945822ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39342]
I0111 23:42:22.483633  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-4: (3.03655ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39346]
I0111 23:42:22.484112  120899 wrap.go:47] PATCH /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/events/ppod-4.1578efce6507c928: (3.018756ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39348]
I0111 23:42:22.485270  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-4: (1.153452ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39342]
I0111 23:42:22.485606  120899 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0111 23:42:22.485850  120899 scheduling_queue.go:821] About to try and schedule pod preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-27
I0111 23:42:22.485897  120899 scheduler.go:454] Attempting to schedule pod: preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-27
I0111 23:42:22.486051  120899 factory.go:1070] Unable to schedule preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-27: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0111 23:42:22.486128  120899 factory.go:1175] Updating pod condition for preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-27 to (PodScheduled==False, Reason=Unschedulable)
I0111 23:42:22.488379  120899 wrap.go:47] PUT /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-27/status: (1.630802ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39346]
I0111 23:42:22.489570  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-27: (3.115826ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39348]
I0111 23:42:22.489846  120899 wrap.go:47] PATCH /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/events/ppod-27.1578efce6c271555: (2.39026ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39350]
I0111 23:42:22.489897  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-27: (1.154557ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39346]
I0111 23:42:22.490384  120899 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0111 23:42:22.490637  120899 scheduling_queue.go:821] About to try and schedule pod preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-13
I0111 23:42:22.490652  120899 scheduler.go:454] Attempting to schedule pod: preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-13
I0111 23:42:22.490815  120899 factory.go:1070] Unable to schedule preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-13: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0111 23:42:22.490867  120899 factory.go:1175] Updating pod condition for preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-13 to (PodScheduled==False, Reason=Unschedulable)
I0111 23:42:22.493199  120899 wrap.go:47] POST /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/events: (1.58947ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39352]
I0111 23:42:22.493387  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-13: (2.203206ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39350]
I0111 23:42:22.493723  120899 wrap.go:47] PUT /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-13/status: (2.589303ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39348]
I0111 23:42:22.495897  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-13: (1.324396ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39350]
I0111 23:42:22.496171  120899 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0111 23:42:22.496366  120899 scheduling_queue.go:821] About to try and schedule pod preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-29
I0111 23:42:22.496410  120899 scheduler.go:454] Attempting to schedule pod: preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-29
I0111 23:42:22.496571  120899 factory.go:1070] Unable to schedule preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-29: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0111 23:42:22.496650  120899 factory.go:1175] Updating pod condition for preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-29 to (PodScheduled==False, Reason=Unschedulable)
I0111 23:42:22.499047  120899 wrap.go:47] PUT /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-29/status: (1.773024ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39352]
I0111 23:42:22.499968  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-29: (3.027196ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39350]
I0111 23:42:22.500321  120899 wrap.go:47] POST /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/events: (3.002889ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39354]
I0111 23:42:22.503853  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-29: (4.388354ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39352]
I0111 23:42:22.504362  120899 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0111 23:42:22.504536  120899 scheduling_queue.go:821] About to try and schedule pod preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-30
I0111 23:42:22.504547  120899 scheduler.go:454] Attempting to schedule pod: preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-30
I0111 23:42:22.504645  120899 factory.go:1070] Unable to schedule preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-30: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0111 23:42:22.504683  120899 factory.go:1175] Updating pod condition for preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-30 to (PodScheduled==False, Reason=Unschedulable)
I0111 23:42:22.507797  120899 wrap.go:47] PUT /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-30/status: (2.838122ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39354]
I0111 23:42:22.507810  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-30: (2.380715ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39350]
I0111 23:42:22.507803  120899 wrap.go:47] POST /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/events: (2.144696ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39356]
I0111 23:42:22.509733  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-30: (1.482269ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39350]
I0111 23:42:22.510110  120899 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0111 23:42:22.510419  120899 scheduling_queue.go:821] About to try and schedule pod preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-15
I0111 23:42:22.510464  120899 scheduler.go:454] Attempting to schedule pod: preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-15
I0111 23:42:22.510592  120899 factory.go:1070] Unable to schedule preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-15: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0111 23:42:22.510662  120899 factory.go:1175] Updating pod condition for preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-15 to (PodScheduled==False, Reason=Unschedulable)
I0111 23:42:22.512511  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-15: (1.41754ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39354]
I0111 23:42:22.513434  120899 wrap.go:47] PUT /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-15/status: (2.467168ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39356]
I0111 23:42:22.515228  120899 wrap.go:47] PATCH /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/events/ppod-15.1578efce6a3ae0fe: (3.50684ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39358]
I0111 23:42:22.515816  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-15: (1.967703ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39356]
I0111 23:42:22.516151  120899 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0111 23:42:22.516477  120899 scheduling_queue.go:821] About to try and schedule pod preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-31
I0111 23:42:22.516525  120899 scheduler.go:454] Attempting to schedule pod: preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-31
I0111 23:42:22.516700  120899 factory.go:1070] Unable to schedule preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-31: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0111 23:42:22.516801  120899 factory.go:1175] Updating pod condition for preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-31 to (PodScheduled==False, Reason=Unschedulable)
I0111 23:42:22.519809  120899 wrap.go:47] POST /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/events: (2.225861ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39360]
I0111 23:42:22.520530  120899 wrap.go:47] PUT /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-31/status: (3.447801ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39354]
I0111 23:42:22.521611  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-31: (4.365926ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39358]
I0111 23:42:22.522496  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-31: (1.540547ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39354]
I0111 23:42:22.522838  120899 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0111 23:42:22.523009  120899 scheduling_queue.go:821] About to try and schedule pod preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-7
I0111 23:42:22.523029  120899 scheduler.go:454] Attempting to schedule pod: preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-7
I0111 23:42:22.523130  120899 factory.go:1070] Unable to schedule preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-7: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0111 23:42:22.523182  120899 factory.go:1175] Updating pod condition for preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-7 to (PodScheduled==False, Reason=Unschedulable)
I0111 23:42:22.524728  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-7: (1.162897ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39360]
I0111 23:42:22.525319  120899 wrap.go:47] PUT /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-7/status: (1.863343ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39358]
I0111 23:42:22.526823  120899 wrap.go:47] PATCH /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/events/ppod-7.1578efce65cfe56d: (2.786244ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39362]
I0111 23:42:22.527386  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-7: (1.606896ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39358]
I0111 23:42:22.531873  120899 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0111 23:42:22.532074  120899 scheduling_queue.go:821] About to try and schedule pod preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-17
I0111 23:42:22.532085  120899 scheduler.go:454] Attempting to schedule pod: preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-17
I0111 23:42:22.532167  120899 factory.go:1070] Unable to schedule preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-17: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0111 23:42:22.532206  120899 factory.go:1175] Updating pod condition for preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-17 to (PodScheduled==False, Reason=Unschedulable)
I0111 23:42:22.535006  120899 wrap.go:47] POST /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/events: (1.990187ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39364]
I0111 23:42:22.535811  120899 wrap.go:47] PUT /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-17/status: (1.836194ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39362]
I0111 23:42:22.535821  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-17: (2.148642ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39360]
I0111 23:42:22.537877  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-17: (1.346469ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39360]
I0111 23:42:22.538332  120899 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0111 23:42:22.538624  120899 scheduling_queue.go:821] About to try and schedule pod preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-18
I0111 23:42:22.538667  120899 scheduler.go:454] Attempting to schedule pod: preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-18
I0111 23:42:22.538878  120899 factory.go:1070] Unable to schedule preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-18: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0111 23:42:22.538965  120899 factory.go:1175] Updating pod condition for preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-18 to (PodScheduled==False, Reason=Unschedulable)
I0111 23:42:22.540643  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-18: (1.409386ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39360]
I0111 23:42:22.541604  120899 wrap.go:47] POST /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/events: (2.046015ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39364]
I0111 23:42:22.543634  120899 wrap.go:47] PUT /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-18/status: (3.15652ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39366]
I0111 23:42:22.545631  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-18: (1.256182ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39364]
I0111 23:42:22.546021  120899 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0111 23:42:22.546311  120899 scheduling_queue.go:821] About to try and schedule pod preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-18
I0111 23:42:22.546352  120899 scheduler.go:454] Attempting to schedule pod: preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-18
I0111 23:42:22.546495  120899 factory.go:1070] Unable to schedule preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-18: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0111 23:42:22.546637  120899 factory.go:1175] Updating pod condition for preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-18 to (PodScheduled==False, Reason=Unschedulable)
I0111 23:42:22.549638  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-18: (2.73566ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39364]
I0111 23:42:22.550422  120899 wrap.go:47] PUT /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-18/status: (3.445342ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39360]
I0111 23:42:22.550735  120899 wrap.go:47] PATCH /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/events/ppod-18.1578efce94b4e35c: (3.491965ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39368]
I0111 23:42:22.552493  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-18: (1.49822ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39360]
I0111 23:42:22.552929  120899 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0111 23:42:22.553183  120899 scheduling_queue.go:821] About to try and schedule pod preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-4
I0111 23:42:22.553215  120899 scheduler.go:454] Attempting to schedule pod: preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-4
I0111 23:42:22.553408  120899 factory.go:1070] Unable to schedule preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-4: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0111 23:42:22.553506  120899 factory.go:1175] Updating pod condition for preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-4 to (PodScheduled==False, Reason=Unschedulable)
I0111 23:42:22.555653  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-4: (1.590872ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39364]
I0111 23:42:22.556353  120899 wrap.go:47] PUT /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-4/status: (1.787353ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39368]
I0111 23:42:22.558486  120899 wrap.go:47] PATCH /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/events/ppod-4.1578efce6507c928: (3.708606ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39370]
I0111 23:42:22.558930  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-4: (2.124294ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39368]
I0111 23:42:22.559208  120899 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0111 23:42:22.559401  120899 scheduling_queue.go:821] About to try and schedule pod preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-32
I0111 23:42:22.559423  120899 scheduler.go:454] Attempting to schedule pod: preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-32
I0111 23:42:22.559547  120899 factory.go:1070] Unable to schedule preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-32: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0111 23:42:22.559631  120899 factory.go:1175] Updating pod condition for preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-32 to (PodScheduled==False, Reason=Unschedulable)
I0111 23:42:22.561706  120899 wrap.go:47] PUT /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-32/status: (1.777937ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39370]
I0111 23:42:22.562624  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-32: (2.089942ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39364]
I0111 23:42:22.563601  120899 wrap.go:47] PATCH /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/events/ppod-32.1578efce903aeab9: (2.925329ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39372]
I0111 23:42:22.565714  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-32: (1.517822ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39364]
I0111 23:42:22.566044  120899 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0111 23:42:22.566293  120899 scheduling_queue.go:821] About to try and schedule pod preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-12
I0111 23:42:22.566318  120899 scheduler.go:454] Attempting to schedule pod: preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-12
I0111 23:42:22.566445  120899 factory.go:1070] Unable to schedule preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-12: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0111 23:42:22.566498  120899 factory.go:1175] Updating pod condition for preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-12 to (PodScheduled==False, Reason=Unschedulable)
I0111 23:42:22.568668  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-12: (1.775333ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39370]
I0111 23:42:22.570530  120899 wrap.go:47] PATCH /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/events/ppod-12.1578efce8c00788e: (2.990346ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39374]
I0111 23:42:22.572113  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/preemptor-pod: (1.485659ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39370]
I0111 23:42:22.573202  120899 wrap.go:47] PUT /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-12/status: (4.112496ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39364]
I0111 23:42:22.580402  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-12: (6.666789ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39370]
I0111 23:42:22.580810  120899 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0111 23:42:22.581157  120899 scheduling_queue.go:821] About to try and schedule pod preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-6
I0111 23:42:22.581199  120899 scheduler.go:454] Attempting to schedule pod: preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-6
I0111 23:42:22.581349  120899 factory.go:1070] Unable to schedule preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-6: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0111 23:42:22.581407  120899 factory.go:1175] Updating pod condition for preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-6 to (PodScheduled==False, Reason=Unschedulable)
I0111 23:42:22.584144  120899 wrap.go:47] PUT /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-6/status: (2.432873ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39370]
I0111 23:42:22.584529  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-6: (2.107022ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39374]
I0111 23:42:22.595781  120899 wrap.go:47] PATCH /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/events/ppod-6.1578efce8a08592a: (12.751292ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39376]
I0111 23:42:22.597026  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-6: (10.625948ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39370]
I0111 23:42:22.597601  120899 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0111 23:42:22.597848  120899 scheduling_queue.go:821] About to try and schedule pod preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-21
I0111 23:42:22.597881  120899 scheduler.go:454] Attempting to schedule pod: preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-21
I0111 23:42:22.598042  120899 factory.go:1070] Unable to schedule preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-21: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0111 23:42:22.598110  120899 factory.go:1175] Updating pod condition for preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-21 to (PodScheduled==False, Reason=Unschedulable)
I0111 23:42:22.600732  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-21: (1.621374ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39376]
I0111 23:42:22.602114  120899 wrap.go:47] PATCH /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/events/ppod-21.1578efce858ebbff: (3.096422ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39374]
I0111 23:42:22.604321  120899 wrap.go:47] PUT /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-21/status: (3.264636ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39378]
I0111 23:42:22.606143  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-21: (1.4042ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39374]
I0111 23:42:22.606514  120899 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0111 23:42:22.606755  120899 scheduling_queue.go:821] About to try and schedule pod preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-33
I0111 23:42:22.606792  120899 scheduler.go:454] Attempting to schedule pod: preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-33
I0111 23:42:22.606942  120899 factory.go:1070] Unable to schedule preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-33: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0111 23:42:22.606988  120899 factory.go:1175] Updating pod condition for preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-33 to (PodScheduled==False, Reason=Unschedulable)
I0111 23:42:22.609482  120899 wrap.go:47] PUT /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-33/status: (2.210626ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39374]
I0111 23:42:22.609929  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-33: (2.2213ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39376]
I0111 23:42:22.611963  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-33: (1.169126ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39376]
I0111 23:42:22.612231  120899 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0111 23:42:22.612475  120899 scheduling_queue.go:821] About to try and schedule pod preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-37
I0111 23:42:22.612531  120899 scheduler.go:454] Attempting to schedule pod: preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-37
I0111 23:42:22.612729  120899 factory.go:1070] Unable to schedule preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-37: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0111 23:42:22.612818  120899 factory.go:1175] Updating pod condition for preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-37 to (PodScheduled==False, Reason=Unschedulable)
I0111 23:42:22.615725  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-37: (1.34638ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39376]
I0111 23:42:22.616005  120899 wrap.go:47] PUT /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-37/status: (2.433769ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39374]
I0111 23:42:22.617077  120899 wrap.go:47] PATCH /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/events/ppod-33.1578efce850ac4a0: (8.116213ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39380]
I0111 23:42:22.617608  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-37: (1.156835ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39374]
I0111 23:42:22.617900  120899 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0111 23:42:22.618068  120899 scheduling_queue.go:821] About to try and schedule pod preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-3
I0111 23:42:22.618082  120899 scheduler.go:454] Attempting to schedule pod: preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-3
I0111 23:42:22.618172  120899 factory.go:1070] Unable to schedule preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-3: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0111 23:42:22.618220  120899 factory.go:1175] Updating pod condition for preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-3 to (PodScheduled==False, Reason=Unschedulable)
I0111 23:42:22.620821  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-3: (1.279283ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39376]
I0111 23:42:22.621142  120899 wrap.go:47] PUT /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-3/status: (1.569877ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39374]
I0111 23:42:22.622179  120899 wrap.go:47] PATCH /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/events/ppod-37.1578efce800ecb0e: (3.626572ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39380]
I0111 23:42:22.622856  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-3: (1.236753ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39374]
I0111 23:42:22.623089  120899 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0111 23:42:22.623333  120899 scheduling_queue.go:821] About to try and schedule pod preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-47
I0111 23:42:22.623349  120899 scheduler.go:454] Attempting to schedule pod: preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-47
I0111 23:42:22.623498  120899 factory.go:1070] Unable to schedule preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-47: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0111 23:42:22.623537  120899 factory.go:1175] Updating pod condition for preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-47 to (PodScheduled==False, Reason=Unschedulable)
I0111 23:42:22.625651  120899 wrap.go:47] PATCH /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/events/ppod-3.1578efce7e88e100: (2.623158ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39380]
I0111 23:42:22.626664  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-47: (1.774248ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39376]
I0111 23:42:22.626737  120899 wrap.go:47] PUT /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-47/status: (2.60638ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39374]
I0111 23:42:22.628192  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-47: (1.058256ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39374]
I0111 23:42:22.628461  120899 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0111 23:42:22.628666  120899 scheduling_queue.go:821] About to try and schedule pod preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-39
I0111 23:42:22.628685  120899 scheduler.go:454] Attempting to schedule pod: preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-39
I0111 23:42:22.628847  120899 factory.go:1070] Unable to schedule preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-39: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0111 23:42:22.628885  120899 factory.go:1175] Updating pod condition for preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-39 to (PodScheduled==False, Reason=Unschedulable)
I0111 23:42:22.629855  120899 wrap.go:47] PATCH /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/events/ppod-47.1578efce76c439eb: (3.538914ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39380]
I0111 23:42:22.631252  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-39: (1.336413ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39376]
I0111 23:42:22.631319  120899 wrap.go:47] PUT /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-39/status: (2.152535ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39374]
I0111 23:42:22.633452  120899 wrap.go:47] PATCH /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/events/ppod-39.1578efce7dcd54f2: (2.92246ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39380]
I0111 23:42:22.634679  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-39: (3.017496ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39374]
I0111 23:42:22.635101  120899 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0111 23:42:22.635271  120899 scheduling_queue.go:821] About to try and schedule pod preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-27
I0111 23:42:22.635342  120899 scheduler.go:454] Attempting to schedule pod: preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-27
I0111 23:42:22.635477  120899 factory.go:1070] Unable to schedule preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-27: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0111 23:42:22.635552  120899 factory.go:1175] Updating pod condition for preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-27 to (PodScheduled==False, Reason=Unschedulable)
I0111 23:42:22.643776  120899 wrap.go:47] PUT /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-27/status: (7.908349ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39380]
I0111 23:42:22.644823  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-27: (8.369628ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39376]
I0111 23:42:22.645147  120899 wrap.go:47] PATCH /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/events/ppod-27.1578efce6c271555: (8.686041ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39382]
I0111 23:42:22.646408  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-27: (1.550996ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39380]
I0111 23:42:22.646718  120899 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0111 23:42:22.646945  120899 scheduling_queue.go:821] About to try and schedule pod preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-30
I0111 23:42:22.646968  120899 scheduler.go:454] Attempting to schedule pod: preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-30
I0111 23:42:22.647082  120899 factory.go:1070] Unable to schedule preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-30: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0111 23:42:22.647138  120899 factory.go:1175] Updating pod condition for preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-30 to (PodScheduled==False, Reason=Unschedulable)
I0111 23:42:22.650165  120899 wrap.go:47] PUT /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-30/status: (2.21609ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39382]
I0111 23:42:22.657769  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-30: (7.103703ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39382]
I0111 23:42:22.658252  120899 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0111 23:42:22.658579  120899 scheduling_queue.go:821] About to try and schedule pod preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-13
I0111 23:42:22.658646  120899 scheduler.go:454] Attempting to schedule pod: preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-13
I0111 23:42:22.658661  120899 wrap.go:47] PATCH /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/events/ppod-30.1578efce92a9db0a: (10.593053ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39384]
I0111 23:42:22.658305  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-30: (9.389365ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39376]
I0111 23:42:22.658974  120899 factory.go:1070] Unable to schedule preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-13: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0111 23:42:22.659032  120899 factory.go:1175] Updating pod condition for preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-13 to (PodScheduled==False, Reason=Unschedulable)
I0111 23:42:22.661381  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-13: (1.850759ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39376]
I0111 23:42:22.661970  120899 backoff_utils.go:79] Backing off 2s
I0111 23:42:22.664790  120899 wrap.go:47] PATCH /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/events/ppod-13.1578efce91d6ff0c: (4.971509ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39382]
I0111 23:42:22.665129  120899 wrap.go:47] PUT /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-13/status: (2.241504ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39384]
I0111 23:42:22.667265  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-13: (1.483684ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39382]
I0111 23:42:22.667558  120899 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0111 23:42:22.667777  120899 scheduling_queue.go:821] About to try and schedule pod preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-29
I0111 23:42:22.667848  120899 scheduler.go:454] Attempting to schedule pod: preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-29
I0111 23:42:22.667990  120899 factory.go:1070] Unable to schedule preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-29: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0111 23:42:22.668051  120899 factory.go:1175] Updating pod condition for preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-29 to (PodScheduled==False, Reason=Unschedulable)
I0111 23:42:22.670108  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-29: (1.482072ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39376]
I0111 23:42:22.671207  120899 wrap.go:47] PATCH /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/events/ppod-29.1578efce922f3b8c: (2.333594ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39386]
I0111 23:42:22.672548  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/preemptor-pod: (1.963768ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39388]
I0111 23:42:22.674361  120899 wrap.go:47] PUT /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-29/status: (6.025151ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39382]
I0111 23:42:22.676481  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-29: (1.471677ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39376]
I0111 23:42:22.676797  120899 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0111 23:42:22.677022  120899 scheduling_queue.go:821] About to try and schedule pod preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-23
I0111 23:42:22.677061  120899 scheduler.go:454] Attempting to schedule pod: preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-23
I0111 23:42:22.677181  120899 factory.go:1070] Unable to schedule preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-23: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0111 23:42:22.677257  120899 factory.go:1175] Updating pod condition for preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-23 to (PodScheduled==False, Reason=Unschedulable)
I0111 23:42:22.680214  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-23: (2.245796ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39376]
I0111 23:42:22.682594  120899 wrap.go:47] PUT /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-23/status: (1.896413ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39386]
I0111 23:42:22.683363  120899 wrap.go:47] PATCH /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/events/ppod-23.1578efce8d0077b9: (4.539803ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39390]
I0111 23:42:22.684645  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-23: (1.602282ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39386]
I0111 23:42:22.685049  120899 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0111 23:42:22.685355  120899 scheduling_queue.go:821] About to try and schedule pod preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-22
I0111 23:42:22.685417  120899 scheduler.go:454] Attempting to schedule pod: preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-22
I0111 23:42:22.685608  120899 factory.go:1070] Unable to schedule preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-22: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0111 23:42:22.685845  120899 factory.go:1175] Updating pod condition for preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-22 to (PodScheduled==False, Reason=Unschedulable)
I0111 23:42:22.689324  120899 wrap.go:47] PUT /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-22/status: (2.859949ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39376]
I0111 23:42:22.689348  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-22: (3.352211ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39390]
I0111 23:42:22.690781  120899 wrap.go:47] PATCH /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/events/ppod-22.1578efce8614e69f: (3.823659ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39392]
I0111 23:42:22.691368  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-22: (1.644162ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39390]
I0111 23:42:22.691693  120899 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0111 23:42:22.691902  120899 scheduling_queue.go:821] About to try and schedule pod preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-34
I0111 23:42:22.691944  120899 scheduler.go:454] Attempting to schedule pod: preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-34
I0111 23:42:22.692041  120899 factory.go:1070] Unable to schedule preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-34: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0111 23:42:22.692111  120899 factory.go:1175] Updating pod condition for preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-34 to (PodScheduled==False, Reason=Unschedulable)
I0111 23:42:22.694624  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-34: (1.434898ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39376]
I0111 23:42:22.697432  120899 wrap.go:47] PATCH /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/events/ppod-34.1578efce6da89cd6: (3.347577ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39394]
I0111 23:42:22.698516  120899 wrap.go:47] PUT /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-34/status: (5.33152ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39392]
I0111 23:42:22.700231  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-34: (1.229837ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39394]
I0111 23:42:22.700535  120899 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0111 23:42:22.700733  120899 scheduling_queue.go:821] About to try and schedule pod preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-10
I0111 23:42:22.700774  120899 scheduler.go:454] Attempting to schedule pod: preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-10
I0111 23:42:22.700942  120899 factory.go:1070] Unable to schedule preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-10: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0111 23:42:22.700998  120899 factory.go:1175] Updating pod condition for preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-10 to (PodScheduled==False, Reason=Unschedulable)
I0111 23:42:22.703809  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-10: (2.536706ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39376]
I0111 23:42:22.704270  120899 wrap.go:47] PUT /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-10/status: (2.913122ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39394]
I0111 23:42:22.705326  120899 wrap.go:47] PATCH /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/events/ppod-10.1578efce7f12de31: (3.505429ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39396]
I0111 23:42:22.705957  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-10: (1.301149ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39394]
I0111 23:42:22.706373  120899 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0111 23:42:22.706550  120899 scheduling_queue.go:821] About to try and schedule pod preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-31
I0111 23:42:22.706571  120899 scheduler.go:454] Attempting to schedule pod: preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-31
I0111 23:42:22.706670  120899 factory.go:1070] Unable to schedule preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-31: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0111 23:42:22.706722  120899 factory.go:1175] Updating pod condition for preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-31 to (PodScheduled==False, Reason=Unschedulable)
I0111 23:42:22.708702  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-31: (1.182406ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39376]
I0111 23:42:22.709659  120899 wrap.go:47] PUT /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-31/status: (2.088481ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39396]
I0111 23:42:22.711078  120899 wrap.go:47] PATCH /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/events/ppod-31.1578efce9362afcb: (3.479637ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39398]
I0111 23:42:22.711173  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-31: (1.091508ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39396]
I0111 23:42:22.711518  120899 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0111 23:42:22.711808  120899 scheduling_queue.go:821] About to try and schedule pod preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-28
I0111 23:42:22.711858  120899 scheduler.go:454] Attempting to schedule pod: preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-28
I0111 23:42:22.712009  120899 factory.go:1070] Unable to schedule preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-28: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0111 23:42:22.712090  120899 factory.go:1175] Updating pod condition for preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-28 to (PodScheduled==False, Reason=Unschedulable)
I0111 23:42:22.715605  120899 wrap.go:47] PUT /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-28/status: (2.785822ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39376]
I0111 23:42:22.716758  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-28: (1.277097ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39398]
I0111 23:42:22.718073  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-28: (2.008765ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39376]
I0111 23:42:22.718533  120899 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0111 23:42:22.718664  120899 wrap.go:47] PATCH /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/events/ppod-28.1578efce9087c89c: (4.992633ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39400]
I0111 23:42:22.719160  120899 scheduling_queue.go:821] About to try and schedule pod preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-24
I0111 23:42:22.719175  120899 scheduler.go:454] Attempting to schedule pod: preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-24
I0111 23:42:22.719303  120899 factory.go:1070] Unable to schedule preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-24: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0111 23:42:22.719347  120899 factory.go:1175] Updating pod condition for preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-24 to (PodScheduled==False, Reason=Unschedulable)
I0111 23:42:22.723576  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-24: (3.391893ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39398]
I0111 23:42:22.723835  120899 wrap.go:47] PUT /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-24/status: (4.24826ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39376]
I0111 23:42:22.726390  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-24: (2.135295ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39376]
I0111 23:42:22.727133  120899 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0111 23:42:22.726884  120899 wrap.go:47] PATCH /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/events/ppod-24.1578efce8e0adcbb: (6.654809ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39402]
I0111 23:42:22.727610  120899 scheduling_queue.go:821] About to try and schedule pod preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-35
I0111 23:42:22.727638  120899 scheduler.go:454] Attempting to schedule pod: preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-35
I0111 23:42:22.727769  120899 factory.go:1070] Unable to schedule preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-35: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0111 23:42:22.727857  120899 factory.go:1175] Updating pod condition for preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-35 to (PodScheduled==False, Reason=Unschedulable)
I0111 23:42:22.730998  120899 wrap.go:47] PATCH /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/events/ppod-35.1578efce846b3fa2: (2.277569ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39404]
I0111 23:42:22.731694  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-35: (3.352885ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39398]
I0111 23:42:22.736250  120899 wrap.go:47] PUT /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-35/status: (6.990168ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39376]
I0111 23:42:22.738204  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-35: (1.430425ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39398]
I0111 23:42:22.738544  120899 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0111 23:42:22.738770  120899 scheduling_queue.go:821] About to try and schedule pod preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-17
I0111 23:42:22.738800  120899 scheduler.go:454] Attempting to schedule pod: preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-17
I0111 23:42:22.738897  120899 factory.go:1070] Unable to schedule preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-17: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0111 23:42:22.738954  120899 factory.go:1175] Updating pod condition for preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-17 to (PodScheduled==False, Reason=Unschedulable)
I0111 23:42:22.742213  120899 wrap.go:47] PUT /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-17/status: (2.976349ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39398]
I0111 23:42:22.742691  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-17: (2.995023ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39404]
I0111 23:42:22.743464  120899 wrap.go:47] PATCH /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/events/ppod-17.1578efce944dcfb8: (3.426998ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39406]
I0111 23:42:22.744778  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-17: (1.541105ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39398]
I0111 23:42:22.745213  120899 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0111 23:42:22.745419  120899 scheduling_queue.go:821] About to try and schedule pod preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-25
I0111 23:42:22.745443  120899 scheduler.go:454] Attempting to schedule pod: preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-25
I0111 23:42:22.745569  120899 factory.go:1070] Unable to schedule preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-25: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0111 23:42:22.745661  120899 factory.go:1175] Updating pod condition for preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-25 to (PodScheduled==False, Reason=Unschedulable)
I0111 23:42:22.747195  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-25: (1.251338ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39404]
I0111 23:42:22.748680  120899 wrap.go:47] PUT /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-25/status: (2.396835ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39406]
I0111 23:42:22.749765  120899 wrap.go:47] PATCH /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/events/ppod-25.1578efce8f6fdf39: (2.558461ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39408]
I0111 23:42:22.750715  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-25: (1.592829ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39406]
I0111 23:42:22.751121  120899 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0111 23:42:22.754422  120899 scheduling_queue.go:821] About to try and schedule pod preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/preemptor-pod
I0111 23:42:22.754465  120899 scheduler.go:454] Attempting to schedule pod: preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/preemptor-pod
I0111 23:42:22.754621  120899 scheduler_binder.go:211] AssumePodVolumes for pod "preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/preemptor-pod", node "node1"
I0111 23:42:22.754662  120899 scheduler_binder.go:221] AssumePodVolumes for pod "preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/preemptor-pod", node "node1": all PVCs bound and nothing to do
I0111 23:42:22.754733  120899 scheduling_queue.go:821] About to try and schedule pod preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-5
I0111 23:42:22.754787  120899 scheduler.go:454] Attempting to schedule pod: preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-5
I0111 23:42:22.754829  120899 factory.go:1166] Attempting to bind preemptor-pod to node1
I0111 23:42:22.755179  120899 factory.go:1070] Unable to schedule preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-5: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0111 23:42:22.755258  120899 factory.go:1175] Updating pod condition for preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-5 to (PodScheduled==False, Reason=Unschedulable)
I0111 23:42:22.758134  120899 wrap.go:47] PUT /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-5/status: (2.223311ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39404]
I0111 23:42:22.785322  120899 wrap.go:47] POST /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/preemptor-pod/binding: (29.276185ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39408]
I0111 23:42:22.785732  120899 scheduler.go:569] pod preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/preemptor-pod is bound successfully on node node1, 1 nodes evaluated, 1 nodes were found feasible
I0111 23:42:22.786313  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-5: (29.332096ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39410]
I0111 23:42:22.786581  120899 backoff_utils.go:79] Backing off 2s
I0111 23:42:22.787338  120899 wrap.go:47] PATCH /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/events/ppod-5.1578efce657d1c2e: (30.761311ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39412]
I0111 23:42:22.790018  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-5: (4.88834ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39404]
I0111 23:42:22.790716  120899 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0111 23:42:22.790881  120899 scheduling_queue.go:821] About to try and schedule pod preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-0
I0111 23:42:22.790897  120899 scheduler.go:454] Attempting to schedule pod: preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-0
I0111 23:42:22.790972  120899 factory.go:1070] Unable to schedule preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-0: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0111 23:42:22.791027  120899 factory.go:1175] Updating pod condition for preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-0 to (PodScheduled==False, Reason=Unschedulable)
I0111 23:42:22.791176  120899 wrap.go:47] POST /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/events: (2.576579ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39410]
I0111 23:42:22.793665  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-0: (2.242146ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39408]
I0111 23:42:22.793969  120899 backoff_utils.go:79] Backing off 2s
I0111 23:42:22.796813  120899 wrap.go:47] PUT /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-0/status: (5.525859ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39404]
I0111 23:42:22.800936  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-0: (3.669433ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39404]
I0111 23:42:22.802075  120899 wrap.go:47] PATCH /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/events/ppod-0.1578efce6476a82e: (3.648912ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39410]
I0111 23:42:22.802520  120899 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0111 23:42:22.804496  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/preemptor-pod: (3.938036ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39414]
I0111 23:42:22.804809  120899 preemption_test.go:583] Check unschedulable pods still exist and were never scheduled...
I0111 23:42:22.806695  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-0: (1.676066ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39404]
I0111 23:42:22.808526  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-1: (1.478124ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39404]
I0111 23:42:22.810019  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-2: (1.097214ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39404]
I0111 23:42:22.811736  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-3: (1.332425ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39404]
I0111 23:42:22.816660  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-4: (4.388604ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39404]
I0111 23:42:22.818682  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-5: (1.547872ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39404]
I0111 23:42:22.822700  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-6: (3.56761ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39404]
I0111 23:42:22.826857  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-7: (3.4019ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39404]
I0111 23:42:22.835720  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-8: (8.388207ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39404]
I0111 23:42:22.839939  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-9: (2.524363ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39404]
I0111 23:42:22.842051  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-10: (1.653297ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39404]
I0111 23:42:22.844684  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-11: (2.108514ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39404]
I0111 23:42:22.846678  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-12: (1.546905ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39404]
I0111 23:42:22.849774  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-13: (2.593944ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39404]
I0111 23:42:22.852763  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-14: (1.867458ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39404]
I0111 23:42:22.856342  120899 reflector.go:215] k8s.io/client-go/informers/factory.go:132: forcing resync
I0111 23:42:22.857611  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-15: (1.908427ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39404]
I0111 23:42:22.859607  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-16: (1.550965ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39404]
I0111 23:42:22.862011  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-17: (1.84594ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39404]
I0111 23:42:22.862544  120899 reflector.go:215] k8s.io/client-go/informers/factory.go:132: forcing resync
I0111 23:42:22.863350  120899 reflector.go:215] k8s.io/client-go/informers/factory.go:132: forcing resync
I0111 23:42:22.863454  120899 reflector.go:215] k8s.io/client-go/informers/factory.go:132: forcing resync
I0111 23:42:22.864236  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-18: (1.534402ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39404]
I0111 23:42:22.864491  120899 reflector.go:215] k8s.io/client-go/informers/factory.go:132: forcing resync
I0111 23:42:22.866670  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-19: (1.779894ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39404]
I0111 23:42:22.868949  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-20: (1.783007ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39404]
I0111 23:42:22.871316  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-21: (1.759814ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39404]
I0111 23:42:22.875196  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-22: (3.283345ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39404]
I0111 23:42:22.877941  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-23: (2.188059ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39404]
I0111 23:42:22.880421  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-24: (1.861096ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39404]
I0111 23:42:22.882231  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-25: (1.392263ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39404]
I0111 23:42:22.883993  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-26: (1.375103ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39404]
I0111 23:42:22.886382  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-27: (1.859292ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39404]
I0111 23:42:22.888554  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-28: (1.806832ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39404]
I0111 23:42:22.890622  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-29: (1.57252ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39404]
I0111 23:42:22.893085  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-30: (2.067121ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39404]
I0111 23:42:22.896326  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-31: (2.52895ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39404]
I0111 23:42:22.898503  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-32: (1.626224ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39404]
I0111 23:42:22.900452  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-33: (1.455404ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39404]
I0111 23:42:22.902034  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-34: (1.205521ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39404]
I0111 23:42:22.904095  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-35: (1.449903ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39404]
I0111 23:42:22.905920  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-36: (1.371009ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39404]
I0111 23:42:22.907681  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-37: (1.297869ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39404]
I0111 23:42:22.909166  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-38: (1.130661ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39404]
I0111 23:42:22.910699  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-39: (1.099051ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39404]
I0111 23:42:22.912500  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-40: (1.302415ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39404]
I0111 23:42:22.914249  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-41: (1.340769ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39404]
I0111 23:42:22.916260  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-42: (1.246522ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39404]
I0111 23:42:22.918158  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-43: (1.353249ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39404]
I0111 23:42:22.920385  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-44: (1.745999ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39404]
I0111 23:42:22.922126  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-45: (1.313354ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39404]
I0111 23:42:22.924002  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-46: (1.414542ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39404]
I0111 23:42:22.925918  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-47: (1.447628ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39404]
I0111 23:42:22.927493  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-48: (1.206216ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39404]
I0111 23:42:22.929124  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-49: (1.231316ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39404]
I0111 23:42:22.929403  120899 preemption_test.go:598] Cleaning up all pods...
I0111 23:42:22.932609  120899 scheduling_queue.go:821] About to try and schedule pod preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-0
I0111 23:42:22.932646  120899 scheduler.go:450] Skip schedule deleting pod: preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-0
I0111 23:42:22.934482  120899 wrap.go:47] POST /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/events: (1.546989ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39408]
I0111 23:42:22.935642  120899 wrap.go:47] DELETE /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-0: (6.03166ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39404]
I0111 23:42:22.939975  120899 scheduling_queue.go:821] About to try and schedule pod preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-1
I0111 23:42:22.940109  120899 scheduler.go:450] Skip schedule deleting pod: preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-1
I0111 23:42:22.941985  120899 wrap.go:47] DELETE /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-1: (5.973895ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39404]
I0111 23:42:22.944137  120899 wrap.go:47] POST /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/events: (3.287801ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39408]
I0111 23:42:22.946612  120899 scheduling_queue.go:821] About to try and schedule pod preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-2
I0111 23:42:22.946659  120899 scheduler.go:450] Skip schedule deleting pod: preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-2
I0111 23:42:22.948996  120899 wrap.go:47] POST /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/events: (1.823759ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39408]
I0111 23:42:22.949453  120899 wrap.go:47] DELETE /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-2: (6.156895ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39404]
I0111 23:42:22.954537  120899 scheduling_queue.go:821] About to try and schedule pod preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-3
I0111 23:42:22.954583  120899 scheduler.go:450] Skip schedule deleting pod: preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-3
I0111 23:42:22.956687  120899 wrap.go:47] POST /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/events: (1.761985ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39408]
I0111 23:42:22.959218  120899 wrap.go:47] DELETE /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-3: (9.159875ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39404]
I0111 23:42:22.963225  120899 scheduling_queue.go:821] About to try and schedule pod preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-4
I0111 23:42:22.963300  120899 scheduler.go:450] Skip schedule deleting pod: preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-4
I0111 23:42:22.964928  120899 wrap.go:47] DELETE /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-4: (5.349153ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39404]
I0111 23:42:22.966650  120899 wrap.go:47] POST /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/events: (2.257625ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39408]
I0111 23:42:22.968671  120899 scheduling_queue.go:821] About to try and schedule pod preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-5
I0111 23:42:22.968721  120899 scheduler.go:450] Skip schedule deleting pod: preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-5
I0111 23:42:22.971070  120899 wrap.go:47] POST /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/events: (2.032934ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39408]
I0111 23:42:22.971507  120899 wrap.go:47] DELETE /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-5: (6.164831ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39404]
I0111 23:42:22.978212  120899 scheduling_queue.go:821] About to try and schedule pod preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-6
I0111 23:42:22.978327  120899 scheduler.go:450] Skip schedule deleting pod: preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-6
I0111 23:42:22.979493  120899 wrap.go:47] DELETE /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-6: (7.440084ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39404]
I0111 23:42:22.981853  120899 wrap.go:47] POST /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/events: (2.912502ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39408]
I0111 23:42:22.983154  120899 scheduling_queue.go:821] About to try and schedule pod preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-7
I0111 23:42:22.983208  120899 scheduler.go:450] Skip schedule deleting pod: preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-7
I0111 23:42:22.985048  120899 wrap.go:47] POST /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/events: (1.5143ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39408]
I0111 23:42:22.985419  120899 wrap.go:47] DELETE /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-7: (5.497797ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39404]
I0111 23:42:22.989424  120899 scheduling_queue.go:821] About to try and schedule pod preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-8
I0111 23:42:22.989521  120899 scheduler.go:450] Skip schedule deleting pod: preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-8
I0111 23:42:22.991236  120899 wrap.go:47] DELETE /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-8: (5.374064ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39404]
I0111 23:42:22.991450  120899 wrap.go:47] POST /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/events: (1.459993ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39408]
I0111 23:42:22.996963  120899 scheduling_queue.go:821] About to try and schedule pod preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-9
I0111 23:42:22.997010  120899 scheduler.go:450] Skip schedule deleting pod: preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-9
I0111 23:42:22.999391  120899 wrap.go:47] POST /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/events: (2.102423ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39408]
I0111 23:42:22.999726  120899 wrap.go:47] DELETE /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-9: (8.064729ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39404]
I0111 23:42:23.004048  120899 scheduling_queue.go:821] About to try and schedule pod preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-10
I0111 23:42:23.004241  120899 scheduler.go:450] Skip schedule deleting pod: preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-10
I0111 23:42:23.006083  120899 wrap.go:47] DELETE /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-10: (5.986817ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39404]
I0111 23:42:23.006643  120899 wrap.go:47] POST /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/events: (2.028272ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39408]
I0111 23:42:23.010140  120899 scheduling_queue.go:821] About to try and schedule pod preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-11
I0111 23:42:23.010196  120899 scheduler.go:450] Skip schedule deleting pod: preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-11
I0111 23:42:23.011255  120899 wrap.go:47] DELETE /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-11: (4.527616ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39404]
I0111 23:42:23.012408  120899 wrap.go:47] POST /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/events: (1.538887ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39408]
I0111 23:42:23.022349  120899 wrap.go:47] DELETE /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-12: (10.209373ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39404]
I0111 23:42:23.025328  120899 scheduling_queue.go:821] About to try and schedule pod preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-12
I0111 23:42:23.025447  120899 scheduler.go:450] Skip schedule deleting pod: preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-12
I0111 23:42:23.026542  120899 scheduling_queue.go:821] About to try and schedule pod preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-13
I0111 23:42:23.026650  120899 scheduler.go:450] Skip schedule deleting pod: preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-13
I0111 23:42:23.027800  120899 wrap.go:47] POST /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/events: (1.945925ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39408]
I0111 23:42:23.028382  120899 wrap.go:47] DELETE /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-13: (5.686013ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39404]
I0111 23:42:23.033736  120899 wrap.go:47] DELETE /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-14: (4.887038ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39404]
I0111 23:42:23.038781  120899 wrap.go:47] DELETE /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-15: (4.701502ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39404]
I0111 23:42:23.043491  120899 wrap.go:47] DELETE /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-16: (4.297145ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39404]
I0111 23:42:23.045732  120899 wrap.go:47] POST /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/events: (16.738971ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39408]
I0111 23:42:23.048515  120899 scheduling_queue.go:821] About to try and schedule pod preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-15
I0111 23:42:23.048567  120899 scheduler.go:450] Skip schedule deleting pod: preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-15
I0111 23:42:23.049982  120899 wrap.go:47] DELETE /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-17: (6.113314ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39404]
I0111 23:42:23.050140  120899 scheduling_queue.go:821] About to try and schedule pod preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-17
I0111 23:42:23.050174  120899 scheduler.go:450] Skip schedule deleting pod: preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-17
I0111 23:42:23.051656  120899 wrap.go:47] POST /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/events: (2.210044ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39408]
I0111 23:42:23.055815  120899 wrap.go:47] POST /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/events: (3.017131ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39408]
I0111 23:42:23.058207  120899 scheduling_queue.go:821] About to try and schedule pod preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-18
I0111 23:42:23.059214  120899 scheduler.go:450] Skip schedule deleting pod: preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-18
I0111 23:42:23.060469  120899 wrap.go:47] DELETE /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-18: (9.976844ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39404]
I0111 23:42:23.062593  120899 wrap.go:47] POST /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/events: (2.785218ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39408]
I0111 23:42:23.064528  120899 scheduling_queue.go:821] About to try and schedule pod preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-19
I0111 23:42:23.064842  120899 scheduler.go:450] Skip schedule deleting pod: preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-19
I0111 23:42:23.066997  120899 wrap.go:47] POST /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/events: (1.50195ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39408]
I0111 23:42:23.067233  120899 wrap.go:47] DELETE /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-19: (6.055008ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39404]
I0111 23:42:23.071040  120899 scheduling_queue.go:821] About to try and schedule pod preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-20
I0111 23:42:23.071722  120899 scheduler.go:450] Skip schedule deleting pod: preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-20
I0111 23:42:23.074307  120899 wrap.go:47] DELETE /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-20: (6.218701ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39404]
I0111 23:42:23.077103  120899 wrap.go:47] POST /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/events: (4.833103ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39408]
I0111 23:42:23.078845  120899 scheduling_queue.go:821] About to try and schedule pod preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-21
I0111 23:42:23.078949  120899 scheduler.go:450] Skip schedule deleting pod: preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-21
I0111 23:42:23.080533  120899 wrap.go:47] DELETE /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-21: (5.484631ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39404]
I0111 23:42:23.081337  120899 wrap.go:47] POST /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/events: (1.72364ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39408]
I0111 23:42:23.084263  120899 scheduling_queue.go:821] About to try and schedule pod preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-22
I0111 23:42:23.084456  120899 scheduler.go:450] Skip schedule deleting pod: preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-22
I0111 23:42:23.087238  120899 wrap.go:47] DELETE /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-22: (5.83688ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39404]
I0111 23:42:23.089344  120899 wrap.go:47] POST /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/events: (4.236471ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39408]
I0111 23:42:23.090982  120899 scheduling_queue.go:821] About to try and schedule pod preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-23
I0111 23:42:23.091032  120899 scheduler.go:450] Skip schedule deleting pod: preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-23
I0111 23:42:23.093093  120899 wrap.go:47] DELETE /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-23: (5.270269ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39404]
I0111 23:42:23.093856  120899 wrap.go:47] POST /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/events: (2.352463ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39408]
I0111 23:42:23.097396  120899 scheduling_queue.go:821] About to try and schedule pod preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-24
I0111 23:42:23.098335  120899 scheduler.go:450] Skip schedule deleting pod: preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-24
I0111 23:42:23.100413  120899 wrap.go:47] DELETE /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-24: (5.893026ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39404]
I0111 23:42:23.100672  120899 wrap.go:47] POST /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/events: (1.714606ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39408]
I0111 23:42:23.103690  120899 scheduling_queue.go:821] About to try and schedule pod preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-25
I0111 23:42:23.103962  120899 scheduler.go:450] Skip schedule deleting pod: preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-25
I0111 23:42:23.105321  120899 wrap.go:47] DELETE /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-25: (4.33482ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39404]
I0111 23:42:23.105993  120899 wrap.go:47] POST /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/events: (1.571551ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39408]
I0111 23:42:23.109500  120899 scheduling_queue.go:821] About to try and schedule pod preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-26
I0111 23:42:23.109601  120899 scheduler.go:450] Skip schedule deleting pod: preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-26
I0111 23:42:23.110890  120899 wrap.go:47] DELETE /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-26: (5.125584ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39404]
I0111 23:42:23.115795  120899 scheduling_queue.go:821] About to try and schedule pod preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-27
I0111 23:42:23.115897  120899 scheduler.go:450] Skip schedule deleting pod: preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-27
I0111 23:42:23.116032  120899 wrap.go:47] POST /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/events: (3.804215ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39408]
I0111 23:42:23.117605  120899 wrap.go:47] DELETE /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-27: (5.827304ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39404]
I0111 23:42:23.120714  120899 wrap.go:47] POST /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/events: (4.032891ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39408]
I0111 23:42:23.122704  120899 scheduling_queue.go:821] About to try and schedule pod preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-28
I0111 23:42:23.122819  120899 scheduler.go:450] Skip schedule deleting pod: preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-28
I0111 23:42:23.124555  120899 wrap.go:47] DELETE /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-28: (4.956142ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39404]
I0111 23:42:23.125383  120899 wrap.go:47] POST /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/events: (1.988021ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39408]
I0111 23:42:23.128840  120899 scheduling_queue.go:821] About to try and schedule pod preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-29
I0111 23:42:23.128928  120899 scheduler.go:450] Skip schedule deleting pod: preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-29
I0111 23:42:23.130897  120899 wrap.go:47] POST /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/events: (1.626772ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39408]
I0111 23:42:23.131515  120899 wrap.go:47] DELETE /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-29: (6.491955ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39404]
I0111 23:42:23.136706  120899 scheduling_queue.go:821] About to try and schedule pod preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-30
I0111 23:42:23.136777  120899 scheduler.go:450] Skip schedule deleting pod: preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-30
I0111 23:42:23.138377  120899 wrap.go:47] DELETE /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-30: (6.38445ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39404]
I0111 23:42:23.139198  120899 wrap.go:47] POST /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/events: (2.059098ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39408]
I0111 23:42:23.143049  120899 scheduling_queue.go:821] About to try and schedule pod preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-31
I0111 23:42:23.143086  120899 scheduler.go:450] Skip schedule deleting pod: preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-31
I0111 23:42:23.144332  120899 wrap.go:47] DELETE /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-31: (4.563263ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39408]
I0111 23:42:23.145448  120899 wrap.go:47] POST /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/events: (1.977126ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39404]
I0111 23:42:23.148024  120899 scheduling_queue.go:821] About to try and schedule pod preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-32
I0111 23:42:23.148074  120899 scheduler.go:450] Skip schedule deleting pod: preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-32
I0111 23:42:23.149496  120899 wrap.go:47] DELETE /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-32: (4.120569ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39408]
I0111 23:42:23.149953  120899 wrap.go:47] POST /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/events: (1.473837ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39404]
I0111 23:42:23.152978  120899 scheduling_queue.go:821] About to try and schedule pod preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-33
I0111 23:42:23.153090  120899 scheduler.go:450] Skip schedule deleting pod: preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-33
I0111 23:42:23.156709  120899 wrap.go:47] POST /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/events: (3.150638ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39404]
I0111 23:42:23.157661  120899 wrap.go:47] DELETE /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-33: (7.715599ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39408]
I0111 23:42:23.161643  120899 scheduling_queue.go:821] About to try and schedule pod preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-34
I0111 23:42:23.162022  120899 scheduler.go:450] Skip schedule deleting pod: preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-34
I0111 23:42:23.163531  120899 wrap.go:47] DELETE /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-34: (5.427109ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39408]
I0111 23:42:23.164210  120899 wrap.go:47] POST /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/events: (1.790272ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39404]
I0111 23:42:23.166787  120899 scheduling_queue.go:821] About to try and schedule pod preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-35
I0111 23:42:23.166830  120899 scheduler.go:450] Skip schedule deleting pod: preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-35
I0111 23:42:23.168996  120899 wrap.go:47] POST /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/events: (1.898262ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39404]
I0111 23:42:23.169592  120899 wrap.go:47] DELETE /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-35: (5.640309ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39408]
I0111 23:42:23.173243  120899 scheduling_queue.go:821] About to try and schedule pod preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-36
I0111 23:42:23.173371  120899 scheduler.go:450] Skip schedule deleting pod: preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-36
I0111 23:42:23.176213  120899 wrap.go:47] POST /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/events: (2.476012ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39404]
I0111 23:42:23.176697  120899 wrap.go:47] DELETE /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-36: (6.547394ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39408]
I0111 23:42:23.180012  120899 scheduling_queue.go:821] About to try and schedule pod preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-37
I0111 23:42:23.180126  120899 scheduler.go:450] Skip schedule deleting pod: preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-37
I0111 23:42:23.181428  120899 wrap.go:47] DELETE /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-37: (4.30995ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39408]
I0111 23:42:23.182022  120899 wrap.go:47] POST /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/events: (1.549992ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39404]
I0111 23:42:23.185505  120899 scheduling_queue.go:821] About to try and schedule pod preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-38
I0111 23:42:23.185598  120899 scheduler.go:450] Skip schedule deleting pod: preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-38
I0111 23:42:23.187245  120899 wrap.go:47] DELETE /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-38: (4.732058ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39408]
I0111 23:42:23.187528  120899 wrap.go:47] POST /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/events: (1.59389ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39404]
I0111 23:42:23.190862  120899 scheduling_queue.go:821] About to try and schedule pod preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-39
I0111 23:42:23.191033  120899 scheduler.go:450] Skip schedule deleting pod: preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-39
I0111 23:42:23.191658  120899 wrap.go:47] DELETE /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-39: (4.011979ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39408]
I0111 23:42:23.194673  120899 scheduling_queue.go:821] About to try and schedule pod preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-40
I0111 23:42:23.194722  120899 scheduler.go:450] Skip schedule deleting pod: preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-40
I0111 23:42:23.195140  120899 wrap.go:47] POST /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/events: (3.039775ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39404]
I0111 23:42:23.196905  120899 wrap.go:47] DELETE /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-40: (4.828214ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39408]
I0111 23:42:23.197702  120899 wrap.go:47] POST /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/events: (1.52718ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39404]
I0111 23:42:23.200400  120899 scheduling_queue.go:821] About to try and schedule pod preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-41
I0111 23:42:23.200481  120899 scheduler.go:450] Skip schedule deleting pod: preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-41
I0111 23:42:23.202245  120899 wrap.go:47] DELETE /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-41: (4.861334ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39408]
I0111 23:42:23.202959  120899 wrap.go:47] POST /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/events: (2.13956ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39404]
I0111 23:42:23.206040  120899 scheduling_queue.go:821] About to try and schedule pod preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-42
I0111 23:42:23.206080  120899 scheduler.go:450] Skip schedule deleting pod: preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-42
I0111 23:42:23.207794  120899 wrap.go:47] POST /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/events: (1.429708ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39404]
I0111 23:42:23.207906  120899 wrap.go:47] DELETE /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-42: (5.04182ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39408]
I0111 23:42:23.210976  120899 scheduling_queue.go:821] About to try and schedule pod preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-43
I0111 23:42:23.211063  120899 scheduler.go:450] Skip schedule deleting pod: preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-43
I0111 23:42:23.212093  120899 wrap.go:47] DELETE /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-43: (3.884078ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39408]
I0111 23:42:23.215813  120899 wrap.go:47] POST /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/events: (4.049812ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39404]
I0111 23:42:23.216004  120899 scheduling_queue.go:821] About to try and schedule pod preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-44
I0111 23:42:23.216080  120899 scheduler.go:450] Skip schedule deleting pod: preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-44
I0111 23:42:23.217894  120899 wrap.go:47] DELETE /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-44: (5.435291ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39408]
I0111 23:42:23.218691  120899 wrap.go:47] POST /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/events: (2.161652ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39404]
I0111 23:42:23.221223  120899 scheduling_queue.go:821] About to try and schedule pod preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-45
I0111 23:42:23.221269  120899 scheduler.go:450] Skip schedule deleting pod: preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-45
I0111 23:42:23.223123  120899 wrap.go:47] POST /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/events: (1.541107ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39404]
I0111 23:42:23.223156  120899 wrap.go:47] DELETE /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-45: (4.710387ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39408]
I0111 23:42:23.226638  120899 scheduling_queue.go:821] About to try and schedule pod preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-46
I0111 23:42:23.227771  120899 scheduler.go:450] Skip schedule deleting pod: preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-46
I0111 23:42:23.228343  120899 wrap.go:47] DELETE /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-46: (4.649277ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39408]
I0111 23:42:23.230135  120899 wrap.go:47] POST /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/events: (1.965597ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39404]
I0111 23:42:23.231949  120899 scheduling_queue.go:821] About to try and schedule pod preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-47
I0111 23:42:23.231990  120899 scheduler.go:450] Skip schedule deleting pod: preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-47
I0111 23:42:23.233824  120899 wrap.go:47] DELETE /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-47: (5.116288ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39408]
I0111 23:42:23.234596  120899 wrap.go:47] POST /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/events: (2.274922ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39404]
I0111 23:42:23.238880  120899 scheduling_queue.go:821] About to try and schedule pod preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-48
I0111 23:42:23.238964  120899 scheduler.go:450] Skip schedule deleting pod: preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-48
I0111 23:42:23.240105  120899 wrap.go:47] DELETE /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-48: (5.954244ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39408]
I0111 23:42:23.241032  120899 wrap.go:47] POST /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/events: (1.692885ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39404]
I0111 23:42:23.243554  120899 scheduling_queue.go:821] About to try and schedule pod preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-49
I0111 23:42:23.244209  120899 scheduler.go:450] Skip schedule deleting pod: preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-49
I0111 23:42:23.245682  120899 wrap.go:47] DELETE /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-49: (5.148602ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39408]
I0111 23:42:23.247554  120899 wrap.go:47] POST /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/events: (1.644913ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39404]
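The DELETE/skip pairs above are the test's teardown racing the scheduler: the harness deletes ppod-0 through ppod-49 while the scheduling queue keeps popping them, and each popped pod already carries a DeletionTimestamp, so scheduler.go drops it with "Skip schedule deleting pod" instead of running a scheduling cycle. A minimal sketch of that guard, assuming the check is simply a non-nil DeletionTimestamp (the pod literal below is illustrative, not taken from the test):

package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// skipSchedule mirrors the guard behind the "Skip schedule deleting pod"
// lines: a pod that is already being deleted is dropped, not scheduled.
func skipSchedule(pod *v1.Pod) bool {
	return pod.DeletionTimestamp != nil
}

func main() {
	now := metav1.Now()
	pod := &v1.Pod{ObjectMeta: metav1.ObjectMeta{
		Namespace:         "preemption-race",
		Name:              "ppod-8",
		DeletionTimestamp: &now, // set by the DELETE requests above
	}}
	if skipSchedule(pod) {
		fmt.Printf("Skip schedule deleting pod: %s/%s\n", pod.Namespace, pod.Name)
	}
}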
I0111 23:42:23.250482  120899 wrap.go:47] DELETE /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/rpod-0: (4.43464ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39408]
E0111 23:42:23.251821  120899 rest.go:216] >>>> caught error : &errors.StatusError{ErrStatus:v1.Status{TypeMeta:v1.TypeMeta{Kind:"", APIVersion:""}, ListMeta:v1.ListMeta{SelfLink:"", ResourceVersion:"", Continue:""}, Status:"Failure", Message:"pods \"rpod-1\" not found", Reason:"NotFound", Details:(*v1.StatusDetails)(0xc00b5337a0), Code:404}}
I0111 23:42:23.251879  120899 delete.go:145] Unable to delete from database : &errors.StatusError{ErrStatus:v1.Status{TypeMeta:v1.TypeMeta{Kind:"", APIVersion:""}, ListMeta:v1.ListMeta{SelfLink:"", ResourceVersion:"", Continue:""}, Status:"Failure", Message:"pods \"rpod-1\" not found", Reason:"NotFound", Details:(*v1.StatusDetails)(0xc00b5337a0), Code:404}}
I0111 23:42:23.251989  120899 wrap.go:47] DELETE /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/rpod-1: (1.109757ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39408]
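The DELETE for rpod-1 returns 404 because the pod was already gone, and the ">>>> caught error" / "Unable to delete from database" lines are the extra request logging this WIP PR adds around that failure. The dumped value is a standard apimachinery *errors.StatusError; a sketch of how such a NotFound status is built and detected (nothing here is specific to this PR):

package main

import (
	"fmt"

	apierrors "k8s.io/apimachinery/pkg/api/errors"
	"k8s.io/apimachinery/pkg/runtime/schema"
)

func main() {
	// Construct the same Status the apiserver returned above:
	// Reason "NotFound", Code 404, message `pods "rpod-1" not found`.
	err := apierrors.NewNotFound(schema.GroupResource{Resource: "pods"}, "rpod-1")
	fmt.Println(err)                       // pods "rpod-1" not found
	fmt.Println(apierrors.IsNotFound(err)) // true
}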
I0111 23:42:23.257189  120899 wrap.go:47] DELETE /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/preemptor-pod: (4.768167ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39408]
I0111 23:42:23.259940  120899 get.go:70] Unable to get resource : &errors.StatusError{ErrStatus:v1.Status{TypeMeta:v1.TypeMeta{Kind:"", APIVersion:""}, ListMeta:v1.ListMeta{SelfLink:"", ResourceVersion:"", Continue:""}, Status:"Failure", Message:"pods \"ppod-0\" not found", Reason:"NotFound", Details:(*v1.StatusDetails)(0xc00b4d9da0), Code:404}}
I0111 23:42:23.260269  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-0: (1.29969ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39408]
I0111 23:42:23.263034  120899 get.go:70] Unable to get resource : &errors.StatusError{ErrStatus:v1.Status{TypeMeta:v1.TypeMeta{Kind:"", APIVersion:""}, ListMeta:v1.ListMeta{SelfLink:"", ResourceVersion:"", Continue:""}, Status:"Failure", Message:"pods \"ppod-1\" not found", Reason:"NotFound", Details:(*v1.StatusDetails)(0xc00b27e0c0), Code:404}}
I0111 23:42:23.263251  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-1: (1.22099ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39408]
I0111 23:42:23.265974  120899 get.go:70] Unable to get resource : &errors.StatusError{ErrStatus:v1.Status{TypeMeta:v1.TypeMeta{Kind:"", APIVersion:""}, ListMeta:v1.ListMeta{SelfLink:"", ResourceVersion:"", Continue:""}, Status:"Failure", Message:"pods \"ppod-2\" not found", Reason:"NotFound", Details:(*v1.StatusDetails)(0xc00b2eb020), Code:404}}
I0111 23:42:23.266206  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-2: (1.299671ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39408]
I0111 23:42:23.268932  120899 get.go:70] Unable to get resource : &errors.StatusError{ErrStatus:v1.Status{TypeMeta:v1.TypeMeta{Kind:"", APIVersion:""}, ListMeta:v1.ListMeta{SelfLink:"", ResourceVersion:"", Continue:""}, Status:"Failure", Message:"pods \"ppod-3\" not found", Reason:"NotFound", Details:(*v1.StatusDetails)(0xc00b15e000), Code:404}}
I0111 23:42:23.269083  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-3: (1.201959ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39408]
I0111 23:42:23.271918  120899 get.go:70] Unable to get resource : &errors.StatusError{ErrStatus:v1.Status{TypeMeta:v1.TypeMeta{Kind:"", APIVersion:""}, ListMeta:v1.ListMeta{SelfLink:"", ResourceVersion:"", Continue:""}, Status:"Failure", Message:"pods \"ppod-4\" not found", Reason:"NotFound", Details:(*v1.StatusDetails)(0xc00b27e1e0), Code:404}}
I0111 23:42:23.272148  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-4: (1.36362ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39408]
I0111 23:42:23.274952  120899 get.go:70] Unable to get resource : &errors.StatusError{ErrStatus:v1.Status{TypeMeta:v1.TypeMeta{Kind:"", APIVersion:""}, ListMeta:v1.ListMeta{SelfLink:"", ResourceVersion:"", Continue:""}, Status:"Failure", Message:"pods \"ppod-5\" not found", Reason:"NotFound", Details:(*v1.StatusDetails)(0xc00b2147e0), Code:404}}
I0111 23:42:23.275107  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-5: (1.319606ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39408]
I0111 23:42:23.277726  120899 get.go:70] Unable to get resource : &errors.StatusError{ErrStatus:v1.Status{TypeMeta:v1.TypeMeta{Kind:"", APIVersion:""}, ListMeta:v1.ListMeta{SelfLink:"", ResourceVersion:"", Continue:""}, Status:"Failure", Message:"pods \"ppod-6\" not found", Reason:"NotFound", Details:(*v1.StatusDetails)(0xc00b27e660), Code:404}}
I0111 23:42:23.277936  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-6: (1.27175ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39408]
I0111 23:42:23.280606  120899 get.go:70] Unable to get resource : &errors.StatusError{ErrStatus:v1.Status{TypeMeta:v1.TypeMeta{Kind:"", APIVersion:""}, ListMeta:v1.ListMeta{SelfLink:"", ResourceVersion:"", Continue:""}, Status:"Failure", Message:"pods \"ppod-7\" not found", Reason:"NotFound", Details:(*v1.StatusDetails)(0xc00b15fe60), Code:404}}
I0111 23:42:23.280764  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-7: (1.143823ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39408]
I0111 23:42:23.283350  120899 get.go:70] Unable to get resource : &errors.StatusError{ErrStatus:v1.Status{TypeMeta:v1.TypeMeta{Kind:"", APIVersion:""}, ListMeta:v1.ListMeta{SelfLink:"", ResourceVersion:"", Continue:""}, Status:"Failure", Message:"pods \"ppod-8\" not found", Reason:"NotFound", Details:(*v1.StatusDetails)(0xc00b14a000), Code:404}}
I0111 23:42:23.283577  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-8: (1.161524ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39408]
I0111 23:42:23.286167  120899 get.go:70] Unable to get resource : &errors.StatusError{ErrStatus:v1.Status{TypeMeta:v1.TypeMeta{Kind:"", APIVersion:""}, ListMeta:v1.ListMeta{SelfLink:"", ResourceVersion:"", Continue:""}, Status:"Failure", Message:"pods \"ppod-9\" not found", Reason:"NotFound", Details:(*v1.StatusDetails)(0xc00b14a240), Code:404}}
I0111 23:42:23.286461  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-9: (1.291059ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39408]
I0111 23:42:23.288926  120899 get.go:70] Unable to get resource : &errors.StatusError{ErrStatus:v1.Status{TypeMeta:v1.TypeMeta{Kind:"", APIVersion:""}, ListMeta:v1.ListMeta{SelfLink:"", ResourceVersion:"", Continue:""}, Status:"Failure", Message:"pods \"ppod-10\" not found", Reason:"NotFound", Details:(*v1.StatusDetails)(0xc00b1ea7e0), Code:404}}
I0111 23:42:23.289054  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-10: (1.057599ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39408]
I0111 23:42:23.291660  120899 get.go:70] Unable to get resource : &errors.StatusError{ErrStatus:v1.Status{TypeMeta:v1.TypeMeta{Kind:"", APIVersion:""}, ListMeta:v1.ListMeta{SelfLink:"", ResourceVersion:"", Continue:""}, Status:"Failure", Message:"pods \"ppod-11\" not found", Reason:"NotFound", Details:(*v1.StatusDetails)(0xc00b14a900), Code:404}}
I0111 23:42:23.291978  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-11: (1.425693ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39408]
I0111 23:42:23.381855  120899 get.go:70] Unable to get resource : &errors.StatusError{ErrStatus:v1.Status{TypeMeta:v1.TypeMeta{Kind:"", APIVersion:""}, ListMeta:v1.ListMeta{SelfLink:"", ResourceVersion:"", Continue:""}, Status:"Failure", Message:"pods \"ppod-12\" not found", Reason:"NotFound", Details:(*v1.StatusDetails)(0xc00b215e60), Code:404}}
I0111 23:42:23.382360  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-12: (88.696281ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39408]
I0111 23:42:23.385397  120899 get.go:70] Unable to get resource : &errors.StatusError{ErrStatus:v1.Status{TypeMeta:v1.TypeMeta{Kind:"", APIVersion:""}, ListMeta:v1.ListMeta{SelfLink:"", ResourceVersion:"", Continue:""}, Status:"Failure", Message:"pods \"ppod-13\" not found", Reason:"NotFound", Details:(*v1.StatusDetails)(0xc00b27f200), Code:404}}
I0111 23:42:23.385886  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-13: (1.734994ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39408]
I0111 23:42:23.391548  120899 get.go:70] Unable to get resource : &errors.StatusError{ErrStatus:v1.Status{TypeMeta:v1.TypeMeta{Kind:"", APIVersion:""}, ListMeta:v1.ListMeta{SelfLink:"", ResourceVersion:"", Continue:""}, Status:"Failure", Message:"pods \"ppod-14\" not found", Reason:"NotFound", Details:(*v1.StatusDetails)(0xc00b27f380), Code:404}}
I0111 23:42:23.391905  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-14: (4.211691ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39408]
I0111 23:42:23.401242  120899 get.go:70] Unable to get resource : &errors.StatusError{ErrStatus:v1.Status{TypeMeta:v1.TypeMeta{Kind:"", APIVersion:""}, ListMeta:v1.ListMeta{SelfLink:"", ResourceVersion:"", Continue:""}, Status:"Failure", Message:"pods \"ppod-15\" not found", Reason:"NotFound", Details:(*v1.StatusDetails)(0xc00b27f980), Code:404}}
I0111 23:42:23.401502  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-15: (5.713122ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39408]
I0111 23:42:23.404522  120899 get.go:70] Unable to get resource : &errors.StatusError{ErrStatus:v1.Status{TypeMeta:v1.TypeMeta{Kind:"", APIVersion:""}, ListMeta:v1.ListMeta{SelfLink:"", ResourceVersion:"", Continue:""}, Status:"Failure", Message:"pods \"ppod-16\" not found", Reason:"NotFound", Details:(*v1.StatusDetails)(0xc00b14ae40), Code:404}}
I0111 23:42:23.404812  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-16: (1.437632ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39408]
I0111 23:42:23.407621  120899 get.go:70] Unable to get resource : &errors.StatusError{ErrStatus:v1.Status{TypeMeta:v1.TypeMeta{Kind:"", APIVersion:""}, ListMeta:v1.ListMeta{SelfLink:"", ResourceVersion:"", Continue:""}, Status:"Failure", Message:"pods \"ppod-17\" not found", Reason:"NotFound", Details:(*v1.StatusDetails)(0xc00b14b0e0), Code:404}}
I0111 23:42:23.408071  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-17: (1.60275ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39408]
I0111 23:42:23.410784  120899 get.go:70] Unable to get resource : &errors.StatusError{ErrStatus:v1.Status{TypeMeta:v1.TypeMeta{Kind:"", APIVersion:""}, ListMeta:v1.ListMeta{SelfLink:"", ResourceVersion:"", Continue:""}, Status:"Failure", Message:"pods \"ppod-18\" not found", Reason:"NotFound", Details:(*v1.StatusDetails)(0xc00b1ebb60), Code:404}}
I0111 23:42:23.411014  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-18: (1.296986ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39408]
I0111 23:42:23.414966  120899 get.go:70] Unable to get resource : &errors.StatusError{ErrStatus:v1.Status{TypeMeta:v1.TypeMeta{Kind:"", APIVersion:""}, ListMeta:v1.ListMeta{SelfLink:"", ResourceVersion:"", Continue:""}, Status:"Failure", Message:"pods \"ppod-19\" not found", Reason:"NotFound", Details:(*v1.StatusDetails)(0xc00b0ac180), Code:404}}
I0111 23:42:23.415219  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-19: (2.569239ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39408]
I0111 23:42:23.418857  120899 get.go:70] Unable to get resource : &errors.StatusError{ErrStatus:v1.Status{TypeMeta:v1.TypeMeta{Kind:"", APIVersion:""}, ListMeta:v1.ListMeta{SelfLink:"", ResourceVersion:"", Continue:""}, Status:"Failure", Message:"pods \"ppod-20\" not found", Reason:"NotFound", Details:(*v1.StatusDetails)(0xc00b0ac9c0), Code:404}}
I0111 23:42:23.420176  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-20: (2.499736ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39408]
I0111 23:42:23.422898  120899 get.go:70] Unable to get resource : &errors.StatusError{ErrStatus:v1.Status{TypeMeta:v1.TypeMeta{Kind:"", APIVersion:""}, ListMeta:v1.ListMeta{SelfLink:"", ResourceVersion:"", Continue:""}, Status:"Failure", Message:"pods \"ppod-21\" not found", Reason:"NotFound", Details:(*v1.StatusDetails)(0xc00b0acfc0), Code:404}}
I0111 23:42:23.423119  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-21: (1.290914ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39408]
I0111 23:42:23.426423  120899 get.go:70] Unable to get resource : &errors.StatusError{ErrStatus:v1.Status{TypeMeta:v1.TypeMeta{Kind:"", APIVersion:""}, ListMeta:v1.ListMeta{SelfLink:"", ResourceVersion:"", Continue:""}, Status:"Failure", Message:"pods \"ppod-22\" not found", Reason:"NotFound", Details:(*v1.StatusDetails)(0xc00b0e8600), Code:404}}
I0111 23:42:23.426673  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-22: (1.875919ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39408]
I0111 23:42:23.429427  120899 get.go:70] Unable to get resource : &errors.StatusError{ErrStatus:v1.Status{TypeMeta:v1.TypeMeta{Kind:"", APIVersion:""}, ListMeta:v1.ListMeta{SelfLink:"", ResourceVersion:"", Continue:""}, Status:"Failure", Message:"pods \"ppod-23\" not found", Reason:"NotFound", Details:(*v1.StatusDetails)(0xc00b0ad320), Code:404}}
I0111 23:42:23.429670  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-23: (1.388779ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39408]
I0111 23:42:23.432531  120899 get.go:70] Unable to get resource : &errors.StatusError{ErrStatus:v1.Status{TypeMeta:v1.TypeMeta{Kind:"", APIVersion:""}, ListMeta:v1.ListMeta{SelfLink:"", ResourceVersion:"", Continue:""}, Status:"Failure", Message:"pods \"ppod-24\" not found", Reason:"NotFound", Details:(*v1.StatusDetails)(0xc00b0ad560), Code:404}}
I0111 23:42:23.432787  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-24: (1.496884ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39408]
I0111 23:42:23.440537  120899 get.go:70] Unable to get resource : &errors.StatusError{ErrStatus:v1.Status{TypeMeta:v1.TypeMeta{Kind:"", APIVersion:""}, ListMeta:v1.ListMeta{SelfLink:"", ResourceVersion:"", Continue:""}, Status:"Failure", Message:"pods \"ppod-25\" not found", Reason:"NotFound", Details:(*v1.StatusDetails)(0xc00b1ebf80), Code:404}}
I0111 23:42:23.440737  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-25: (1.405641ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39408]
I0111 23:42:23.443316  120899 get.go:70] Unable to get resource : &errors.StatusError{ErrStatus:v1.Status{TypeMeta:v1.TypeMeta{Kind:"", APIVersion:""}, ListMeta:v1.ListMeta{SelfLink:"", ResourceVersion:"", Continue:""}, Status:"Failure", Message:"pods \"ppod-26\" not found", Reason:"NotFound", Details:(*v1.StatusDetails)(0xc00b09cc60), Code:404}}
I0111 23:42:23.443459  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-26: (1.116679ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39408]
I0111 23:42:23.446141  120899 get.go:70] Unable to get resource : &errors.StatusError{ErrStatus:v1.Status{TypeMeta:v1.TypeMeta{Kind:"", APIVersion:""}, ListMeta:v1.ListMeta{SelfLink:"", ResourceVersion:"", Continue:""}, Status:"Failure", Message:"pods \"ppod-27\" not found", Reason:"NotFound", Details:(*v1.StatusDetails)(0xc00afce6c0), Code:404}}
I0111 23:42:23.446327  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-27: (1.203275ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39408]
I0111 23:42:23.448935  120899 get.go:70] Unable to get resource : &errors.StatusError{ErrStatus:v1.Status{TypeMeta:v1.TypeMeta{Kind:"", APIVersion:""}, ListMeta:v1.ListMeta{SelfLink:"", ResourceVersion:"", Continue:""}, Status:"Failure", Message:"pods \"ppod-28\" not found", Reason:"NotFound", Details:(*v1.StatusDetails)(0xc00b4b9500), Code:404}}
I0111 23:42:23.449080  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-28: (1.085488ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39408]
I0111 23:42:23.451580  120899 get.go:70] Unable to get resource : &errors.StatusError{ErrStatus:v1.Status{TypeMeta:v1.TypeMeta{Kind:"", APIVersion:""}, ListMeta:v1.ListMeta{SelfLink:"", ResourceVersion:"", Continue:""}, Status:"Failure", Message:"pods \"ppod-29\" not found", Reason:"NotFound", Details:(*v1.StatusDetails)(0xc00afcf4a0), Code:404}}
I0111 23:42:23.451713  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-29: (1.043707ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39408]
I0111 23:42:23.456055  120899 get.go:70] Unable to get resource : &errors.StatusError{ErrStatus:v1.Status{TypeMeta:v1.TypeMeta{Kind:"", APIVersion:""}, ListMeta:v1.ListMeta{SelfLink:"", ResourceVersion:"", Continue:""}, Status:"Failure", Message:"pods \"ppod-30\" not found", Reason:"NotFound", Details:(*v1.StatusDetails)(0xc00b4b9bc0), Code:404}}
I0111 23:42:23.456239  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-30: (2.296739ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39408]
I0111 23:42:23.458847  120899 get.go:70] Unable to get resource : &errors.StatusError{ErrStatus:v1.Status{TypeMeta:v1.TypeMeta{Kind:"", APIVersion:""}, ListMeta:v1.ListMeta{SelfLink:"", ResourceVersion:"", Continue:""}, Status:"Failure", Message:"pods \"ppod-31\" not found", Reason:"NotFound", Details:(*v1.StatusDetails)(0xc00af86180), Code:404}}
I0111 23:42:23.458999  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-31: (1.159486ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39408]
I0111 23:42:23.462368  120899 get.go:70] Unable to get resource : &errors.StatusError{ErrStatus:v1.Status{TypeMeta:v1.TypeMeta{Kind:"", APIVersion:""}, ListMeta:v1.ListMeta{SelfLink:"", ResourceVersion:"", Continue:""}, Status:"Failure", Message:"pods \"ppod-32\" not found", Reason:"NotFound", Details:(*v1.StatusDetails)(0xc00b0e8d20), Code:404}}
I0111 23:42:23.462538  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-32: (2.038835ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39408]
I0111 23:42:23.469135  120899 get.go:70] Unable to get resource : &errors.StatusError{ErrStatus:v1.Status{TypeMeta:v1.TypeMeta{Kind:"", APIVersion:""}, ListMeta:v1.ListMeta{SelfLink:"", ResourceVersion:"", Continue:""}, Status:"Failure", Message:"pods \"ppod-33\" not found", Reason:"NotFound", Details:(*v1.StatusDetails)(0xc00b0909c0), Code:404}}
I0111 23:42:23.469374  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-33: (5.315711ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39408]
I0111 23:42:23.472215  120899 get.go:70] Unable to get resource : &errors.StatusError{ErrStatus:v1.Status{TypeMeta:v1.TypeMeta{Kind:"", APIVersion:""}, ListMeta:v1.ListMeta{SelfLink:"", ResourceVersion:"", Continue:""}, Status:"Failure", Message:"pods \"ppod-34\" not found", Reason:"NotFound", Details:(*v1.StatusDetails)(0xc00b090d80), Code:404}}
I0111 23:42:23.472426  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-34: (1.421857ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39408]
I0111 23:42:23.474956  120899 get.go:70] Unable to get resource : &errors.StatusError{ErrStatus:v1.Status{TypeMeta:v1.TypeMeta{Kind:"", APIVersion:""}, ListMeta:v1.ListMeta{SelfLink:"", ResourceVersion:"", Continue:""}, Status:"Failure", Message:"pods \"ppod-35\" not found", Reason:"NotFound", Details:(*v1.StatusDetails)(0xc00af65020), Code:404}}
I0111 23:42:23.475112  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-35: (1.132865ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39408]
I0111 23:42:23.477549  120899 get.go:70] Unable to get resource : &errors.StatusError{ErrStatus:v1.Status{TypeMeta:v1.TypeMeta{Kind:"", APIVersion:""}, ListMeta:v1.ListMeta{SelfLink:"", ResourceVersion:"", Continue:""}, Status:"Failure", Message:"pods \"ppod-36\" not found", Reason:"NotFound", Details:(*v1.StatusDetails)(0xc00b0918c0), Code:404}}
I0111 23:42:23.477683  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-36: (1.024049ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39408]
I0111 23:42:23.481620  120899 get.go:70] Unable to get resource : &errors.StatusError{ErrStatus:v1.Status{TypeMeta:v1.TypeMeta{Kind:"", APIVersion:""}, ListMeta:v1.ListMeta{SelfLink:"", ResourceVersion:"", Continue:""}, Status:"Failure", Message:"pods \"ppod-37\" not found", Reason:"NotFound", Details:(*v1.StatusDetails)(0xc00b091c80), Code:404}}
I0111 23:42:23.481789  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-37: (1.171455ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39408]
I0111 23:42:23.484564  120899 get.go:70] Unable to get resource : &errors.StatusError{ErrStatus:v1.Status{TypeMeta:v1.TypeMeta{Kind:"", APIVersion:""}, ListMeta:v1.ListMeta{SelfLink:"", ResourceVersion:"", Continue:""}, Status:"Failure", Message:"pods \"ppod-38\" not found", Reason:"NotFound", Details:(*v1.StatusDetails)(0xc00b4b9ec0), Code:404}}
I0111 23:42:23.484719  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-38: (1.206663ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39408]
I0111 23:42:23.487479  120899 get.go:70] Unable to get resource : &errors.StatusError{ErrStatus:v1.Status{TypeMeta:v1.TypeMeta{Kind:"", APIVersion:""}, ListMeta:v1.ListMeta{SelfLink:"", ResourceVersion:"", Continue:""}, Status:"Failure", Message:"pods \"ppod-39\" not found", Reason:"NotFound", Details:(*v1.StatusDetails)(0xc00ae8a1e0), Code:404}}
I0111 23:42:23.487618  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-39: (1.137477ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39408]
I0111 23:42:23.490153  120899 get.go:70] Unable to get resource : &errors.StatusError{ErrStatus:v1.Status{TypeMeta:v1.TypeMeta{Kind:"", APIVersion:""}, ListMeta:v1.ListMeta{SelfLink:"", ResourceVersion:"", Continue:""}, Status:"Failure", Message:"pods \"ppod-40\" not found", Reason:"NotFound", Details:(*v1.StatusDetails)(0xc00ae600c0), Code:404}}
I0111 23:42:23.490322  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-40: (1.086497ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39408]
I0111 23:42:23.492899  120899 get.go:70] Unable to get resource : &errors.StatusError{ErrStatus:v1.Status{TypeMeta:v1.TypeMeta{Kind:"", APIVersion:""}, ListMeta:v1.ListMeta{SelfLink:"", ResourceVersion:"", Continue:""}, Status:"Failure", Message:"pods \"ppod-41\" not found", Reason:"NotFound", Details:(*v1.StatusDetails)(0xc00ae8aae0), Code:404}}
I0111 23:42:23.493032  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-41: (1.059479ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39408]
I0111 23:42:23.495633  120899 get.go:70] Unable to get resource : &errors.StatusError{ErrStatus:v1.Status{TypeMeta:v1.TypeMeta{Kind:"", APIVersion:""}, ListMeta:v1.ListMeta{SelfLink:"", ResourceVersion:"", Continue:""}, Status:"Failure", Message:"pods \"ppod-42\" not found", Reason:"NotFound", Details:(*v1.StatusDetails)(0xc00af65ec0), Code:404}}
I0111 23:42:23.495886  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-42: (1.170053ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39408]
I0111 23:42:23.501359  120899 get.go:70] Unable to get resource : &errors.StatusError{ErrStatus:v1.Status{TypeMeta:v1.TypeMeta{Kind:"", APIVersion:""}, ListMeta:v1.ListMeta{SelfLink:"", ResourceVersion:"", Continue:""}, Status:"Failure", Message:"pods \"ppod-43\" not found", Reason:"NotFound", Details:(*v1.StatusDetails)(0xc00ae8acc0), Code:404}}
I0111 23:42:23.501626  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-43: (4.094575ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39408]
I0111 23:42:23.504407  120899 get.go:70] Unable to get resource : &errors.StatusError{ErrStatus:v1.Status{TypeMeta:v1.TypeMeta{Kind:"", APIVersion:""}, ListMeta:v1.ListMeta{SelfLink:"", ResourceVersion:"", Continue:""}, Status:"Failure", Message:"pods \"ppod-44\" not found", Reason:"NotFound", Details:(*v1.StatusDetails)(0xc00b3dfb60), Code:404}}
I0111 23:42:23.504568  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-44: (1.327708ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39408]
I0111 23:42:23.507672  120899 get.go:70] Unable to get resource : &errors.StatusError{ErrStatus:v1.Status{TypeMeta:v1.TypeMeta{Kind:"", APIVersion:""}, ListMeta:v1.ListMeta{SelfLink:"", ResourceVersion:"", Continue:""}, Status:"Failure", Message:"pods \"ppod-45\" not found", Reason:"NotFound", Details:(*v1.StatusDetails)(0xc00b3dfe60), Code:404}}
I0111 23:42:23.507850  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-45: (1.384487ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39408]
I0111 23:42:23.510927  120899 get.go:70] Unable to get resource : &errors.StatusError{ErrStatus:v1.Status{TypeMeta:v1.TypeMeta{Kind:"", APIVersion:""}, ListMeta:v1.ListMeta{SelfLink:"", ResourceVersion:"", Continue:""}, Status:"Failure", Message:"pods \"ppod-46\" not found", Reason:"NotFound", Details:(*v1.StatusDetails)(0xc00ae618c0), Code:404}}
I0111 23:42:23.511575  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-46: (1.697359ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39408]
I0111 23:42:23.517985  120899 get.go:70] Unable to get resource : &errors.StatusError{ErrStatus:v1.Status{TypeMeta:v1.TypeMeta{Kind:"", APIVersion:""}, ListMeta:v1.ListMeta{SelfLink:"", ResourceVersion:"", Continue:""}, Status:"Failure", Message:"pods \"ppod-47\" not found", Reason:"NotFound", Details:(*v1.StatusDetails)(0xc00ae8b440), Code:404}}
I0111 23:42:23.518183  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-47: (1.373954ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39408]
I0111 23:42:23.523269  120899 get.go:70] Unable to get resource : &errors.StatusError{ErrStatus:v1.Status{TypeMeta:v1.TypeMeta{Kind:"", APIVersion:""}, ListMeta:v1.ListMeta{SelfLink:"", ResourceVersion:"", Continue:""}, Status:"Failure", Message:"pods \"ppod-48\" not found", Reason:"NotFound", Details:(*v1.StatusDetails)(0xc00ae14d20), Code:404}}
I0111 23:42:23.523472  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-48: (1.426894ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39408]
I0111 23:42:23.526322  120899 get.go:70] Unable to get resource : &errors.StatusError{ErrStatus:v1.Status{TypeMeta:v1.TypeMeta{Kind:"", APIVersion:""}, ListMeta:v1.ListMeta{SelfLink:"", ResourceVersion:"", Continue:""}, Status:"Failure", Message:"pods \"ppod-49\" not found", Reason:"NotFound", Details:(*v1.StatusDetails)(0xc00ae8b6e0), Code:404}}
I0111 23:42:23.526496  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-49: (1.414909ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39408]
I0111 23:42:23.529533  120899 get.go:70] Unable to get resource : &errors.StatusError{ErrStatus:v1.Status{TypeMeta:v1.TypeMeta{Kind:"", APIVersion:""}, ListMeta:v1.ListMeta{SelfLink:"", ResourceVersion:"", Continue:""}, Status:"Failure", Message:"pods \"rpod-0\" not found", Reason:"NotFound", Details:(*v1.StatusDetails)(0xc00ae8b920), Code:404}}
I0111 23:42:23.529712  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/rpod-0: (1.358551ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39408]
I0111 23:42:23.532617  120899 get.go:70] Unable to get resource : &errors.StatusError{ErrStatus:v1.Status{TypeMeta:v1.TypeMeta{Kind:"", APIVersion:""}, ListMeta:v1.ListMeta{SelfLink:"", ResourceVersion:"", Continue:""}, Status:"Failure", Message:"pods \"rpod-1\" not found", Reason:"NotFound", Details:(*v1.StatusDetails)(0xc00af87d40), Code:404}}
I0111 23:42:23.532909  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/rpod-1: (1.419878ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39408]
I0111 23:42:23.538522  120899 get.go:70] Unable to get resource : &errors.StatusError{ErrStatus:v1.Status{TypeMeta:v1.TypeMeta{Kind:"", APIVersion:""}, ListMeta:v1.ListMeta{SelfLink:"", ResourceVersion:"", Continue:""}, Status:"Failure", Message:"pods \"preemptor-pod\" not found", Reason:"NotFound", Details:(*v1.StatusDetails)(0xc00ae8bf20), Code:404}}
I0111 23:42:23.538693  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/preemptor-pod: (1.420837ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39408]
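
The run of GET → 404 pairs above is the test's cleanup barrier: before creating the next batch of pods it polls every pod from the previous round until the apiserver answers NotFound. A minimal sketch of that pattern with client-go of this era (function name, interval, and timeout are illustrative, not the test's actual helper):

package sketch

import (
    "fmt"
    "time"

    apierrors "k8s.io/apimachinery/pkg/api/errors"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/apimachinery/pkg/util/wait"
    "k8s.io/client-go/kubernetes"
)

// waitForPodsGone polls each pod until the apiserver returns 404,
// which is exactly the GET/NotFound traffic in the log above.
func waitForPodsGone(client kubernetes.Interface, ns string, names []string) error {
    for _, name := range names {
        err := wait.Poll(100*time.Millisecond, 30*time.Second, func() (bool, error) {
            _, err := client.CoreV1().Pods(ns).Get(name, metav1.GetOptions{})
            if apierrors.IsNotFound(err) {
                return true, nil // 404: cleanup finished for this pod
            }
            return false, err // nil err means the pod still exists; keep polling
        })
        if err != nil {
            return fmt.Errorf("pod %s still present: %v", name, err)
        }
    }
    return nil
}
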
I0111 23:42:23.541862  120899 wrap.go:47] POST /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods: (2.453092ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39408]
I0111 23:42:23.542203  120899 scheduling_queue.go:821] About to try and schedule pod preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/rpod-0
I0111 23:42:23.542606  120899 scheduler.go:454] Attempting to schedule pod: preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/rpod-0
I0111 23:42:23.542825  120899 scheduler_binder.go:211] AssumePodVolumes for pod "preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/rpod-0", node "node1"
I0111 23:42:23.542876  120899 scheduler_binder.go:221] AssumePodVolumes for pod "preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/rpod-0", node "node1": all PVCs bound and nothing to do
I0111 23:42:23.542971  120899 factory.go:1166] Attempting to bind rpod-0 to node1
I0111 23:42:23.545609  120899 wrap.go:47] POST /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/rpod-0/binding: (2.267203ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39408]
I0111 23:42:23.546273  120899 wrap.go:47] POST /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods: (2.915984ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39404]
I0111 23:42:23.546774  120899 scheduler.go:569] pod preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/rpod-0 is bound successfully on node node1, 1 nodes evaluated, 1 nodes were found feasible
I0111 23:42:23.546812  120899 scheduling_queue.go:821] About to try and schedule pod preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/rpod-1
I0111 23:42:23.547082  120899 scheduler.go:454] Attempting to schedule pod: preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/rpod-1
I0111 23:42:23.547326  120899 scheduler_binder.go:211] AssumePodVolumes for pod "preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/rpod-1", node "node1"
I0111 23:42:23.547398  120899 scheduler_binder.go:221] AssumePodVolumes for pod "preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/rpod-1", node "node1": all PVCs bound and nothing to do
I0111 23:42:23.547693  120899 factory.go:1166] Attempting to bind rpod-1 to node1
I0111 23:42:23.550202  120899 wrap.go:47] POST /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/rpod-1/binding: (2.226387ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39404]
I0111 23:42:23.550421  120899 scheduler.go:569] pod preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/rpod-1 is bound successfully on node node1, 1 nodes evaluated, 1 nodes were found feasible
I0111 23:42:23.550933  120899 wrap.go:47] POST /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/events: (3.211474ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39408]
I0111 23:42:23.553240  120899 wrap.go:47] POST /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/events: (1.742586ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39408]
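
The rpod-0/rpod-1 sequence above is the scheduler's happy path: assume volumes, POST a Binding subresource (the 201s on .../pods/<name>/binding), then log "bound successfully". A minimal sketch of that bind call using the typed clientset (not the scheduler's actual code path in factory.go):

package sketch

import (
    corev1 "k8s.io/api/core/v1"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/client-go/kubernetes"
)

// bindPodToNode issues "POST .../pods/<pod>/binding", the request that
// shows up as a 201 in the wrap.go lines above.
func bindPodToNode(client kubernetes.Interface, ns, pod, node string) error {
    return client.CoreV1().Pods(ns).Bind(&corev1.Binding{
        ObjectMeta: metav1.ObjectMeta{Namespace: ns, Name: pod},
        Target:     corev1.ObjectReference{Kind: "Node", Name: node},
    })
}
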
I0111 23:42:23.649127  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/rpod-0: (2.008896ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39408]
I0111 23:42:23.752452  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/rpod-1: (2.449323ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39408]
I0111 23:42:23.752782  120899 preemption_test.go:561] Creating the preemptor pod...
I0111 23:42:23.755544  120899 scheduling_queue.go:821] About to try and schedule pod preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/preemptor-pod
I0111 23:42:23.755612  120899 scheduler.go:454] Attempting to schedule pod: preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/preemptor-pod
I0111 23:42:23.755919  120899 factory.go:1070] Unable to schedule preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/preemptor-pod: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0111 23:42:23.756005  120899 factory.go:1175] Updating pod condition for preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/preemptor-pod to (PodScheduled==False, Reason=Unschedulable)
I0111 23:42:23.755584  120899 wrap.go:47] POST /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods: (2.537939ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39408]
I0111 23:42:23.757451  120899 preemption_test.go:567] Creating additional pods...
I0111 23:42:23.759729  120899 wrap.go:47] POST /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods: (2.02575ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39408]
I0111 23:42:23.760248  120899 wrap.go:47] PUT /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/preemptor-pod/status: (3.397601ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39404]
I0111 23:42:23.761021  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/preemptor-pod: (2.814448ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39444]
I0111 23:42:23.762192  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/preemptor-pod: (1.496465ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39404]
I0111 23:42:23.763803  120899 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0111 23:42:23.762192  120899 wrap.go:47] POST /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods: (1.974571ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39408]
I0111 23:42:23.766582  120899 wrap.go:47] PUT /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/preemptor-pod/status: (2.325277ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39404]
I0111 23:42:23.767197  120899 wrap.go:47] POST /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/events: (10.331517ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39446]
I0111 23:42:23.769563  120899 wrap.go:47] POST /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods: (3.683507ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39408]
I0111 23:42:23.772470  120899 wrap.go:47] POST /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods: (2.366861ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39446]
I0111 23:42:23.774861  120899 wrap.go:47] POST /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods: (1.910509ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39446]
I0111 23:42:23.776777  120899 wrap.go:47] DELETE /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/rpod-1: (9.792522ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39404]
I0111 23:42:23.779486  120899 wrap.go:47] POST /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/events: (2.329726ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39404]
I0111 23:42:23.779497  120899 wrap.go:47] POST /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods: (3.830096ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39446]
I0111 23:42:23.782492  120899 wrap.go:47] POST /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods: (2.11193ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39404]
I0111 23:42:23.785018  120899 wrap.go:47] POST /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods: (2.009747ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39404]
I0111 23:42:23.787878  120899 wrap.go:47] POST /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods: (2.094564ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39404]
I0111 23:42:23.806711  120899 scheduling_queue.go:821] About to try and schedule pod preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/preemptor-pod
I0111 23:42:23.806760  120899 scheduler.go:454] Attempting to schedule pod: preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/preemptor-pod
I0111 23:42:23.806954  120899 scheduler_binder.go:211] AssumePodVolumes for pod "preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/preemptor-pod", node "node1"
I0111 23:42:23.806967  120899 scheduler_binder.go:221] AssumePodVolumes for pod "preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/preemptor-pod", node "node1": all PVCs bound and nothing to do
I0111 23:42:23.807007  120899 factory.go:1166] Attempting to bind preemptor-pod to node1
I0111 23:42:23.808360  120899 scheduling_queue.go:821] About to try and schedule pod preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-8
I0111 23:42:23.808380  120899 scheduler.go:454] Attempting to schedule pod: preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-8
I0111 23:42:23.808494  120899 factory.go:1070] Unable to schedule preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-8: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0111 23:42:23.808537  120899 factory.go:1175] Updating pod condition for preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-8 to (PodScheduled==False, Reason=Unschedulable)
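
Every ppod-* from here on fails the fit check the same way: node1's allocatable CPU and memory are already consumed, so the predicate reports "Insufficient cpu, Insufficient memory" and the pod is parked with PodScheduled=False/Unschedulable until preemption frees capacity. A hedged sketch of the kind of pod spec that produces this, with made-up quantities:

package sketch

import (
    corev1 "k8s.io/api/core/v1"
    "k8s.io/apimachinery/pkg/api/resource"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// podWithRequests returns a pod whose requests exceed what is left on the
// node, so scheduling fails with "Insufficient cpu, Insufficient memory".
// Image and quantities are illustrative, not the test's actual values.
func podWithRequests(ns, name string) *corev1.Pod {
    return &corev1.Pod{
        ObjectMeta: metav1.ObjectMeta{Namespace: ns, Name: name},
        Spec: corev1.PodSpec{
            Containers: []corev1.Container{{
                Name:  "pause",
                Image: "k8s.gcr.io/pause:3.1",
                Resources: corev1.ResourceRequirements{
                    Requests: corev1.ResourceList{
                        corev1.ResourceCPU:    resource.MustParse("2"),
                        corev1.ResourceMemory: resource.MustParse("2Gi"),
                    },
                },
            }},
        },
    }
}
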
I0111 23:42:23.812165  120899 wrap.go:47] POST /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/events: (2.203914ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39452]
I0111 23:42:23.813019  120899 wrap.go:47] POST /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods: (24.526223ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39404]
I0111 23:42:23.815707  120899 wrap.go:47] POST /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods: (2.254946ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39404]
I0111 23:42:23.816024  120899 wrap.go:47] POST /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/preemptor-pod/binding: (8.585983ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39444]
I0111 23:42:23.816475  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-8: (7.016816ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39450]
I0111 23:42:23.816891  120899 wrap.go:47] PUT /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-8/status: (4.824855ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39448]
I0111 23:42:23.818331  120899 scheduler.go:569] pod preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/preemptor-pod is bound successfully on node node1, 1 nodes evaluated, 1 nodes were found feasible
I0111 23:42:23.822988  120899 wrap.go:47] POST /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/events: (3.575831ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39450]
I0111 23:42:23.824419  120899 wrap.go:47] POST /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods: (5.550586ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39404]
I0111 23:42:23.824862  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-8: (4.993322ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39448]
I0111 23:42:23.825610  120899 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0111 23:42:23.827001  120899 scheduling_queue.go:821] About to try and schedule pod preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-7
I0111 23:42:23.827016  120899 scheduler.go:454] Attempting to schedule pod: preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-7
I0111 23:42:23.827114  120899 factory.go:1070] Unable to schedule preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-7: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0111 23:42:23.827155  120899 factory.go:1175] Updating pod condition for preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-7 to (PodScheduled==False, Reason=Unschedulable)
I0111 23:42:23.830873  120899 wrap.go:47] POST /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods: (4.781033ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39450]
I0111 23:42:23.832053  120899 wrap.go:47] PUT /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-7/status: (2.667176ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39452]
I0111 23:42:23.838448  120899 wrap.go:47] POST /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods: (5.599815ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39450]
I0111 23:42:23.838946  120899 wrap.go:47] POST /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/events: (10.594714ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39462]
I0111 23:42:23.840568  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-7: (11.532155ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39460]
I0111 23:42:23.841039  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-7: (7.595999ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39452]
I0111 23:42:23.841444  120899 wrap.go:47] POST /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods: (2.468567ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39450]
I0111 23:42:23.841535  120899 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0111 23:42:23.842395  120899 scheduling_queue.go:821] About to try and schedule pod preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-10
I0111 23:42:23.842413  120899 scheduler.go:454] Attempting to schedule pod: preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-10
I0111 23:42:23.842511  120899 factory.go:1070] Unable to schedule preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-10: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0111 23:42:23.842549  120899 factory.go:1175] Updating pod condition for preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-10 to (PodScheduled==False, Reason=Unschedulable)
I0111 23:42:23.844264  120899 wrap.go:47] POST /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods: (1.777519ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39460]
I0111 23:42:23.846697  120899 wrap.go:47] POST /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/events: (3.190803ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39468]
I0111 23:42:23.847167  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-10: (2.649019ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39460]
I0111 23:42:23.847529  120899 wrap.go:47] PUT /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-10/status: (4.249836ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39462]
I0111 23:42:23.849623  120899 wrap.go:47] POST /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods: (4.755046ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39470]
I0111 23:42:23.850466  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-10: (1.391819ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39460]
I0111 23:42:23.850705  120899 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0111 23:42:23.851505  120899 scheduling_queue.go:821] About to try and schedule pod preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-14
I0111 23:42:23.852770  120899 scheduler.go:454] Attempting to schedule pod: preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-14
I0111 23:42:23.852699  120899 wrap.go:47] POST /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods: (2.320952ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39470]
I0111 23:42:23.855596  120899 wrap.go:47] POST /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods: (2.012648ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39460]
I0111 23:42:23.856468  120899 factory.go:1070] Unable to schedule preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-14: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0111 23:42:23.856512  120899 reflector.go:215] k8s.io/client-go/informers/factory.go:132: forcing resync
I0111 23:42:23.859458  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-14: (2.267972ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39468]
I0111 23:42:23.859887  120899 wrap.go:47] POST /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods: (2.853333ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39460]
I0111 23:42:23.861327  120899 factory.go:1175] Updating pod condition for preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-14 to (PodScheduled==False, Reason=Unschedulable)
I0111 23:42:23.863420  120899 reflector.go:215] k8s.io/client-go/informers/factory.go:132: forcing resync
I0111 23:42:23.864020  120899 wrap.go:47] POST /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/events: (2.01797ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39472]
I0111 23:42:23.864087  120899 reflector.go:215] k8s.io/client-go/informers/factory.go:132: forcing resync
I0111 23:42:23.864099  120899 reflector.go:215] k8s.io/client-go/informers/factory.go:132: forcing resync
I0111 23:42:23.864539  120899 wrap.go:47] PUT /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-14/status: (2.900024ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39468]
I0111 23:42:23.864601  120899 wrap.go:47] POST /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods: (4.089097ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39460]
I0111 23:42:23.864649  120899 reflector.go:215] k8s.io/client-go/informers/factory.go:132: forcing resync
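
The burst of reflector.go "forcing resync" lines is the shared informers hitting their resync period: the local cache replays its contents to registered handlers, independent of any apiserver traffic. A minimal sketch of an informer built with such a period (the 30s value and the no-op handler are illustrative):

package sketch

import (
    "time"

    "k8s.io/client-go/informers"
    "k8s.io/client-go/kubernetes"
    "k8s.io/client-go/tools/cache"
)

// startPodInformer builds a shared informer factory with a non-zero resync
// period; each tick produces the "forcing resync" reflector lines above.
func startPodInformer(client kubernetes.Interface, stopCh <-chan struct{}) {
    factory := informers.NewSharedInformerFactory(client, 30*time.Second)
    factory.Core().V1().Pods().Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{
        UpdateFunc: func(oldObj, newObj interface{}) {
            // resyncs arrive here as Update events with oldObj == newObj
        },
    })
    factory.Start(stopCh)
    factory.WaitForCacheSync(stopCh)
}
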
I0111 23:42:23.868669  120899 wrap.go:47] POST /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods: (2.163221ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39460]
I0111 23:42:23.869806  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-14: (3.948524ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39468]
I0111 23:42:23.870081  120899 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0111 23:42:23.870227  120899 scheduling_queue.go:821] About to try and schedule pod preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-16
I0111 23:42:23.870247  120899 scheduler.go:454] Attempting to schedule pod: preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-16
I0111 23:42:23.870367  120899 factory.go:1070] Unable to schedule preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-16: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0111 23:42:23.870411  120899 factory.go:1175] Updating pod condition for preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-16 to (PodScheduled==False, Reason=Unschedulable)
I0111 23:42:23.873129  120899 wrap.go:47] PUT /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-16/status: (2.513808ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39468]
I0111 23:42:23.874950  120899 wrap.go:47] POST /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/events: (3.823957ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39474]
I0111 23:42:23.875529  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-16: (1.985237ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39468]
I0111 23:42:23.875846  120899 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0111 23:42:23.876025  120899 scheduling_queue.go:821] About to try and schedule pod preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-14
I0111 23:42:23.876037  120899 scheduler.go:454] Attempting to schedule pod: preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-14
I0111 23:42:23.876112  120899 factory.go:1070] Unable to schedule preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-14: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0111 23:42:23.876144  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-16: (4.339806ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39472]
I0111 23:42:23.876169  120899 factory.go:1175] Updating pod condition for preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-14 to (PodScheduled==False, Reason=Unschedulable)
I0111 23:42:23.876587  120899 backoff_utils.go:79] Backing off 2s
E0111 23:42:23.878699  120899 rest.go:216] >>>> caught error : &errors.StatusError{ErrStatus:v1.Status{TypeMeta:v1.TypeMeta{Kind:"", APIVersion:""}, ListMeta:v1.ListMeta{SelfLink:"", ResourceVersion:"", Continue:""}, Status:"Failure", Message:"Operation cannot be fulfilled on pods \"ppod-14\": the object has been modified; please apply your changes to the latest version and try again", Reason:"Conflict", Details:(*v1.StatusDetails)(0xc0076bb620), Code:409}}
I0111 23:42:23.878768  120899 update.go:183] Unable to store in database : &errors.StatusError{ErrStatus:v1.Status{TypeMeta:v1.TypeMeta{Kind:"", APIVersion:""}, ListMeta:v1.ListMeta{SelfLink:"", ResourceVersion:"", Continue:""}, Status:"Failure", Message:"Operation cannot be fulfilled on pods \"ppod-14\": the object has been modified; please apply your changes to the latest version and try again", Reason:"Conflict", Details:(*v1.StatusDetails)(0xc0076bb620), Code:409}}
I0111 23:42:23.878924  120899 trace.go:84] Trace[232207754]: "Update /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-14/status" (started: 2019-01-11 23:42:23.876923248 +0000 UTC m=+67.572517048) (total time: 1.96934ms):
Trace[232207754]: [59.461µs] [59.461µs] About to convert to expected version
Trace[232207754]: [148.56µs] [89.099µs] Conversion done
Trace[232207754]: [152.038µs] [3.478µs] About to store object in database
Trace[232207754]: [1.96934ms] [1.817302ms] END
I0111 23:42:23.878931  120899 update.go:53] >>>> UpdateResource bad count : &http.Request{Method:"PUT", URL:(*url.URL)(0xc00aff4400), Proto:"HTTP/1.1", ProtoMajor:1, ProtoMinor:1, Header:http.Header{"Accept-Encoding":[]string{"gzip"}, "User-Agent":[]string{"scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format"}, "Content-Length":[]string{"1091"}, "Accept":[]string{"application/json, */*"}, "Content-Type":[]string{"application/json"}}, Body:(*http.body)(0xc005089680), GetBody:(func() (io.ReadCloser, error))(nil), ContentLength:1091, TransferEncoding:[]string(nil), Close:false, Host:"127.0.0.1:46609", Form:url.Values(nil), PostForm:url.Values(nil), MultipartForm:(*multipart.Form)(nil), Trailer:http.Header(nil), RemoteAddr:"127.0.0.1:39468", RequestURI:"/api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-14/status", TLS:(*tls.ConnectionState)(nil), Cancel:(<-chan struct {})(nil), Response:(*http.Response)(nil), ctx:(*context.valueCtx)(0xc0015484b0)}
I0111 23:42:23.879038  120899 wrap.go:47] PUT /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-14/status: (2.266848ms) 409 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39468]
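
The Trace[232207754] block is the apiserver's request tracer: each Step stamps a point in the handler (conversion, storage write) and the whole trace is emitted at the end with per-step latencies. Normally a trace is only logged when it exceeds a latency threshold; this 1.97ms request being printed anyway looks like part of what this WIP PR is experimenting with. A minimal sketch of the pattern (the import path has moved between releases, k8s.io/utils/trace is its current home; the threshold is illustrative):

package sketch

import (
    "time"

    utiltrace "k8s.io/utils/trace"
)

// updateHandler shows the Step/LogIfLong pattern behind the trace output
// above; the step names mirror the ones in the log.
func updateHandler() {
    trace := utiltrace.New("Update /api/v1/namespaces/.../pods/ppod-14/status")
    defer trace.LogIfLong(500 * time.Millisecond) // normally only slow requests get printed

    trace.Step("About to convert to expected version")
    // ... version conversion ...
    trace.Step("Conversion done")
    trace.Step("About to store object in database")
    // ... storage write; in the log above it failed with a 409 Conflict ...
}
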
I0111 23:42:23.879383  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-14: (2.735623ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39472]
I0111 23:42:23.879644  120899 backoff_utils.go:79] Backing off 2s
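
The 409 itself is ordinary optimistic concurrency: the status PUT carried a stale resourceVersion because another writer updated ppod-14 in between, so storage rejects it with "the object has been modified". Callers are expected to re-GET and retry, which client-go packages as a helper; the scheduler additionally backs the pod off for 2s, as logged. A hedged sketch of the retry side (names are illustrative):

package sketch

import (
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/client-go/kubernetes"
    "k8s.io/client-go/util/retry"
)

// markUnschedulable retries the status update on 409 Conflict, re-reading
// the pod each round so the PUT carries a fresh resourceVersion.
func markUnschedulable(client kubernetes.Interface, ns, name string) error {
    return retry.RetryOnConflict(retry.DefaultRetry, func() error {
        pod, err := client.CoreV1().Pods(ns).Get(name, metav1.GetOptions{})
        if err != nil {
            return err
        }
        // ... set the PodScheduled=False / Unschedulable condition here ...
        _, err = client.CoreV1().Pods(ns).UpdateStatus(pod)
        return err // another Conflict triggers another round
    })
}
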
I0111 23:42:23.880819  120899 wrap.go:47] PATCH /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/events/ppod-14.1578efcee38644f0: (3.637985ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39474]
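
That PATCH against events/ppod-14.1578efcee38644f0 (rather than another POST) is the event recorder deduplicating: repeated FailedScheduling events for the same pod are folded into one Event whose count is patched upward. A minimal sketch of wiring up such a recorder, assuming the standard client-go pieces (component name is illustrative):

package sketch

import (
    corev1 "k8s.io/api/core/v1"
    "k8s.io/client-go/kubernetes"
    "k8s.io/client-go/kubernetes/scheme"
    typedcorev1 "k8s.io/client-go/kubernetes/typed/core/v1"
    "k8s.io/client-go/tools/record"
)

// newRecorder returns a recorder whose Event/Eventf calls become the
// POST /events and PATCH /events/<name> requests seen in this log.
func newRecorder(client kubernetes.Interface) record.EventRecorder {
    broadcaster := record.NewBroadcaster()
    broadcaster.StartRecordingToSink(&typedcorev1.EventSinkImpl{
        Interface: client.CoreV1().Events(""),
    })
    return broadcaster.NewRecorder(scheme.Scheme, corev1.EventSource{Component: "default-scheduler"})
}
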
I0111 23:42:23.882304  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-14: (2.790929ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39468]
I0111 23:42:23.882635  120899 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0111 23:42:23.882861  120899 scheduling_queue.go:821] About to try and schedule pod preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-18
I0111 23:42:23.882922  120899 scheduler.go:454] Attempting to schedule pod: preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-18
I0111 23:42:23.883033  120899 factory.go:1070] Unable to schedule preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-18: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0111 23:42:23.883102  120899 factory.go:1175] Updating pod condition for preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-18 to (PodScheduled==False, Reason=Unschedulable)
I0111 23:42:23.885609  120899 wrap.go:47] POST /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/events: (1.678941ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39472]
I0111 23:42:23.886704  120899 wrap.go:47] POST /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods: (17.423083ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39460]
I0111 23:42:23.887694  120899 wrap.go:47] PUT /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-18/status: (2.1461ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39476]
I0111 23:42:23.888637  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-18: (5.036283ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39474]
I0111 23:42:23.889544  120899 wrap.go:47] POST /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods: (1.941079ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39460]
I0111 23:42:23.891830  120899 wrap.go:47] POST /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods: (1.918508ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39460]
I0111 23:42:23.891992  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-18: (1.977786ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39474]
I0111 23:42:23.892442  120899 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0111 23:42:23.892611  120899 scheduling_queue.go:821] About to try and schedule pod preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-17
I0111 23:42:23.892649  120899 scheduler.go:454] Attempting to schedule pod: preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-17
I0111 23:42:23.892965  120899 factory.go:1070] Unable to schedule preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-17: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0111 23:42:23.893043  120899 factory.go:1175] Updating pod condition for preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-17 to (PodScheduled==False, Reason=Unschedulable)
I0111 23:42:23.896781  120899 wrap.go:47] POST /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/events: (2.577672ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39480]
I0111 23:42:23.897190  120899 wrap.go:47] POST /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods: (4.312019ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39460]
I0111 23:42:23.897900  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-17: (3.475027ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39478]
I0111 23:42:23.898081  120899 wrap.go:47] PUT /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-17/status: (4.468746ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39472]
I0111 23:42:23.899916  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-17: (1.395765ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39472]
I0111 23:42:23.900205  120899 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0111 23:42:23.900640  120899 scheduling_queue.go:821] About to try and schedule pod preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-24
I0111 23:42:23.900667  120899 scheduler.go:454] Attempting to schedule pod: preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-24
I0111 23:42:23.900799  120899 factory.go:1070] Unable to schedule preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-24: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0111 23:42:23.900861  120899 factory.go:1175] Updating pod condition for preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-24 to (PodScheduled==False, Reason=Unschedulable)
I0111 23:42:23.903563  120899 wrap.go:47] POST /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods: (2.174117ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39478]
I0111 23:42:23.903713  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-24: (2.149783ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39482]
I0111 23:42:23.904642  120899 wrap.go:47] POST /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/events: (2.574516ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39484]
I0111 23:42:23.904688  120899 wrap.go:47] PUT /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-24/status: (3.318917ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39480]
I0111 23:42:23.906172  120899 wrap.go:47] POST /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods: (1.513973ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39478]
I0111 23:42:23.907966  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-24: (2.608699ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39480]
I0111 23:42:23.908327  120899 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0111 23:42:23.908491  120899 scheduling_queue.go:821] About to try and schedule pod preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-25
I0111 23:42:23.908529  120899 scheduler.go:454] Attempting to schedule pod: preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-25
I0111 23:42:23.908648  120899 factory.go:1070] Unable to schedule preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-25: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0111 23:42:23.908703  120899 factory.go:1175] Updating pod condition for preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-25 to (PodScheduled==False, Reason=Unschedulable)
I0111 23:42:23.909410  120899 wrap.go:47] POST /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods: (2.700503ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39482]
I0111 23:42:23.910535  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-25: (1.025822ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39484]
I0111 23:42:23.911993  120899 wrap.go:47] PUT /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-25/status: (2.414718ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39480]
I0111 23:42:23.913102  120899 wrap.go:47] POST /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/events: (1.89527ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39484]
I0111 23:42:23.913618  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-25: (1.183983ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39480]
I0111 23:42:23.913929  120899 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0111 23:42:23.914846  120899 scheduling_queue.go:821] About to try and schedule pod preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-27
I0111 23:42:23.914912  120899 scheduler.go:454] Attempting to schedule pod: preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-27
I0111 23:42:23.915033  120899 factory.go:1070] Unable to schedule preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-27: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0111 23:42:23.915085  120899 factory.go:1175] Updating pod condition for preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-27 to (PodScheduled==False, Reason=Unschedulable)
I0111 23:42:23.916627  120899 wrap.go:47] POST /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods: (5.461433ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39486]
I0111 23:42:23.917665  120899 wrap.go:47] PUT /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-27/status: (2.283073ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39484]
I0111 23:42:23.918063  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-27: (2.288368ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39482]
I0111 23:42:23.920051  120899 wrap.go:47] POST /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods: (1.932165ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39488]
I0111 23:42:23.920081  120899 wrap.go:47] POST /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/events: (2.703226ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39486]
I0111 23:42:23.920567  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-27: (2.515219ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39484]
I0111 23:42:23.920854  120899 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0111 23:42:23.921083  120899 scheduling_queue.go:821] About to try and schedule pod preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-25
I0111 23:42:23.921106  120899 scheduler.go:454] Attempting to schedule pod: preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-25
I0111 23:42:23.921187  120899 factory.go:1070] Unable to schedule preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-25: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0111 23:42:23.921251  120899 factory.go:1175] Updating pod condition for preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-25 to (PodScheduled==False, Reason=Unschedulable)
I0111 23:42:23.924106  120899 wrap.go:47] PUT /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-25/status: (2.53582ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39486]
I0111 23:42:23.924667  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-25: (2.254562ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39482]
I0111 23:42:23.925889  120899 wrap.go:47] PATCH /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/events/ppod-25.1578efcee6596f84: (3.636068ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39492]
I0111 23:42:23.927666  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-25: (2.700515ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39486]
I0111 23:42:23.928196  120899 wrap.go:47] POST /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods: (4.496925ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39488]
I0111 23:42:23.928522  120899 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0111 23:42:23.928897  120899 scheduling_queue.go:821] About to try and schedule pod preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-30
I0111 23:42:23.928953  120899 scheduler.go:454] Attempting to schedule pod: preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-30
I0111 23:42:23.929085  120899 factory.go:1070] Unable to schedule preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-30: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0111 23:42:23.929174  120899 factory.go:1175] Updating pod condition for preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-30 to (PodScheduled==False, Reason=Unschedulable)
I0111 23:42:23.931622  120899 wrap.go:47] POST /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods: (2.650484ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39492]
I0111 23:42:23.932470  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-30: (1.981705ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39490]
I0111 23:42:23.933020  120899 wrap.go:47] PUT /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-30/status: (3.552686ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39482]
I0111 23:42:23.933248  120899 wrap.go:47] POST /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/events: (2.046366ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39494]
I0111 23:42:23.933853  120899 wrap.go:47] POST /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods: (1.621935ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39492]
I0111 23:42:23.936760  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-30: (2.743204ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39490]
I0111 23:42:23.937434  120899 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0111 23:42:23.937614  120899 scheduling_queue.go:821] About to try and schedule pod preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-31
I0111 23:42:23.937629  120899 scheduler.go:454] Attempting to schedule pod: preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-31
I0111 23:42:23.937723  120899 factory.go:1070] Unable to schedule preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-31: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0111 23:42:23.937782  120899 factory.go:1175] Updating pod condition for preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-31 to (PodScheduled==False, Reason=Unschedulable)
I0111 23:42:23.936807  120899 wrap.go:47] POST /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods: (2.536465ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39492]
I0111 23:42:23.939396  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-31: (1.300414ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39494]
I0111 23:42:23.941429  120899 wrap.go:47] POST /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/events: (3.159013ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39492]
I0111 23:42:23.941429  120899 wrap.go:47] PUT /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-31/status: (2.479888ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39490]
I0111 23:42:23.941963  120899 wrap.go:47] POST /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods: (3.288037ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39496]
I0111 23:42:23.944676  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-31: (2.03782ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39492]
I0111 23:42:23.951095  120899 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0111 23:42:23.951345  120899 scheduling_queue.go:821] About to try and schedule pod preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-30
I0111 23:42:23.951363  120899 scheduler.go:454] Attempting to schedule pod: preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-30
I0111 23:42:23.951466  120899 factory.go:1070] Unable to schedule preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-30: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0111 23:42:23.951508  120899 factory.go:1175] Updating pod condition for preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-30 to (PodScheduled==False, Reason=Unschedulable)
I0111 23:42:23.951686  120899 wrap.go:47] POST /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods: (8.682992ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39496]
I0111 23:42:23.955374  120899 wrap.go:47] PUT /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-30/status: (2.942585ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39492]
I0111 23:42:23.955374  120899 wrap.go:47] POST /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods: (2.542098ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39500]
I0111 23:42:23.955805  120899 wrap.go:47] PATCH /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/events/ppod-30.1578efcee791cb76: (3.169721ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39496]
I0111 23:42:23.956637  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-30: (4.54562ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39494]
I0111 23:42:23.957872  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-30: (1.75416ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39492]
I0111 23:42:23.958183  120899 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0111 23:42:23.958427  120899 scheduling_queue.go:821] About to try and schedule pod preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-35
I0111 23:42:23.958450  120899 scheduler.go:454] Attempting to schedule pod: preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-35
I0111 23:42:23.958560  120899 factory.go:1070] Unable to schedule preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-35: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0111 23:42:23.958612  120899 factory.go:1175] Updating pod condition for preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-35 to (PodScheduled==False, Reason=Unschedulable)
I0111 23:42:23.960535  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-35: (1.211424ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39496]
I0111 23:42:23.962357  120899 wrap.go:47] POST /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/events: (2.556657ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39502]
I0111 23:42:23.962682  120899 wrap.go:47] PUT /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-35/status: (3.302878ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39500]
I0111 23:42:23.965215  120899 wrap.go:47] POST /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods: (1.847286ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39496]
I0111 23:42:23.965644  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-35: (2.473106ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39502]
I0111 23:42:23.965940  120899 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0111 23:42:23.966083  120899 scheduling_queue.go:821] About to try and schedule pod preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-37
I0111 23:42:23.966148  120899 scheduler.go:454] Attempting to schedule pod: preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-37
I0111 23:42:23.966243  120899 factory.go:1070] Unable to schedule preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-37: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0111 23:42:23.966318  120899 factory.go:1175] Updating pod condition for preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-37 to (PodScheduled==False, Reason=Unschedulable)
I0111 23:42:23.969316  120899 wrap.go:47] POST /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/events: (2.176927ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39506]
I0111 23:42:23.969633  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-37: (2.474837ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39504]
I0111 23:42:23.970085  120899 wrap.go:47] PUT /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-37/status: (2.953185ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39502]
I0111 23:42:23.971556  120899 wrap.go:47] POST /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods: (3.284006ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39496]
I0111 23:42:23.972887  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-37: (1.349681ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39506]
I0111 23:42:23.973125  120899 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0111 23:42:23.973311  120899 scheduling_queue.go:821] About to try and schedule pod preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-38
I0111 23:42:23.973335  120899 scheduler.go:454] Attempting to schedule pod: preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-38
I0111 23:42:23.973452  120899 factory.go:1070] Unable to schedule preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-38: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0111 23:42:23.973507  120899 factory.go:1175] Updating pod condition for preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-38 to (PodScheduled==False, Reason=Unschedulable)
I0111 23:42:23.975118  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-38: (1.189981ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39504]
I0111 23:42:23.976646  120899 wrap.go:47] POST /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/events: (2.384764ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39508]
I0111 23:42:23.978895  120899 wrap.go:47] PUT /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-38/status: (3.425259ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39506]
I0111 23:42:23.980187  120899 wrap.go:47] POST /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods: (2.232266ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39508]
I0111 23:42:23.981856  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-38: (2.254466ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39506]
I0111 23:42:23.982372  120899 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0111 23:42:23.982516  120899 scheduling_queue.go:821] About to try and schedule pod preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-39
I0111 23:42:23.982527  120899 scheduler.go:454] Attempting to schedule pod: preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-39
I0111 23:42:23.982713  120899 factory.go:1070] Unable to schedule preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-39: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0111 23:42:23.982789  120899 factory.go:1175] Updating pod condition for preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-39 to (PodScheduled==False, Reason=Unschedulable)
I0111 23:42:23.984394  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-39: (1.361699ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39506]
I0111 23:42:23.985250  120899 wrap.go:47] PUT /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-39/status: (1.880732ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39504]
I0111 23:42:23.985615  120899 wrap.go:47] POST /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods: (2.243857ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39508]
I0111 23:42:23.987058  120899 wrap.go:47] POST /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/events: (2.269607ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39506]
I0111 23:42:23.987671  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-39: (1.425835ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39504]
I0111 23:42:23.988056  120899 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0111 23:42:23.988493  120899 scheduling_queue.go:821] About to try and schedule pod preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-40
I0111 23:42:23.988516  120899 scheduler.go:454] Attempting to schedule pod: preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-40
I0111 23:42:23.988577  120899 wrap.go:47] POST /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods: (2.485367ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39508]
I0111 23:42:23.988674  120899 factory.go:1070] Unable to schedule preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-40: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0111 23:42:23.988716  120899 factory.go:1175] Updating pod condition for preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-40 to (PodScheduled==False, Reason=Unschedulable)
I0111 23:42:23.990043  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-40: (1.096ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39510]
I0111 23:42:23.991639  120899 wrap.go:47] POST /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/events: (1.566964ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39512]
I0111 23:42:23.992465  120899 wrap.go:47] PUT /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-40/status: (3.317082ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39506]
I0111 23:42:23.993901  120899 wrap.go:47] POST /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods: (3.864281ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39514]
I0111 23:42:23.995511  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-40: (2.665529ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39512]
I0111 23:42:23.996712  120899 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0111 23:42:23.996813  120899 wrap.go:47] POST /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods: (1.999676ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39514]
I0111 23:42:23.997062  120899 scheduling_queue.go:821] About to try and schedule pod preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-39
I0111 23:42:23.997086  120899 scheduler.go:454] Attempting to schedule pod: preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-39
I0111 23:42:23.997165  120899 factory.go:1070] Unable to schedule preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-39: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0111 23:42:23.997220  120899 factory.go:1175] Updating pod condition for preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-39 to (PodScheduled==False, Reason=Unschedulable)
I0111 23:42:24.000451  120899 wrap.go:47] PATCH /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/events/ppod-39.1578efceeac3db98: (2.45295ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39518]
I0111 23:42:24.000606  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-39: (3.119881ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39510]
I0111 23:42:24.001860  120899 wrap.go:47] PUT /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-39/status: (3.931354ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39516]
I0111 23:42:24.002117  120899 wrap.go:47] POST /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods: (4.322146ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39512]
I0111 23:42:24.004687  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-39: (2.434004ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39510]
I0111 23:42:24.005176  120899 wrap.go:47] POST /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods: (2.615951ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39512]
I0111 23:42:24.007531  120899 wrap.go:47] POST /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods: (1.978122ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39512]
I0111 23:42:24.008213  120899 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0111 23:42:24.008431  120899 scheduling_queue.go:821] About to try and schedule pod preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-43
I0111 23:42:24.008476  120899 scheduler.go:454] Attempting to schedule pod: preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-43
I0111 23:42:24.010181  120899 factory.go:1070] Unable to schedule preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-43: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0111 23:42:24.010259  120899 factory.go:1175] Updating pod condition for preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-43 to (PodScheduled==False, Reason=Unschedulable)
I0111 23:42:24.009853  120899 wrap.go:47] POST /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods: (1.958645ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39512]
I0111 23:42:24.013222  120899 wrap.go:47] POST /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/events: (1.535435ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39512]
I0111 23:42:24.014688  120899 wrap.go:47] PUT /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-43/status: (3.148644ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39518]
I0111 23:42:24.015356  120899 wrap.go:47] POST /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods: (3.301951ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39522]
I0111 23:42:24.017365  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-43: (2.307797ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39518]
I0111 23:42:24.017677  120899 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0111 23:42:24.017934  120899 scheduling_queue.go:821] About to try and schedule pod preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-0
I0111 23:42:24.017950  120899 scheduler.go:454] Attempting to schedule pod: preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-0
I0111 23:42:24.018043  120899 factory.go:1070] Unable to schedule preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-0: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0111 23:42:24.018081  120899 factory.go:1175] Updating pod condition for preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-0 to (PodScheduled==False, Reason=Unschedulable)
I0111 23:42:24.021398  120899 wrap.go:47] POST /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/events: (1.853761ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39524]
I0111 23:42:24.021514  120899 wrap.go:47] PUT /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-0/status: (3.1689ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39522]
I0111 23:42:24.023169  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-0: (1.179331ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39524]
I0111 23:42:24.023480  120899 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0111 23:42:24.023661  120899 scheduling_queue.go:821] About to try and schedule pod preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-2
I0111 23:42:24.023809  120899 scheduler.go:454] Attempting to schedule pod: preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-2
I0111 23:42:24.023957  120899 factory.go:1070] Unable to schedule preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-2: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0111 23:42:24.024023  120899 factory.go:1175] Updating pod condition for preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-2 to (PodScheduled==False, Reason=Unschedulable)
I0111 23:42:24.024658  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-0: (5.841326ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39512]
I0111 23:42:24.027844  120899 wrap.go:47] POST /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/events: (2.374403ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39522]
I0111 23:42:24.028045  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-2: (2.221397ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39512]
I0111 23:42:24.028492  120899 wrap.go:47] PUT /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-2/status: (4.232831ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39524]
I0111 23:42:24.030492  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-2: (1.395815ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39522]
I0111 23:42:24.030806  120899 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0111 23:42:24.031092  120899 scheduling_queue.go:821] About to try and schedule pod preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-6
I0111 23:42:24.031157  120899 scheduler.go:454] Attempting to schedule pod: preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-6
I0111 23:42:24.031269  120899 factory.go:1070] Unable to schedule preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-6: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0111 23:42:24.031372  120899 factory.go:1175] Updating pod condition for preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-6 to (PodScheduled==False, Reason=Unschedulable)
I0111 23:42:24.034017  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-6: (1.443554ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39512]
I0111 23:42:24.035048  120899 wrap.go:47] PUT /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-6/status: (3.410437ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39522]
I0111 23:42:24.036317  120899 wrap.go:47] POST /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/events: (3.228707ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39526]
I0111 23:42:24.038431  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-6: (1.221947ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39526]
I0111 23:42:24.038658  120899 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0111 23:42:24.038886  120899 scheduling_queue.go:821] About to try and schedule pod preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-10
I0111 23:42:24.038930  120899 scheduler.go:454] Attempting to schedule pod: preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-10
I0111 23:42:24.039040  120899 factory.go:1070] Unable to schedule preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-10: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0111 23:42:24.039119  120899 factory.go:1175] Updating pod condition for preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-10 to (PodScheduled==False, Reason=Unschedulable)
I0111 23:42:24.043082  120899 wrap.go:47] PUT /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-10/status: (3.635133ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39526]
I0111 23:42:24.044132  120899 wrap.go:47] PATCH /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/events/ppod-10.1578efcee2680e5f: (3.548374ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39528]
I0111 23:42:24.045366  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-10: (1.810627ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39526]
I0111 23:42:24.045833  120899 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0111 23:42:24.046077  120899 scheduling_queue.go:821] About to try and schedule pod preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-35
I0111 23:42:24.046117  120899 scheduler.go:454] Attempting to schedule pod: preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-35
I0111 23:42:24.046246  120899 factory.go:1070] Unable to schedule preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-35: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0111 23:42:24.046389  120899 factory.go:1175] Updating pod condition for preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-35 to (PodScheduled==False, Reason=Unschedulable)
I0111 23:42:24.049513  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-43: (38.831454ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39510]
I0111 23:42:24.052129  120899 wrap.go:47] PUT /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-35/status: (5.350632ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39526]
I0111 23:42:24.052150  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-35: (2.896661ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39528]
I0111 23:42:24.053137  120899 wrap.go:47] PATCH /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/events/ppod-35.1578efcee953046d: (3.086723ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39530]
I0111 23:42:24.053559  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-10: (13.188159ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39512]
I0111 23:42:24.055094  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-35: (1.990568ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39528]
I0111 23:42:24.055436  120899 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0111 23:42:24.055704  120899 scheduling_queue.go:821] About to try and schedule pod preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-37
I0111 23:42:24.055763  120899 scheduler.go:454] Attempting to schedule pod: preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-37
I0111 23:42:24.055902  120899 factory.go:1070] Unable to schedule preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-37: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0111 23:42:24.055974  120899 factory.go:1175] Updating pod condition for preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-37 to (PodScheduled==False, Reason=Unschedulable)
I0111 23:42:24.062353  120899 wrap.go:47] PATCH /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/events/ppod-37.1578efcee9c896b8: (5.28181ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39532]
I0111 23:42:24.062659  120899 wrap.go:47] PUT /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-37/status: (5.73824ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39530]
I0111 23:42:24.064992  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-37: (8.108547ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39510]
I0111 23:42:24.065051  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-37: (1.402866ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39530]
I0111 23:42:24.065677  120899 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0111 23:42:24.066393  120899 scheduling_queue.go:821] About to try and schedule pod preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-19
I0111 23:42:24.066495  120899 scheduler.go:454] Attempting to schedule pod: preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-19
I0111 23:42:24.066632  120899 factory.go:1070] Unable to schedule preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-19: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0111 23:42:24.066710  120899 factory.go:1175] Updating pod condition for preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-19 to (PodScheduled==False, Reason=Unschedulable)
I0111 23:42:24.069112  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-19: (1.56974ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39532]
I0111 23:42:24.069624  120899 wrap.go:47] POST /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/events: (1.481972ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39534]
I0111 23:42:24.070029  120899 wrap.go:47] PUT /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-19/status: (2.973578ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39510]
I0111 23:42:24.071514  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-19: (1.129958ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39510]
I0111 23:42:24.071838  120899 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0111 23:42:24.072160  120899 scheduling_queue.go:821] About to try and schedule pod preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-38
I0111 23:42:24.072223  120899 scheduler.go:454] Attempting to schedule pod: preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-38
I0111 23:42:24.072552  120899 factory.go:1070] Unable to schedule preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-38: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0111 23:42:24.072679  120899 factory.go:1175] Updating pod condition for preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-38 to (PodScheduled==False, Reason=Unschedulable)
I0111 23:42:24.075815  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-38: (1.896126ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39532]
I0111 23:42:24.078672  120899 wrap.go:47] PUT /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-38/status: (5.689651ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39534]
I0111 23:42:24.078881  120899 wrap.go:47] PATCH /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/events/ppod-38.1578efceea3640d8: (3.782381ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39536]
I0111 23:42:24.081163  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-38: (1.540607ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39534]
I0111 23:42:24.081575  120899 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0111 23:42:24.082884  120899 scheduling_queue.go:821] About to try and schedule pod preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-41
I0111 23:42:24.082910  120899 scheduler.go:454] Attempting to schedule pod: preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-41
I0111 23:42:24.083027  120899 factory.go:1070] Unable to schedule preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-41: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0111 23:42:24.083081  120899 factory.go:1175] Updating pod condition for preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-41 to (PodScheduled==False, Reason=Unschedulable)
I0111 23:42:24.086139  120899 wrap.go:47] PUT /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-41/status: (2.773215ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39534]
I0111 23:42:24.086984  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-41: (1.52896ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39538]
I0111 23:42:24.088528  120899 wrap.go:47] POST /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/events: (4.326075ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39532]
I0111 23:42:24.089802  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-41: (2.988903ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39534]
I0111 23:42:24.090049  120899 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0111 23:42:24.090373  120899 scheduling_queue.go:821] About to try and schedule pod preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-9
I0111 23:42:24.090396  120899 scheduler.go:454] Attempting to schedule pod: preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-9
I0111 23:42:24.090496  120899 factory.go:1070] Unable to schedule preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-9: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0111 23:42:24.090564  120899 factory.go:1175] Updating pod condition for preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-9 to (PodScheduled==False, Reason=Unschedulable)
I0111 23:42:24.094040  120899 wrap.go:47] POST /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/events: (2.252113ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39540]
I0111 23:42:24.094435  120899 wrap.go:47] PUT /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-9/status: (2.967012ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39520]
I0111 23:42:24.094662  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-9: (3.217045ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39538]
I0111 23:42:24.097034  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-9: (1.59706ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39538]
I0111 23:42:24.097523  120899 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0111 23:42:24.097669  120899 scheduling_queue.go:821] About to try and schedule pod preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-20
I0111 23:42:24.097690  120899 scheduler.go:454] Attempting to schedule pod: preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-20
I0111 23:42:24.097787  120899 factory.go:1070] Unable to schedule preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-20: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0111 23:42:24.097838  120899 factory.go:1175] Updating pod condition for preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-20 to (PodScheduled==False, Reason=Unschedulable)
I0111 23:42:24.100668  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-20: (2.132249ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39540]
I0111 23:42:24.100849  120899 wrap.go:47] PUT /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-20/status: (2.632336ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39538]
I0111 23:42:24.102192  120899 wrap.go:47] POST /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/events: (2.414335ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39542]
I0111 23:42:24.103365  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-20: (1.301551ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39538]
I0111 23:42:24.103808  120899 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0111 23:42:24.104364  120899 scheduling_queue.go:821] About to try and schedule pod preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-42
I0111 23:42:24.104393  120899 scheduler.go:454] Attempting to schedule pod: preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-42
I0111 23:42:24.104501  120899 factory.go:1070] Unable to schedule preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-42: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0111 23:42:24.104555  120899 factory.go:1175] Updating pod condition for preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-42 to (PodScheduled==False, Reason=Unschedulable)
I0111 23:42:24.106027  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-42: (1.197242ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39540]
I0111 23:42:24.107961  120899 wrap.go:47] PUT /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-42/status: (3.093097ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39542]
I0111 23:42:24.109851  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-42: (1.336159ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39542]
I0111 23:42:24.110111  120899 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0111 23:42:24.110251  120899 scheduling_queue.go:821] About to try and schedule pod preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-40
I0111 23:42:24.110302  120899 scheduler.go:454] Attempting to schedule pod: preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-40
I0111 23:42:24.110394  120899 factory.go:1070] Unable to schedule preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-40: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0111 23:42:24.110445  120899 factory.go:1175] Updating pod condition for preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-40 to (PodScheduled==False, Reason=Unschedulable)
I0111 23:42:24.111937  120899 wrap.go:47] POST /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/events: (6.749447ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39544]
I0111 23:42:24.115147  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-40: (3.411241ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39540]
I0111 23:42:24.115620  120899 wrap.go:47] PUT /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-40/status: (4.388156ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39542]
I0111 23:42:24.117365  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-40: (1.2041ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39542]
I0111 23:42:24.117594  120899 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0111 23:42:24.117881  120899 scheduling_queue.go:821] About to try and schedule pod preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-21
I0111 23:42:24.117903  120899 scheduler.go:454] Attempting to schedule pod: preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-21
I0111 23:42:24.117986  120899 factory.go:1070] Unable to schedule preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-21: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0111 23:42:24.118028  120899 factory.go:1175] Updating pod condition for preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-21 to (PodScheduled==False, Reason=Unschedulable)
I0111 23:42:24.121840  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/preemptor-pod: (1.99311ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39546]
I0111 23:42:24.122263  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-21: (3.8859ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39544]
I0111 23:42:24.122921  120899 wrap.go:47] PUT /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-21/status: (4.194034ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39542]
I0111 23:42:24.130501  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-21: (1.674756ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39542]
I0111 23:42:24.130801  120899 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0111 23:42:24.130963  120899 scheduling_queue.go:821] About to try and schedule pod preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-44
I0111 23:42:24.130982  120899 scheduler.go:454] Attempting to schedule pod: preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-44
I0111 23:42:24.131064  120899 factory.go:1070] Unable to schedule preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-44: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0111 23:42:24.131108  120899 factory.go:1175] Updating pod condition for preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-44 to (PodScheduled==False, Reason=Unschedulable)
I0111 23:42:24.132041  120899 wrap.go:47] PATCH /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/events/ppod-40.1578efceeb1e6949: (3.6419ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39546]
I0111 23:42:24.134107  120899 wrap.go:47] PUT /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-44/status: (2.631579ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39542]
I0111 23:42:24.134950  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-44: (1.089251ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39544]
I0111 23:42:24.136809  120899 wrap.go:47] POST /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/events: (3.763328ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39546]
I0111 23:42:24.137046  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-44: (1.704251ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39542]
I0111 23:42:24.137623  120899 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0111 23:42:24.137795  120899 scheduling_queue.go:821] About to try and schedule pod preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-45
I0111 23:42:24.137816  120899 scheduler.go:454] Attempting to schedule pod: preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-45
I0111 23:42:24.137887  120899 factory.go:1070] Unable to schedule preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-45: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0111 23:42:24.137935  120899 factory.go:1175] Updating pod condition for preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-45 to (PodScheduled==False, Reason=Unschedulable)
I0111 23:42:24.138992  120899 wrap.go:47] POST /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/events: (1.747614ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39546]
I0111 23:42:24.140820  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-45: (2.430168ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39544]
I0111 23:42:24.141557  120899 wrap.go:47] POST /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/events: (2.161853ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39546]
I0111 23:42:24.141985  120899 wrap.go:47] PUT /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-45/status: (3.200754ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39542]
I0111 23:42:24.143571  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-45: (1.111279ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39546]
I0111 23:42:24.143977  120899 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0111 23:42:24.144171  120899 scheduling_queue.go:821] About to try and schedule pod preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-3
I0111 23:42:24.144214  120899 scheduler.go:454] Attempting to schedule pod: preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-3
I0111 23:42:24.144339  120899 factory.go:1070] Unable to schedule preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-3: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0111 23:42:24.144408  120899 factory.go:1175] Updating pod condition for preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-3 to (PodScheduled==False, Reason=Unschedulable)
I0111 23:42:24.147789  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-3: (2.247657ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39544]
I0111 23:42:24.148312  120899 wrap.go:47] PUT /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-3/status: (3.641804ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39546]
I0111 23:42:24.148621  120899 wrap.go:47] POST /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/events: (2.192561ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39548]
I0111 23:42:24.150196  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-3: (1.33024ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39546]
I0111 23:42:24.150549  120899 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0111 23:42:24.150709  120899 scheduling_queue.go:821] About to try and schedule pod preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-8
I0111 23:42:24.150781  120899 scheduler.go:454] Attempting to schedule pod: preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-8
I0111 23:42:24.150923  120899 factory.go:1070] Unable to schedule preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-8: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0111 23:42:24.151090  120899 factory.go:1175] Updating pod condition for preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-8 to (PodScheduled==False, Reason=Unschedulable)
I0111 23:42:24.153309  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-8: (1.376922ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39548]
I0111 23:42:24.155213  120899 wrap.go:47] PUT /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-8/status: (3.21984ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39544]
I0111 23:42:24.156535  120899 wrap.go:47] PATCH /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/events/ppod-8.1578efcee0611254: (3.583447ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39550]
I0111 23:42:24.157050  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-8: (1.320755ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39544]
I0111 23:42:24.157421  120899 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0111 23:42:24.157718  120899 scheduling_queue.go:821] About to try and schedule pod preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-22
I0111 23:42:24.157780  120899 scheduler.go:454] Attempting to schedule pod: preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-22
I0111 23:42:24.157964  120899 factory.go:1070] Unable to schedule preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-22: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0111 23:42:24.158042  120899 factory.go:1175] Updating pod condition for preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-22 to (PodScheduled==False, Reason=Unschedulable)
I0111 23:42:24.160588  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-22: (2.07302ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39550]
I0111 23:42:24.163922  120899 wrap.go:47] PUT /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-22/status: (2.538063ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39550]
I0111 23:42:24.165634  120899 wrap.go:47] POST /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/events: (6.739268ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39548]
I0111 23:42:24.166719  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-22: (2.078716ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39550]
I0111 23:42:24.167169  120899 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0111 23:42:24.167499  120899 scheduling_queue.go:821] About to try and schedule pod preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-23
I0111 23:42:24.167538  120899 scheduler.go:454] Attempting to schedule pod: preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-23
I0111 23:42:24.167674  120899 factory.go:1070] Unable to schedule preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-23: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0111 23:42:24.167762  120899 factory.go:1175] Updating pod condition for preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-23 to (PodScheduled==False, Reason=Unschedulable)
I0111 23:42:24.171519  120899 wrap.go:47] PUT /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-23/status: (2.723317ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39550]
I0111 23:42:24.172165  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-23: (3.8206ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39548]
I0111 23:42:24.174500  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-23: (1.453214ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39550]
I0111 23:42:24.174926  120899 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0111 23:42:24.175250  120899 scheduling_queue.go:821] About to try and schedule pod preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-11
I0111 23:42:24.175327  120899 scheduler.go:454] Attempting to schedule pod: preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-11
I0111 23:42:24.175540  120899 factory.go:1070] Unable to schedule preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-11: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0111 23:42:24.175631  120899 factory.go:1175] Updating pod condition for preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-11 to (PodScheduled==False, Reason=Unschedulable)
I0111 23:42:24.178984  120899 wrap.go:47] PUT /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-11/status: (2.551405ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39550]
I0111 23:42:24.179602  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-11: (3.538257ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39548]
I0111 23:42:24.182803  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-11: (2.341363ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39550]
I0111 23:42:24.183634  120899 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0111 23:42:24.183935  120899 scheduling_queue.go:821] About to try and schedule pod preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-22
I0111 23:42:24.183948  120899 scheduler.go:454] Attempting to schedule pod: preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-22
I0111 23:42:24.184044  120899 factory.go:1070] Unable to schedule preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-22: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0111 23:42:24.184086  120899 factory.go:1175] Updating pod condition for preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-22 to (PodScheduled==False, Reason=Unschedulable)
I0111 23:42:24.188694  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-22: (4.169435ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39548]
I0111 23:42:24.189872  120899 store.go:355] GuaranteedUpdate of /d246db8b-6af8-44a6-8f81-916ee996e115/pods/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-22 failed because of a conflict, going to retry
E0111 23:42:24.190054  120899 rest.go:216] >>>> caught error : &errors.StatusError{ErrStatus:v1.Status{TypeMeta:v1.TypeMeta{Kind:"", APIVersion:""}, ListMeta:v1.ListMeta{SelfLink:"", ResourceVersion:"", Continue:""}, Status:"Failure", Message:"Operation cannot be fulfilled on pods \"ppod-22\": the object has been modified; please apply your changes to the latest version and try again", Reason:"Conflict", Details:(*v1.StatusDetails)(0xc0105d2ea0), Code:409}}
I0111 23:42:24.190099  120899 update.go:183] Unable to store in database : &errors.StatusError{ErrStatus:v1.Status{TypeMeta:v1.TypeMeta{Kind:"", APIVersion:""}, ListMeta:v1.ListMeta{SelfLink:"", ResourceVersion:"", Continue:""}, Status:"Failure", Message:"Operation cannot be fulfilled on pods \"ppod-22\": the object has been modified; please apply your changes to the latest version and try again", Reason:"Conflict", Details:(*v1.StatusDetails)(0xc0105d2ea0), Code:409}}
I0111 23:42:24.190200  120899 trace.go:84] Trace[2005039381]: "Update /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-22/status" (started: 2019-01-11 23:42:24.18509908 +0000 UTC m=+67.880692873) (total time: 5.069209ms):
Trace[2005039381]: [79.157µs] [79.157µs] About to convert to expected version
Trace[2005039381]: [227.623µs] [148.466µs] Conversion done
Trace[2005039381]: [233.5µs] [5.877µs] About to store object in database
Trace[2005039381]: [5.069209ms] [4.835709ms] END
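(Trace[2005039381] above is emitted by the apiserver's utiltrace helper, which is exactly what this PR instruments — counting trace steps before logging. A minimal sketch of how such a trace is produced, assuming the k8s.io/utils/trace package of this era; the handler name and threshold are illustrative:

import (
        "time"

        utiltrace "k8s.io/utils/trace"
)

func handleStatusUpdate() {
        trace := utiltrace.New("Update /api/v1/namespaces/.../pods/ppod-22/status")
        // Threshold is an assumption; when exceeded (or forced by this
        // PR's changes), the step list above is printed with per-step
        // and cumulative latencies.
        defer trace.LogIfLong(500 * time.Millisecond)

        trace.Step("About to convert to expected version")
        // ... version conversion ...
        trace.Step("Conversion done")
        trace.Step("About to store object in database")
        // ... GuaranteedUpdate; on a stale resourceVersion this is where
        // the Conflict below originates ...
}

Each trace.Step records a timestamped checkpoint; the "[5.069209ms] [4.835709ms] END" line gives total time and time since the last step.)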
I0111 23:42:24.190209  120899 update.go:53] >>>> UpdateResource bad count : &http.Request{Method:"PUT", URL:(*url.URL)(0xc000e67580), Proto:"HTTP/1.1", ProtoMajor:1, ProtoMinor:1, Header:http.Header{"Content-Type":[]string{"application/json"}, "Accept-Encoding":[]string{"gzip"}, "User-Agent":[]string{"scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format"}, "Content-Length":[]string{"1091"}, "Accept":[]string{"application/json, */*"}}, Body:(*http.body)(0xc00eb5a480), GetBody:(func() (io.ReadCloser, error))(nil), ContentLength:1091, TransferEncoding:[]string(nil), Close:false, Host:"127.0.0.1:46609", Form:url.Values(nil), PostForm:url.Values(nil), MultipartForm:(*multipart.Form)(nil), Trailer:http.Header(nil), RemoteAddr:"127.0.0.1:39550", RequestURI:"/api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-22/status", TLS:(*tls.ConnectionState)(nil), Cancel:(<-chan struct {})(nil), Response:(*http.Response)(nil), ctx:(*context.valueCtx)(0xc00da81b90)}
I0111 23:42:24.190346  120899 wrap.go:47] PUT /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-22/status: (5.383662ms) 409 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39550]
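(The 409 above is ordinary optimistic concurrency: GuaranteedUpdate detected that ppod-22's resourceVersion changed between read and write, so the status PUT failed with Conflict, and the GET that follows is the caller refetching before retrying. A common client-side pattern for this, sketched with client-go's retry helper; "cs", "ns", and "name" are placeholders, not names from this build:

import (
        metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
        "k8s.io/client-go/kubernetes"
        "k8s.io/client-go/util/retry"
)

func updateStatusWithRetry(cs kubernetes.Interface, ns, name string) error {
        return retry.RetryOnConflict(retry.DefaultRetry, func() error {
                pod, err := cs.CoreV1().Pods(ns).Get(name, metav1.GetOptions{})
                if err != nil {
                        return err
                }
                // ...mutate pod.Status on the freshly read object...
                _, err = cs.CoreV1().Pods(ns).UpdateStatus(pod)
                return err // a stale resourceVersion yields the 409 seen above
        })
}

This is why TestPreemptionRaces tolerates the Conflict: the next attempt reads the new resourceVersion and succeeds.)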
I0111 23:42:24.192722  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-22: (2.021592ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39550]
I0111 23:42:24.193417  120899 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0111 23:42:24.193887  120899 scheduling_queue.go:821] About to try and schedule pod preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-26
I0111 23:42:24.193934  120899 scheduler.go:454] Attempting to schedule pod: preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-26
I0111 23:42:24.194080  120899 factory.go:1070] Unable to schedule preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-26: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0111 23:42:24.194176  120899 factory.go:1175] Updating pod condition for preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-26 to (PodScheduled==False, Reason=Unschedulable)
I0111 23:42:24.198802  120899 wrap.go:47] PUT /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-26/status: (3.314309ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39550]
I0111 23:42:24.199523  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-26: (4.616608ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39548]
I0111 23:42:24.202470  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-26: (2.305037ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39550]
I0111 23:42:24.202912  120899 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0111 23:42:24.203136  120899 scheduling_queue.go:821] About to try and schedule pod preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-24
I0111 23:42:24.203252  120899 scheduler.go:454] Attempting to schedule pod: preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-24
I0111 23:42:24.203399  120899 factory.go:1070] Unable to schedule preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-24: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0111 23:42:24.203472  120899 factory.go:1175] Updating pod condition for preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-24 to (PodScheduled==False, Reason=Unschedulable)
I0111 23:42:24.207444  120899 wrap.go:47] PUT /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-24/status: (2.854339ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39550]
I0111 23:42:24.208069  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-24: (3.950831ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39548]
I0111 23:42:24.211090  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-24: (1.70951ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39550]
I0111 23:42:24.211472  120899 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0111 23:42:24.211639  120899 scheduling_queue.go:821] About to try and schedule pod preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-1
I0111 23:42:24.211660  120899 scheduler.go:454] Attempting to schedule pod: preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-1
I0111 23:42:24.211789  120899 factory.go:1070] Unable to schedule preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-1: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0111 23:42:24.211847  120899 factory.go:1175] Updating pod condition for preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-1 to (PodScheduled==False, Reason=Unschedulable)
I0111 23:42:24.213458  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-1: (1.315061ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39548]
I0111 23:42:24.214066  120899 wrap.go:47] PUT /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-1/status: (1.995384ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39550]
I0111 23:42:24.215207  120899 cacher.go:598] cacher (*core.Pod): 4 objects queued in incoming channel.
I0111 23:42:24.215233  120899 cacher.go:598] cacher (*core.Pod): 5 objects queued in incoming channel.
I0111 23:42:24.215843  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-1: (1.320836ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39550]
I0111 23:42:24.216071  120899 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0111 23:42:24.216414  120899 scheduling_queue.go:821] About to try and schedule pod preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-4
I0111 23:42:24.216430  120899 scheduler.go:454] Attempting to schedule pod: preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-4
I0111 23:42:24.216500  120899 factory.go:1070] Unable to schedule preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-4: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0111 23:42:24.216540  120899 factory.go:1175] Updating pod condition for preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-4 to (PodScheduled==False, Reason=Unschedulable)
I0111 23:42:24.217236  120899 wrap.go:47] POST /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/events: (47.002876ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39552]
I0111 23:42:24.218831  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-4: (1.173519ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39548]
I0111 23:42:24.220393  120899 wrap.go:47] POST /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/events: (1.818893ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39552]
I0111 23:42:24.222357  120899 wrap.go:47] PUT /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-4/status: (4.585481ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39550]
I0111 23:42:24.224303  120899 wrap.go:47] PATCH /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/events/ppod-22.1578efcef536106a: (2.702336ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39552]
I0111 23:42:24.225131  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-4: (2.331245ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39550]
I0111 23:42:24.225417  120899 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0111 23:42:24.225561  120899 scheduling_queue.go:821] About to try and schedule pod preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-12
I0111 23:42:24.225581  120899 scheduler.go:454] Attempting to schedule pod: preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-12
I0111 23:42:24.225658  120899 factory.go:1070] Unable to schedule preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-12: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0111 23:42:24.225709  120899 factory.go:1175] Updating pod condition for preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-12 to (PodScheduled==False, Reason=Unschedulable)
I0111 23:42:24.227322  120899 wrap.go:47] POST /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/events: (1.974564ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39552]
I0111 23:42:24.231982  120899 wrap.go:47] PATCH /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/events/ppod-24.1578efcee5e1baa1: (2.982859ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39552]
I0111 23:42:24.232347  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-12: (5.164046ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39548]
I0111 23:42:24.232562  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/preemptor-pod: (4.951992ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39554]
I0111 23:42:24.232848  120899 preemption_test.go:583] Check unschedulable pods still exists and were never scheduled...
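(preemption_test.go:583 marks the start of the test's verification phase: the burst of GETs that follows re-reads every ppod-N and asserts it was never bound. A hypothetical sketch of that loop — the real code lives in test/integration/scheduler/preemption_test.go, and "pods", "cs", and "ns" here are guesses, not identifiers taken from it:

for _, p := range pods {
        livePod, err := cs.CoreV1().Pods(ns).Get(p.Name, metav1.GetOptions{})
        if err != nil {
                t.Fatalf("error getting pod %v: %v", p.Name, err)
        }
        if livePod.Spec.NodeName != "" {
                // An unschedulable pod must never have been bound to a node.
                t.Errorf("pod %v was unexpectedly scheduled to %v", p.Name, livePod.Spec.NodeName)
        }
}

The GETs for ppod-0, ppod-1, ppod-2, ... below are this loop executing while the scheduler is still churning through its queue.)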
I0111 23:42:24.233951  120899 wrap.go:47] PUT /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-12/status: (6.521421ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39550]
I0111 23:42:24.236164  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-0: (3.15342ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39554]
I0111 23:42:24.236191  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-12: (1.486726ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39550]
I0111 23:42:24.237116  120899 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0111 23:42:24.238676  120899 wrap.go:47] POST /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/events: (4.274513ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39552]
I0111 23:42:24.238143  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-1: (1.367693ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39554]
I0111 23:42:24.240435  120899 scheduling_queue.go:821] About to try and schedule pod preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-28
I0111 23:42:24.240486  120899 scheduler.go:454] Attempting to schedule pod: preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-28
I0111 23:42:24.240648  120899 factory.go:1070] Unable to schedule preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-28: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0111 23:42:24.240719  120899 factory.go:1175] Updating pod condition for preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-28 to (PodScheduled==False, Reason=Unschedulable)
I0111 23:42:24.241364  120899 wrap.go:47] POST /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/events: (1.899498ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39552]
I0111 23:42:24.244652  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-28: (3.070629ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39548]
I0111 23:42:24.245768  120899 wrap.go:47] PUT /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-28/status: (3.766006ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39552]
I0111 23:42:24.247306  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-28: (1.151871ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39552]
I0111 23:42:24.247536  120899 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0111 23:42:24.247768  120899 scheduling_queue.go:821] About to try and schedule pod preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-29
I0111 23:42:24.247783  120899 scheduler.go:454] Attempting to schedule pod: preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-29
I0111 23:42:24.247899  120899 factory.go:1070] Unable to schedule preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-29: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0111 23:42:24.247940  120899 factory.go:1175] Updating pod condition for preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-29 to (PodScheduled==False, Reason=Unschedulable)
I0111 23:42:24.249653  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-29: (1.370559ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39548]
I0111 23:42:24.250411  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-2: (10.879564ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39550]
I0111 23:42:24.250515  120899 wrap.go:47] POST /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/events: (8.389299ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39558]
I0111 23:42:24.253501  120899 wrap.go:47] POST /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/events: (1.761156ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39558]
I0111 23:42:24.254107  120899 wrap.go:47] PUT /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-29/status: (5.925256ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39552]
I0111 23:42:24.255883  120899 wrap.go:47] POST /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/events: (1.878046ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39556]
I0111 23:42:24.256910  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-3: (4.123989ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39550]
I0111 23:42:24.259141  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-4: (1.573104ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39556]
I0111 23:42:24.259141  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-29: (4.556019ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39552]
I0111 23:42:24.260152  120899 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0111 23:42:24.260376  120899 scheduling_queue.go:821] About to try and schedule pod preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-7
I0111 23:42:24.260415  120899 scheduler.go:454] Attempting to schedule pod: preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-7
I0111 23:42:24.260554  120899 factory.go:1070] Unable to schedule preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-7: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0111 23:42:24.260637  120899 factory.go:1175] Updating pod condition for preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-7 to (PodScheduled==False, Reason=Unschedulable)
I0111 23:42:24.263038  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-5: (3.350372ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39556]
I0111 23:42:24.263489  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-7: (2.549827ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39548]
I0111 23:42:24.263774  120899 wrap.go:47] PUT /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-7/status: (2.86078ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39552]
I0111 23:42:24.266532  120899 wrap.go:47] PATCH /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/events/ppod-7.1578efcee17d2a82: (4.589459ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39560]
I0111 23:42:24.268325  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-6: (4.212154ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39556]
I0111 23:42:24.268564  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-7: (4.247237ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39552]
I0111 23:42:24.269120  120899 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0111 23:42:24.269377  120899 scheduling_queue.go:821] About to try and schedule pod preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-27
I0111 23:42:24.269394  120899 scheduler.go:454] Attempting to schedule pod: preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-27
I0111 23:42:24.269484  120899 factory.go:1070] Unable to schedule preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-27: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0111 23:42:24.269524  120899 factory.go:1175] Updating pod condition for preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-27 to (PodScheduled==False, Reason=Unschedulable)
I0111 23:42:24.270634  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-7: (1.801208ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39560]
I0111 23:42:24.272772  120899 wrap.go:47] PUT /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-27/status: (2.936685ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39552]
I0111 23:42:24.273154  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-27: (2.376441ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39548]
I0111 23:42:24.273509  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-8: (2.168261ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39560]
I0111 23:42:24.273900  120899 wrap.go:47] PATCH /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/events/ppod-27.1578efcee6badae8: (3.099771ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39562]
I0111 23:42:24.276538  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-27: (1.527351ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39548]
I0111 23:42:24.276819  120899 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0111 23:42:24.277137  120899 scheduling_queue.go:821] About to try and schedule pod preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-36
I0111 23:42:24.277107  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-9: (1.849523ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39552]
I0111 23:42:24.277151  120899 scheduler.go:454] Attempting to schedule pod: preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-36
I0111 23:42:24.277246  120899 factory.go:1070] Unable to schedule preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-36: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0111 23:42:24.277310  120899 factory.go:1175] Updating pod condition for preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-36 to (PodScheduled==False, Reason=Unschedulable)
I0111 23:42:24.280080  120899 wrap.go:47] POST /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/events: (1.930263ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39564]
I0111 23:42:24.280144  120899 wrap.go:47] PUT /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-36/status: (2.406243ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39552]
I0111 23:42:24.280544  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-36: (2.929163ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39548]
I0111 23:42:24.282998  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-10: (1.984347ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39566]
I0111 23:42:24.283357  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-36: (1.620015ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39548]
I0111 23:42:24.283584  120899 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0111 23:42:24.283757  120899 scheduling_queue.go:821] About to try and schedule pod preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-32
I0111 23:42:24.283823  120899 scheduler.go:454] Attempting to schedule pod: preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-32
I0111 23:42:24.284728  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-11: (1.212257ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39566]
I0111 23:42:24.286164  120899 factory.go:1070] Unable to schedule preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-32: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0111 23:42:24.286246  120899 factory.go:1175] Updating pod condition for preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-32 to (PodScheduled==False, Reason=Unschedulable)
I0111 23:42:24.286342  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-12: (1.170108ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39548]
I0111 23:42:24.289227  120899 wrap.go:47] POST /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/events: (2.194834ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39568]
I0111 23:42:24.290201  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-13: (2.819661ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39570]
I0111 23:42:24.292658  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-32: (5.558736ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39548]
I0111 23:42:24.292910  120899 wrap.go:47] PUT /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-32/status: (6.223203ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39564]
I0111 23:42:24.294235  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-14: (3.431962ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39570]
I0111 23:42:24.295575  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-32: (2.185333ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39572]
I0111 23:42:24.295906  120899 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0111 23:42:24.296050  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-15: (1.165108ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39570]
I0111 23:42:24.296109  120899 scheduling_queue.go:821] About to try and schedule pod preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-5
I0111 23:42:24.296139  120899 scheduler.go:454] Attempting to schedule pod: preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-5
I0111 23:42:24.296222  120899 factory.go:1070] Unable to schedule preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-5: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0111 23:42:24.296319  120899 factory.go:1175] Updating pod condition for preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-5 to (PodScheduled==False, Reason=Unschedulable)
I0111 23:42:24.299028  120899 wrap.go:47] POST /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/events: (1.896706ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39574]
I0111 23:42:24.300436  120899 wrap.go:47] PUT /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-5/status: (3.772497ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39572]
I0111 23:42:24.300710  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-5: (2.884645ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39576]
I0111 23:42:24.301475  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-16: (3.07088ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39568]
I0111 23:42:24.303567  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-17: (1.293827ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39576]
I0111 23:42:24.303955  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-5: (2.83052ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39572]
I0111 23:42:24.304295  120899 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0111 23:42:24.305258  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-18: (1.233556ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39576]
I0111 23:42:24.306403  120899 scheduling_queue.go:821] About to try and schedule pod preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-13
I0111 23:42:24.306457  120899 scheduler.go:454] Attempting to schedule pod: preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-13
I0111 23:42:24.306637  120899 factory.go:1070] Unable to schedule preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-13: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0111 23:42:24.306714  120899 factory.go:1175] Updating pod condition for preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-13 to (PodScheduled==False, Reason=Unschedulable)
I0111 23:42:24.307151  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-19: (1.441559ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39572]
I0111 23:42:24.308776  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-13: (1.334648ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39578]
I0111 23:42:24.311604  120899 wrap.go:47] PUT /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-13/status: (3.25253ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39574]
I0111 23:42:24.313057  120899 wrap.go:47] POST /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/events: (3.980237ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39578]
I0111 23:42:24.313498  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-13: (1.15178ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39574]
I0111 23:42:24.313867  120899 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0111 23:42:24.314124  120899 scheduling_queue.go:821] About to try and schedule pod preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-33
I0111 23:42:24.314138  120899 scheduler.go:454] Attempting to schedule pod: preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-33
I0111 23:42:24.314214  120899 factory.go:1070] Unable to schedule preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-33: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0111 23:42:24.314250  120899 factory.go:1175] Updating pod condition for preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-33 to (PodScheduled==False, Reason=Unschedulable)
I0111 23:42:24.315697  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-20: (2.138908ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39572]
I0111 23:42:24.318067  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-33: (3.050589ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39580]
I0111 23:42:24.318307  120899 wrap.go:47] POST /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/events: (2.12149ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39572]
I0111 23:42:24.318490  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-21: (2.112064ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39584]
I0111 23:42:24.318893  120899 wrap.go:47] PUT /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-33/status: (3.83107ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39578]
I0111 23:42:24.322464  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-33: (2.867982ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39580]
I0111 23:42:24.322687  120899 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0111 23:42:24.322987  120899 scheduling_queue.go:821] About to try and schedule pod preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-34
I0111 23:42:24.323045  120899 scheduler.go:454] Attempting to schedule pod: preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-34
I0111 23:42:24.323180  120899 factory.go:1070] Unable to schedule preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-34: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0111 23:42:24.323267  120899 factory.go:1175] Updating pod condition for preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-34 to (PodScheduled==False, Reason=Unschedulable)
I0111 23:42:24.327524  120899 wrap.go:47] POST /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/events: (2.020901ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39588]
I0111 23:42:24.328500  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-22: (8.72519ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39582]
I0111 23:42:24.330189  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-34: (5.340039ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39586]
I0111 23:42:24.331607  120899 wrap.go:47] PUT /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-34/status: (7.994488ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39580]
I0111 23:42:24.334402  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-34: (1.513632ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39586]
I0111 23:42:24.334717  120899 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0111 23:42:24.334968  120899 scheduling_queue.go:821] About to try and schedule pod preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-15
I0111 23:42:24.335010  120899 scheduler.go:454] Attempting to schedule pod: preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-15
I0111 23:42:24.335190  120899 factory.go:1070] Unable to schedule preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-15: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0111 23:42:24.335263  120899 factory.go:1175] Updating pod condition for preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-15 to (PodScheduled==False, Reason=Unschedulable)
I0111 23:42:24.338607  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-15: (1.248225ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39588]
I0111 23:42:24.339760  120899 wrap.go:47] PUT /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-15/status: (4.152701ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39586]
I0111 23:42:24.340111  120899 wrap.go:47] POST /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/events: (1.650883ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39590]
I0111 23:42:24.341013  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-23: (11.523665ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39582]
I0111 23:42:24.343202  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-15: (1.494096ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39586]
I0111 23:42:24.343524  120899 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0111 23:42:24.343720  120899 scheduling_queue.go:821] About to try and schedule pod preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-31
I0111 23:42:24.343777  120899 scheduler.go:454] Attempting to schedule pod: preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-31
I0111 23:42:24.343909  120899 factory.go:1070] Unable to schedule preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-31: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0111 23:42:24.343977  120899 factory.go:1175] Updating pod condition for preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-31 to (PodScheduled==False, Reason=Unschedulable)
I0111 23:42:24.345506  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-31: (1.278223ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39586]
I0111 23:42:24.347759  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-24: (6.049472ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39590]
I0111 23:42:24.349531  120899 wrap.go:47] PUT /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-31/status: (5.163595ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39588]
I0111 23:42:24.351298  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-31: (1.29995ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39588]
I0111 23:42:24.351561  120899 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0111 23:42:24.351721  120899 scheduling_queue.go:821] About to try and schedule pod preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-15
I0111 23:42:24.351735  120899 scheduler.go:454] Attempting to schedule pod: preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-15
I0111 23:42:24.351866  120899 factory.go:1070] Unable to schedule preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-15: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0111 23:42:24.351959  120899 factory.go:1175] Updating pod condition for preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-15 to (PodScheduled==False, Reason=Unschedulable)
I0111 23:42:24.353260  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-25: (2.336348ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39590]
I0111 23:42:24.354028  120899 wrap.go:47] PUT /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-15/status: (1.745035ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39588]
I0111 23:42:24.355255  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-15: (2.400165ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39592]
I0111 23:42:24.355766  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-15: (1.294957ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39588]
I0111 23:42:24.356007  120899 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0111 23:42:24.356068  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-26: (2.057782ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39590]
I0111 23:42:24.358136  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-27: (1.284525ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39588]
I0111 23:42:24.359048  120899 scheduling_queue.go:821] About to try and schedule pod preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-36
I0111 23:42:24.359068  120899 scheduler.go:454] Attempting to schedule pod: preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-36
I0111 23:42:24.359187  120899 factory.go:1070] Unable to schedule preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-36: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0111 23:42:24.359228  120899 factory.go:1175] Updating pod condition for preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-36 to (PodScheduled==False, Reason=Unschedulable)
I0111 23:42:24.360717  120899 wrap.go:47] PATCH /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/events/ppod-31.1578efcee8152794: (10.504946ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39586]
I0111 23:42:24.363989  120899 wrap.go:47] PUT /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-36/status: (4.257912ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39588]
I0111 23:42:24.364486  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-36: (4.461619ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39594]
I0111 23:42:24.364881  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-28: (5.372749ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39592]
I0111 23:42:24.367617  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-29: (1.590324ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39592]
I0111 23:42:24.368028  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-36: (2.395804ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39588]
I0111 23:42:24.368265  120899 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0111 23:42:24.370244  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-30: (1.402669ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39588]
I0111 23:42:24.372059  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-31: (1.3443ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39588]
I0111 23:42:24.373649  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-32: (1.231444ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39588]
I0111 23:42:24.375154  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-33: (1.151897ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39588]
I0111 23:42:24.375461  120899 scheduling_queue.go:821] About to try and schedule pod preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-27
I0111 23:42:24.375843  120899 scheduler.go:454] Attempting to schedule pod: preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-27
I0111 23:42:24.375951  120899 factory.go:1070] Unable to schedule preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-27: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0111 23:42:24.375988  120899 factory.go:1175] Updating pod condition for preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-27 to (PodScheduled==False, Reason=Unschedulable)
I0111 23:42:24.379526  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-34: (2.074863ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39588]
I0111 23:42:24.379959  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-27: (3.37308ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39596]
I0111 23:42:24.380584  120899 wrap.go:47] PUT /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-27/status: (3.636344ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39594]
I0111 23:42:24.383210  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-27: (1.55619ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39594]
I0111 23:42:24.383608  120899 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0111 23:42:24.384069  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-35: (2.73088ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39588]
I0111 23:42:24.384578  120899 scheduling_queue.go:821] About to try and schedule pod preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-20
I0111 23:42:24.384590  120899 scheduler.go:454] Attempting to schedule pod: preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-20
I0111 23:42:24.384685  120899 factory.go:1070] Unable to schedule preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-20: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0111 23:42:24.384721  120899 factory.go:1175] Updating pod condition for preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-20 to (PodScheduled==False, Reason=Unschedulable)
I0111 23:42:24.387166  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-20: (1.333059ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39598]
I0111 23:42:24.387885  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-36: (2.505361ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39594]
I0111 23:42:24.389432  120899 wrap.go:47] PUT /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-20/status: (4.058792ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39596]
I0111 23:42:24.391509  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-20: (1.730862ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39596]
I0111 23:42:24.392007  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-37: (3.808702ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39594]
I0111 23:42:24.392566  120899 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0111 23:42:24.392802  120899 scheduling_queue.go:821] About to try and schedule pod preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-35
I0111 23:42:24.392818  120899 scheduler.go:454] Attempting to schedule pod: preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-35
I0111 23:42:24.392903  120899 factory.go:1070] Unable to schedule preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-35: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0111 23:42:24.392943  120899 factory.go:1175] Updating pod condition for preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-35 to (PodScheduled==False, Reason=Unschedulable)
I0111 23:42:24.396672  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-35: (3.419495ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39598]
I0111 23:42:24.397191  120899 wrap.go:47] PUT /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-35/status: (3.552976ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39596]
I0111 23:42:24.397642  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-38: (3.287891ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39594]
I0111 23:42:24.400623  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-39: (1.788554ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39594]
I0111 23:42:24.401359  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-35: (2.925496ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39596]
I0111 23:42:24.401836  120899 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0111 23:42:24.402111  120899 scheduling_queue.go:821] About to try and schedule pod preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-43
I0111 23:42:24.402125  120899 scheduler.go:454] Attempting to schedule pod: preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-43
I0111 23:42:24.402199  120899 factory.go:1070] Unable to schedule preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-43: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0111 23:42:24.402254  120899 factory.go:1175] Updating pod condition for preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-43 to (PodScheduled==False, Reason=Unschedulable)
I0111 23:42:24.405493  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-43: (2.649134ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39598]
I0111 23:42:24.405909  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-40: (2.69843ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39594]
I0111 23:42:24.406210  120899 wrap.go:47] PUT /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-43/status: (2.727301ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39596]
I0111 23:42:24.407666  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-41: (1.427774ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39594]
I0111 23:42:24.409923  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-42: (1.79546ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39594]
I0111 23:42:24.410363  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-43: (3.794554ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39596]
I0111 23:42:24.412815  120899 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0111 23:42:24.414216  120899 scheduling_queue.go:821] About to try and schedule pod preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-2
I0111 23:42:24.414242  120899 scheduler.go:454] Attempting to schedule pod: preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-2
I0111 23:42:24.414364  120899 factory.go:1070] Unable to schedule preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-2: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0111 23:42:24.414421  120899 factory.go:1175] Updating pod condition for preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-2 to (PodScheduled==False, Reason=Unschedulable)
I0111 23:42:24.417861  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-43: (4.39114ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39596]
I0111 23:42:24.419981  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-44: (1.693273ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39596]
I0111 23:42:24.420552  120899 wrap.go:47] PUT /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-2/status: (5.117319ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39598]
I0111 23:42:24.420979  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-2: (5.933256ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39606]
I0111 23:42:24.423693  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-2: (1.60026ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39598]
I0111 23:42:24.424208  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-45: (2.496138ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39596]
I0111 23:42:24.424676  120899 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0111 23:42:24.425274  120899 scheduling_queue.go:821] About to try and schedule pod preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-41
I0111 23:42:24.425324  120899 scheduler.go:454] Attempting to schedule pod: preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-41
I0111 23:42:24.425428  120899 factory.go:1070] Unable to schedule preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-41: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0111 23:42:24.425479  120899 factory.go:1175] Updating pod condition for preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-41 to (PodScheduled==False, Reason=Unschedulable)
I0111 23:42:24.428391  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-41: (2.537787ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39606]
I0111 23:42:24.428780  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-46: (3.906753ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39596]
I0111 23:42:24.429239  120899 wrap.go:47] PUT /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-41/status: (3.073806ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39598]
I0111 23:42:24.431963  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-47: (1.856438ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39596]
I0111 23:42:24.433594  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-48: (1.318349ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39596]
I0111 23:42:24.434027  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-41: (3.539251ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39598]
I0111 23:42:24.434309  120899 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0111 23:42:24.435881  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-49: (1.188997ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39598]
I0111 23:42:24.436106  120899 preemption_test.go:598] Cleaning up all pods...
I0111 23:42:24.439374  120899 scheduling_queue.go:821] About to try and schedule pod preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-3
I0111 23:42:24.439402  120899 scheduler.go:454] Attempting to schedule pod: preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-3
I0111 23:42:24.439519  120899 factory.go:1070] Unable to schedule preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-3: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0111 23:42:24.439573  120899 factory.go:1175] Updating pod condition for preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-3 to (PodScheduled==False, Reason=Unschedulable)
I0111 23:42:24.442729  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-3: (2.482977ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39608]
I0111 23:42:24.443446  120899 wrap.go:47] PUT /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-3/status: (2.820536ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39606]
I0111 23:42:24.447221  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-3: (3.093982ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39606]
I0111 23:42:24.447559  120899 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0111 23:42:24.448472  120899 wrap.go:47] DELETE /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-0: (12.206581ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39598]
I0111 23:42:24.448932  120899 scheduling_queue.go:821] About to try and schedule pod preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-48
I0111 23:42:24.448956  120899 scheduler.go:454] Attempting to schedule pod: preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-48
I0111 23:42:24.449080  120899 factory.go:1070] Unable to schedule preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-48: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0111 23:42:24.449131  120899 factory.go:1175] Updating pod condition for preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-48 to (PodScheduled==False, Reason=Unschedulable)
I0111 23:42:24.452676  120899 wrap.go:47] PUT /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-48/status: (2.479597ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39608]
I0111 23:42:24.453171  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-48: (3.40206ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39610]
I0111 23:42:24.455486  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-48: (1.534382ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39608]
I0111 23:42:24.455945  120899 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0111 23:42:24.456222  120899 scheduling_queue.go:821] About to try and schedule pod preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-42
I0111 23:42:24.456258  120899 scheduler.go:454] Attempting to schedule pod: preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-42
I0111 23:42:24.456384  120899 factory.go:1070] Unable to schedule preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-42: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0111 23:42:24.456489  120899 factory.go:1175] Updating pod condition for preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-42 to (PodScheduled==False, Reason=Unschedulable)
I0111 23:42:24.459495  120899 wrap.go:47] PUT /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-42/status: (2.588499ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39608]
I0111 23:42:24.460575  120899 wrap.go:47] PATCH /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/events/ppod-15.1578efceffc63ec5: (99.0595ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39586]
I0111 23:42:24.462425  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-42: (1.213806ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39608]
I0111 23:42:24.462736  120899 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0111 23:42:24.463046  120899 scheduling_queue.go:821] About to try and schedule pod preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-49
I0111 23:42:24.463076  120899 scheduler.go:454] Attempting to schedule pod: preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-49
I0111 23:42:24.463179  120899 factory.go:1070] Unable to schedule preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-49: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0111 23:42:24.464168  120899 factory.go:1175] Updating pod condition for preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-49 to (PodScheduled==False, Reason=Unschedulable)
I0111 23:42:24.464105  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-42: (5.401079ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39610]
I0111 23:42:24.465435  120899 wrap.go:47] DELETE /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-1: (4.762421ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39606]
I0111 23:42:24.468174  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-49: (2.495561ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39586]
I0111 23:42:24.472112  120899 wrap.go:47] PATCH /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/events/ppod-36.1578efcefc519893: (5.015439ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39610]
I0111 23:42:24.472141  120899 wrap.go:47] PUT /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-49/status: (7.303712ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39608]
I0111 23:42:24.474881  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-49: (1.288561ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39608]
I0111 23:42:24.475392  120899 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0111 23:42:24.475802  120899 scheduling_queue.go:821] About to try and schedule pod preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-10
I0111 23:42:24.475873  120899 scheduler.go:454] Attempting to schedule pod: preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-10
I0111 23:42:24.476054  120899 factory.go:1070] Unable to schedule preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-10: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0111 23:42:24.476197  120899 factory.go:1175] Updating pod condition for preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-10 to (PodScheduled==False, Reason=Unschedulable)
I0111 23:42:24.479156  120899 wrap.go:47] PATCH /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/events/ppod-27.1578efcee6badae8: (5.825635ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39610]
I0111 23:42:24.479224  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-10: (2.441769ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39586]
I0111 23:42:24.479550  120899 wrap.go:47] PUT /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-10/status: (2.927878ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39608]
I0111 23:42:24.479621  120899 wrap.go:47] DELETE /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-2: (13.670431ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39606]
I0111 23:42:24.482668  120899 wrap.go:47] PATCH /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/events/ppod-20.1578efcef19f6fbc: (2.629713ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39608]
I0111 23:42:24.485757  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-10: (5.621112ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39610]
I0111 23:42:24.486254  120899 wrap.go:47] PATCH /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/events/ppod-35.1578efcee953046d: (2.996375ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39608]
I0111 23:42:24.487917  120899 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0111 23:42:24.488169  120899 scheduling_queue.go:821] About to try and schedule pod preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-49
I0111 23:42:24.488202  120899 scheduler.go:454] Attempting to schedule pod: preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-49
I0111 23:42:24.488327  120899 factory.go:1070] Unable to schedule preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-49: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0111 23:42:24.488373  120899 factory.go:1175] Updating pod condition for preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-49 to (PodScheduled==False, Reason=Unschedulable)
I0111 23:42:24.489609  120899 wrap.go:47] DELETE /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-3: (9.581855ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39586]
I0111 23:42:24.490976  120899 wrap.go:47] PUT /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-49/status: (2.164413ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39608]
I0111 23:42:24.491326  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-49: (2.537203ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39610]
I0111 23:42:24.493882  120899 wrap.go:47] PATCH /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/events/ppod-43.1578efceec671369: (3.359367ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39612]
I0111 23:42:24.495088  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-49: (2.849555ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39608]
I0111 23:42:24.495963  120899 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0111 23:42:24.496246  120899 scheduling_queue.go:821] About to try and schedule pod preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-5
I0111 23:42:24.496329  120899 scheduler.go:454] Attempting to schedule pod: preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-5
I0111 23:42:24.496522  120899 factory.go:1070] Unable to schedule preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-5: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0111 23:42:24.496682  120899 factory.go:1175] Updating pod condition for preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-5 to (PodScheduled==False, Reason=Unschedulable)
I0111 23:42:24.497347  120899 wrap.go:47] DELETE /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-4: (6.867242ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39586]
I0111 23:42:24.498719  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-5: (1.284252ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39610]
I0111 23:42:24.505029  120899 wrap.go:47] DELETE /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-5: (6.994795ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39586]
E0111 23:42:24.505342  120899 rest.go:216] >>>> caught error : &errors.StatusError{ErrStatus:v1.Status{TypeMeta:v1.TypeMeta{Kind:"", APIVersion:""}, ListMeta:v1.ListMeta{SelfLink:"", ResourceVersion:"", Continue:""}, Status:"Failure", Message:"Operation cannot be fulfilled on pods \"ppod-5\": the object has been modified; please apply your changes to the latest version and try again", Reason:"Conflict", Details:(*v1.StatusDetails)(0xc011701b00), Code:409}}
I0111 23:42:24.505928  120899 update.go:183] Unable to store in database : &errors.StatusError{ErrStatus:v1.Status{TypeMeta:v1.TypeMeta{Kind:"", APIVersion:""}, ListMeta:v1.ListMeta{SelfLink:"", ResourceVersion:"", Continue:""}, Status:"Failure", Message:"Operation cannot be fulfilled on pods \"ppod-5\": the object has been modified; please apply your changes to the latest version and try again", Reason:"Conflict", Details:(*v1.StatusDetails)(0xc011701b00), Code:409}}
I0111 23:42:24.506339  120899 trace.go:84] Trace[334867467]: "Update /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-5/status" (started: 2019-01-11 23:42:24.49762884 +0000 UTC m=+68.193222643) (total time: 8.426057ms):
Trace[334867467]: [73.168µs] [73.168µs] About to convert to expected version
Trace[334867467]: [217.461µs] [144.293µs] Conversion done
Trace[334867467]: [222.733µs] [5.272µs] About to store object in database
Trace[334867467]: [8.426057ms] [8.203324ms] END
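The Trace[...] block above is the apiserver's request-latency trace for the ppod-5 status update; a minimal sketch of the pattern, assuming the k8s.io/utils/trace package the update handler uses (the threshold and the sleeps are illustrative stand-ins, not the apiserver's actual values):

```go
package main

import (
	"time"

	utiltrace "k8s.io/utils/trace" // latency-tracing helper used by the apiserver
)

func main() {
	// Name and step messages mirror the Trace[...] block in the log above.
	trace := utiltrace.New("Update /api/v1/.../pods/ppod-5/status")
	// Emit the trace, with per-step deltas, only if the request was slow.
	defer trace.LogIfLong(5 * time.Millisecond)

	trace.Step("About to convert to expected version")
	time.Sleep(time.Millisecond) // stand-in for version conversion
	trace.Step("Conversion done")
	trace.Step("About to store object in database")
	time.Sleep(10 * time.Millisecond) // stand-in for the etcd write
}
```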
I0111 23:42:24.506524  120899 update.go:53] >>>> UpdateResource bad count : &http.Request{Method:"PUT", URL:(*url.URL)(0xc003c8ba80), Proto:"HTTP/1.1", ProtoMajor:1, ProtoMinor:1, Header:http.Header{"User-Agent":[]string{"scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format"}, "Content-Length":[]string{"1087"}, "Accept":[]string{"application/json, */*"}, "Content-Type":[]string{"application/json"}, "Accept-Encoding":[]string{"gzip"}}, Body:(*http.body)(0xc0124f4400), GetBody:(func() (io.ReadCloser, error))(nil), ContentLength:1087, TransferEncoding:[]string(nil), Close:false, Host:"127.0.0.1:46609", Form:url.Values(nil), PostForm:url.Values(nil), MultipartForm:(*multipart.Form)(nil), Trailer:http.Header(nil), RemoteAddr:"127.0.0.1:39608", RequestURI:"/api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-5/status", TLS:(*tls.ConnectionState)(nil), Cancel:(<-chan struct {})(nil), Response:(*http.Response)(nil), ctx:(*context.valueCtx)(0xc011bf4450)}
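The one-line struct dump above is this PR's new request logging; a minimal sketch of the idea, assuming glog (the logger in the 2019 tree) and a hypothetical middleware wrapper, where the `%#v` verb produces the `&http.Request{...}` form seen in the log:

```go
package main

import (
	"net/http"

	"github.com/golang/glog" // logger Kubernetes used at the time
)

// logRequest is a hypothetical wrapper showing how a handler can dump the
// incoming *http.Request before delegating to the real handler.
func logRequest(next http.Handler) http.Handler {
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		glog.Infof(">>>> UpdateResource bad count : %#v", r)
		next.ServeHTTP(w, r)
	})
}
```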
I0111 23:42:24.505878  120899 wrap.go:47] PATCH /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/events/ppod-2.1578efceed391acc: (7.828967ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39612]
I0111 23:42:24.509629  120899 wrap.go:47] PUT /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-5/status: (12.336847ms) 409 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39608]
I0111 23:42:24.512673  120899 get.go:70] Unable to get resource : &errors.StatusError{ErrStatus:v1.Status{TypeMeta:v1.TypeMeta{Kind:"", APIVersion:""}, ListMeta:v1.ListMeta{SelfLink:"", ResourceVersion:"", Continue:""}, Status:"Failure", Message:"pods \"ppod-5\" not found", Reason:"NotFound", Details:(*v1.StatusDetails)(0xc0117acde0), Code:404}}
I0111 23:42:24.512967  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-5: (1.335025ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39612]
E0111 23:42:24.513328  120899 scheduler.go:292] Error getting the updated preemptor pod object: pods "ppod-5" not found
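The 409 above is the standard optimistic-concurrency failure: the PUT carried a stale resourceVersion because the test's cleanup deleted ppod-5 concurrently, and the follow-up GET then saw NotFound. A minimal sketch of the usual client-side remedy with client-go's retry helper, assuming 2019-era (pre-context) client-go signatures; the clientset, namespace, and mutation are illustrative:

```go
package main

import (
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/util/retry"
)

// updatePodStatus retries the read-modify-write on 409 Conflict, re-reading
// the latest object on each attempt so the resourceVersion stays fresh.
func updatePodStatus(cs kubernetes.Interface, ns, name string) error {
	return retry.RetryOnConflict(retry.DefaultRetry, func() error {
		pod, err := cs.CoreV1().Pods(ns).Get(name, metav1.GetOptions{})
		if err != nil {
			return err // e.g. NotFound once cleanup has deleted the pod
		}
		pod.Status.Message = "updated by example" // the mutation under test
		_, err = cs.CoreV1().Pods(ns).UpdateStatus(pod)
		return err
	})
}
```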
I0111 23:42:24.513641  120899 scheduling_queue.go:821] About to try and schedule pod preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-32
I0111 23:42:24.513667  120899 scheduler.go:454] Attempting to schedule pod: preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-32
I0111 23:42:24.513822  120899 factory.go:1070] Unable to schedule preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-32: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0111 23:42:24.513883  120899 factory.go:1175] Updating pod condition for preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-32 to (PodScheduled==False, Reason=Unschedulable)
I0111 23:42:24.514691  120899 wrap.go:47] DELETE /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-6: (8.221802ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39586]
I0111 23:42:24.517191  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-32: (2.226387ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39610]
I0111 23:42:24.520387  120899 wrap.go:47] PATCH /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/events/ppod-41.1578efcef0be4123: (6.260116ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39612]
I0111 23:42:24.521976  120899 wrap.go:47] PUT /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-32/status: (4.43515ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39614]
I0111 23:42:24.526250  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-32: (3.865362ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39614]
I0111 23:42:24.526664  120899 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0111 23:42:24.526877  120899 scheduling_queue.go:821] About to try and schedule pod preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-47
I0111 23:42:24.526891  120899 scheduler.go:454] Attempting to schedule pod: preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-47
I0111 23:42:24.527008  120899 factory.go:1070] Unable to schedule preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-47: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0111 23:42:24.527056  120899 factory.go:1175] Updating pod condition for preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-47 to (PodScheduled==False, Reason=Unschedulable)
I0111 23:42:24.527527  120899 wrap.go:47] PATCH /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/events/ppod-3.1578efcef465fc6e: (6.555392ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39612]
I0111 23:42:24.530848  120899 wrap.go:47] POST /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/events: (1.726035ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39612]
I0111 23:42:24.534354  120899 wrap.go:47] PATCH /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/events/ppod-42.1578efcef205ec92: (2.659493ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39612]
I0111 23:42:24.536722  120899 wrap.go:47] POST /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/events: (1.646879ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39612]
I0111 23:42:24.539906  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-47: (12.054373ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39610]
I0111 23:42:24.540443  120899 wrap.go:47] PUT /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-47/status: (12.529551ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39614]
I0111 23:42:24.542818  120899 wrap.go:47] PATCH /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/events/ppod-10.1578efcee2680e5f: (5.382281ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39612]
I0111 23:42:24.545810  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-47: (2.780145ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39610]
I0111 23:42:24.546070  120899 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0111 23:42:24.547111  120899 wrap.go:47] PATCH /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/events/ppod-49.1578efcf077506cf: (3.007718ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39612]
I0111 23:42:24.548812  120899 scheduling_queue.go:821] About to try and schedule pod preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-45
I0111 23:42:24.548876  120899 scheduler.go:454] Attempting to schedule pod: preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-45
I0111 23:42:24.549047  120899 factory.go:1070] Unable to schedule preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-45: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0111 23:42:24.549125  120899 factory.go:1175] Updating pod condition for preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-45 to (PodScheduled==False, Reason=Unschedulable)
I0111 23:42:24.550298  120899 wrap.go:47] PATCH /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/events/ppod-5.1578efcefd73efca: (2.535273ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39610]
I0111 23:42:24.552891  120899 wrap.go:47] DELETE /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-7: (33.02604ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39586]
I0111 23:42:24.553447  120899 wrap.go:47] PUT /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-45/status: (3.302099ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39614]
I0111 23:42:24.553875  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-45: (4.043841ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39616]
I0111 23:42:24.556132  120899 wrap.go:47] PATCH /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/events/ppod-32.1578efcefcda4f2d: (5.193402ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39610]
I0111 23:42:24.557262  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-45: (2.048645ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39614]
I0111 23:42:24.559133  120899 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0111 23:42:24.559271  120899 scheduling_queue.go:821] About to try and schedule pod preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-47
I0111 23:42:24.559304  120899 scheduler.go:454] Attempting to schedule pod: preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-47
I0111 23:42:24.559385  120899 factory.go:1070] Unable to schedule preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-47: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0111 23:42:24.559469  120899 factory.go:1175] Updating pod condition for preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-47 to (PodScheduled==False, Reason=Unschedulable)
I0111 23:42:24.559735  120899 wrap.go:47] POST /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/events: (2.913663ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39610]
I0111 23:42:24.561046  120899 wrap.go:47] DELETE /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-8: (5.972205ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39586]
I0111 23:42:24.561518  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-47: (1.780792ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39614]
I0111 23:42:24.562411  120899 wrap.go:47] PUT /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-47/status: (2.258764ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39616]
I0111 23:42:24.565464  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-47: (2.000381ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39616]
I0111 23:42:24.566592  120899 wrap.go:47] DELETE /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-9: (4.693503ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39586]
I0111 23:42:24.566561  120899 wrap.go:47] PATCH /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/events/ppod-45.1578efcef40345ea: (5.960043ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39610]
I0111 23:42:24.567110  120899 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0111 23:42:24.567309  120899 scheduling_queue.go:821] About to try and schedule pod preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-46
I0111 23:42:24.567829  120899 scheduler.go:454] Attempting to schedule pod: preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-46
I0111 23:42:24.568441  120899 factory.go:1070] Unable to schedule preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-46: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0111 23:42:24.568527  120899 factory.go:1175] Updating pod condition for preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-46 to (PodScheduled==False, Reason=Unschedulable)
I0111 23:42:24.571707  120899 wrap.go:47] PATCH /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/events/ppod-47.1578efcf0b34c793: (2.890222ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39610]
I0111 23:42:24.571916  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-46: (2.46411ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39614]
I0111 23:42:24.574261  120899 wrap.go:47] DELETE /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-10: (7.134464ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39586]
I0111 23:42:24.576420  120899 wrap.go:47] POST /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/events: (2.714486ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39610]
I0111 23:42:24.576422  120899 wrap.go:47] PUT /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-46/status: (4.360403ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39618]
I0111 23:42:24.578719  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-46: (1.198604ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39618]
I0111 23:42:24.579056  120899 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0111 23:42:24.579339  120899 scheduling_queue.go:821] About to try and schedule pod preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-21
I0111 23:42:24.579387  120899 scheduler.go:454] Attempting to schedule pod: preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-21
I0111 23:42:24.579527  120899 factory.go:1070] Unable to schedule preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-21: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0111 23:42:24.579600  120899 factory.go:1175] Updating pod condition for preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-21 to (PodScheduled==False, Reason=Unschedulable)
I0111 23:42:24.582691  120899 wrap.go:47] DELETE /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-11: (8.042016ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39586]
I0111 23:42:24.583122  120899 wrap.go:47] PUT /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-21/status: (2.93642ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39610]
I0111 23:42:24.583150  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-21: (3.311084ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39614]
I0111 23:42:24.585863  120899 wrap.go:47] PATCH /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/events/ppod-21.1578efcef2d38b1f: (5.386992ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39620]
I0111 23:42:24.587155  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-21: (3.55595ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39610]
I0111 23:42:24.587591  120899 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0111 23:42:24.587774  120899 scheduling_queue.go:821] About to try and schedule pod preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-46
I0111 23:42:24.587790  120899 scheduler.go:454] Attempting to schedule pod: preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-46
I0111 23:42:24.587917  120899 factory.go:1070] Unable to schedule preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-46: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0111 23:42:24.587959  120899 factory.go:1175] Updating pod condition for preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-46 to (PodScheduled==False, Reason=Unschedulable)
I0111 23:42:24.590765  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-46: (1.241716ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39622]
I0111 23:42:24.591701  120899 wrap.go:47] DELETE /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-12: (7.840766ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39586]
I0111 23:42:24.592321  120899 wrap.go:47] PATCH /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/events/ppod-46.1578efcf0dad8edf: (3.559206ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39614]
I0111 23:42:24.592332  120899 wrap.go:47] PUT /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-46/status: (4.141162ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39620]
I0111 23:42:24.594657  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-46: (1.534898ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39614]
I0111 23:42:24.595041  120899 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0111 23:42:24.595204  120899 scheduling_queue.go:821] About to try and schedule pod preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-34
I0111 23:42:24.595230  120899 scheduler.go:454] Attempting to schedule pod: preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-34
I0111 23:42:24.595543  120899 factory.go:1070] Unable to schedule preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-34: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0111 23:42:24.595636  120899 factory.go:1175] Updating pod condition for preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-34 to (PodScheduled==False, Reason=Unschedulable)
I0111 23:42:24.598542  120899 wrap.go:47] PUT /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-34/status: (2.28702ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39614]
I0111 23:42:24.599965  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-34: (2.804984ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39622]
I0111 23:42:24.600798  120899 wrap.go:47] PATCH /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/events/ppod-34.1578efceff0f3502: (3.599705ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39624]
I0111 23:42:24.601296  120899 wrap.go:47] DELETE /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-13: (8.455787ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39586]
I0111 23:42:24.601325  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-34: (2.144826ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39614]
I0111 23:42:24.603553  120899 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0111 23:42:24.603871  120899 scheduling_queue.go:821] About to try and schedule pod preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-37
I0111 23:42:24.603934  120899 scheduler.go:454] Attempting to schedule pod: preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-37
I0111 23:42:24.604078  120899 factory.go:1070] Unable to schedule preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-37: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0111 23:42:24.604159  120899 factory.go:1175] Updating pod condition for preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-37 to (PodScheduled==False, Reason=Unschedulable)
I0111 23:42:24.606546  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-37: (1.997149ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39614]
I0111 23:42:24.607075  120899 wrap.go:47] PUT /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-37/status: (2.082087ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39626]
I0111 23:42:24.609398  120899 wrap.go:47] PATCH /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/events/ppod-37.1578efcee9c896b8: (4.42384ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39622]
I0111 23:42:24.610629  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-37: (2.711129ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39626]
I0111 23:42:24.610658  120899 wrap.go:47] DELETE /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-14: (7.793083ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39624]
I0111 23:42:24.611063  120899 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0111 23:42:24.611330  120899 scheduling_queue.go:821] About to try and schedule pod preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-19
I0111 23:42:24.611379  120899 scheduler.go:454] Attempting to schedule pod: preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-19
I0111 23:42:24.611522  120899 factory.go:1070] Unable to schedule preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-19: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0111 23:42:24.611600  120899 factory.go:1175] Updating pod condition for preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-19 to (PodScheduled==False, Reason=Unschedulable)
I0111 23:42:24.615832  120899 wrap.go:47] PUT /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-19/status: (3.22741ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39628]
I0111 23:42:24.615963  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-19: (4.054521ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39614]
I0111 23:42:24.616234  120899 wrap.go:47] PATCH /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/events/ppod-19.1578efceefc47341: (3.319125ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39630]
I0111 23:42:24.617562  120899 wrap.go:47] DELETE /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-15: (6.214696ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39622]
I0111 23:42:24.619082  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-19: (1.681585ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39614]
I0111 23:42:24.619404  120899 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0111 23:42:24.619665  120899 scheduling_queue.go:821] About to try and schedule pod preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-29
I0111 23:42:24.619729  120899 scheduler.go:454] Attempting to schedule pod: preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-29
I0111 23:42:24.619959  120899 factory.go:1070] Unable to schedule preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-29: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0111 23:42:24.620112  120899 factory.go:1175] Updating pod condition for preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-29 to (PodScheduled==False, Reason=Unschedulable)
I0111 23:42:24.622874  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-29: (2.512475ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39614]
I0111 23:42:24.623482  120899 wrap.go:47] PUT /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-29/status: (2.810013ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39628]
I0111 23:42:24.624938  120899 wrap.go:47] DELETE /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-16: (6.580375ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39622]
I0111 23:42:24.626166  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-29: (1.546019ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39614]
I0111 23:42:24.627425  120899 generic_scheduler.go:1108] Node node1 is a potential node for preemption.
I0111 23:42:24.627452  120899 wrap.go:47] PATCH /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/events/ppod-29.1578efcefa91d37e: (6.536689ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39632]
I0111 23:42:24.629934  120899 scheduling_queue.go:821] About to try and schedule pod preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-17
I0111 23:42:24.630045  120899 scheduler.go:450] Skip schedule deleting pod: preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-17
I0111 23:42:24.632152  120899 wrap.go:47] POST /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/events: (1.7165ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39614]
I0111 23:42:24.632186  120899 wrap.go:47] DELETE /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-17: (6.161076ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39622]
I0111 23:42:24.635474  120899 scheduling_queue.go:821] About to try and schedule pod preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-18
I0111 23:42:24.635571  120899 scheduler.go:450] Skip schedule deleting pod: preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-18
I0111 23:42:24.637340  120899 wrap.go:47] DELETE /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-18: (4.705484ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39622]
I0111 23:42:24.638044  120899 wrap.go:47] POST /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/events: (1.517287ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39628]
I0111 23:42:24.641695  120899 scheduling_queue.go:821] About to try and schedule pod preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-19
I0111 23:42:24.641803  120899 scheduler.go:450] Skip schedule deleting pod: preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-19
I0111 23:42:24.643965  120899 wrap.go:47] DELETE /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-19: (5.895001ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39622]
I0111 23:42:24.644717  120899 wrap.go:47] POST /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/events: (2.273383ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39628]
I0111 23:42:24.648862  120899 scheduling_queue.go:821] About to try and schedule pod preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-20
I0111 23:42:24.648964  120899 scheduler.go:450] Skip schedule deleting pod: preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-20
I0111 23:42:24.651631  120899 wrap.go:47] POST /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/events: (2.277436ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39628]
I0111 23:42:24.654962  120899 wrap.go:47] DELETE /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-20: (9.946767ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39622]
I0111 23:42:24.660462  120899 scheduling_queue.go:821] About to try and schedule pod preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-21
I0111 23:42:24.660571  120899 scheduler.go:450] Skip schedule deleting pod: preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-21
I0111 23:42:24.662916  120899 wrap.go:47] POST /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/events: (1.986221ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39628]
I0111 23:42:24.665756  120899 wrap.go:47] DELETE /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-21: (9.230314ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39622]
I0111 23:42:24.669383  120899 scheduling_queue.go:821] About to try and schedule pod preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-22
I0111 23:42:24.669537  120899 scheduler.go:450] Skip schedule deleting pod: preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-22
I0111 23:42:24.671692  120899 wrap.go:47] DELETE /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-22: (5.479642ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39622]
I0111 23:42:24.672103  120899 wrap.go:47] POST /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/events: (2.092737ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39628]
I0111 23:42:24.677018  120899 wrap.go:47] DELETE /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-23: (4.696732ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39622]
I0111 23:42:24.680574  120899 scheduling_queue.go:821] About to try and schedule pod preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-24
I0111 23:42:24.680630  120899 scheduler.go:450] Skip schedule deleting pod: preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-24
I0111 23:42:24.683131  120899 wrap.go:47] DELETE /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-24: (5.628225ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39622]
I0111 23:42:24.683820  120899 wrap.go:47] POST /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/events: (2.171218ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39628]
I0111 23:42:24.686826  120899 scheduling_queue.go:821] About to try and schedule pod preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-25
I0111 23:42:24.686977  120899 scheduler.go:450] Skip schedule deleting pod: preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-25
I0111 23:42:24.689231  120899 wrap.go:47] POST /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/events: (1.729001ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39628]
I0111 23:42:24.690816  120899 wrap.go:47] DELETE /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-25: (7.006991ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39622]
I0111 23:42:24.694431  120899 scheduling_queue.go:821] About to try and schedule pod preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-26
I0111 23:42:24.694480  120899 scheduler.go:450] Skip schedule deleting pod: preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-26
I0111 23:42:24.696177  120899 wrap.go:47] DELETE /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-26: (5.034786ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39622]
I0111 23:42:24.697490  120899 wrap.go:47] POST /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/events: (2.610797ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39628]
I0111 23:42:24.700169  120899 scheduling_queue.go:821] About to try and schedule pod preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-27
I0111 23:42:24.700421  120899 scheduler.go:450] Skip schedule deleting pod: preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-27
I0111 23:42:24.703519  120899 wrap.go:47] DELETE /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-27: (6.92343ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39622]
I0111 23:42:24.704056  120899 wrap.go:47] POST /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/events: (3.220616ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39628]
I0111 23:42:24.707572  120899 scheduling_queue.go:821] About to try and schedule pod preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-28
I0111 23:42:24.707693  120899 scheduler.go:450] Skip schedule deleting pod: preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-28
I0111 23:42:24.709635  120899 wrap.go:47] DELETE /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-28: (5.669068ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39622]
I0111 23:42:24.709857  120899 wrap.go:47] POST /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/events: (1.631704ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39628]
I0111 23:42:24.714686  120899 scheduling_queue.go:821] About to try and schedule pod preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-29
I0111 23:42:24.714783  120899 scheduler.go:450] Skip schedule deleting pod: preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-29
I0111 23:42:24.715600  120899 wrap.go:47] DELETE /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-29: (5.469921ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39628]
I0111 23:42:24.717152  120899 wrap.go:47] POST /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/events: (1.790156ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39622]
I0111 23:42:24.722027  120899 scheduling_queue.go:821] About to try and schedule pod preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-30
I0111 23:42:24.722119  120899 scheduler.go:450] Skip schedule deleting pod: preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-30
I0111 23:42:24.725849  120899 wrap.go:47] DELETE /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-30: (9.790613ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39628]
I0111 23:42:24.731356  120899 scheduling_queue.go:821] About to try and schedule pod preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-31
I0111 23:42:24.731415  120899 scheduler.go:450] Skip schedule deleting pod: preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-31
I0111 23:42:24.734168  120899 wrap.go:47] POST /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/events: (6.250555ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39622]
I0111 23:42:24.735660  120899 wrap.go:47] DELETE /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-31: (9.193479ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39628]
I0111 23:42:24.738450  120899 wrap.go:47] POST /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/events: (2.178282ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39622]
I0111 23:42:24.740618  120899 scheduling_queue.go:821] About to try and schedule pod preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-32
I0111 23:42:24.740724  120899 scheduler.go:450] Skip schedule deleting pod: preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-32
I0111 23:42:24.742428  120899 wrap.go:47] DELETE /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-32: (6.108308ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39628]
I0111 23:42:24.746077  120899 wrap.go:47] POST /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/events: (4.752845ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39622]
I0111 23:42:24.747313  120899 scheduling_queue.go:821] About to try and schedule pod preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-33
I0111 23:42:24.747366  120899 scheduler.go:450] Skip schedule deleting pod: preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-33
I0111 23:42:24.750088  120899 wrap.go:47] DELETE /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-33: (7.051585ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39628]
I0111 23:42:24.750562  120899 wrap.go:47] POST /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/events: (2.835662ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39622]
I0111 23:42:24.755472  120899 scheduling_queue.go:821] About to try and schedule pod preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-34
I0111 23:42:24.755603  120899 scheduler.go:450] Skip schedule deleting pod: preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-34
I0111 23:42:24.757856  120899 wrap.go:47] DELETE /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-34: (6.024738ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39628]
I0111 23:42:24.758144  120899 wrap.go:47] POST /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/events: (1.610769ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39622]
I0111 23:42:24.761793  120899 scheduling_queue.go:821] About to try and schedule pod preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-35
I0111 23:42:24.761833  120899 scheduler.go:450] Skip schedule deleting pod: preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-35
I0111 23:42:24.764186  120899 wrap.go:47] DELETE /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-35: (5.922568ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39628]
I0111 23:42:24.764922  120899 wrap.go:47] POST /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/events: (2.798927ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39622]
I0111 23:42:24.768454  120899 scheduling_queue.go:821] About to try and schedule pod preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-36
I0111 23:42:24.768692  120899 scheduler.go:450] Skip schedule deleting pod: preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-36
I0111 23:42:24.771720  120899 wrap.go:47] POST /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/events: (2.3005ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39622]
I0111 23:42:24.774012  120899 wrap.go:47] DELETE /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-36: (9.320949ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39628]
I0111 23:42:24.778562  120899 scheduling_queue.go:821] About to try and schedule pod preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-37
I0111 23:42:24.778721  120899 scheduler.go:450] Skip schedule deleting pod: preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-37
I0111 23:42:24.780915  120899 wrap.go:47] POST /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/events: (1.891511ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39622]
I0111 23:42:24.782824  120899 wrap.go:47] DELETE /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-37: (7.783351ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39628]
I0111 23:42:24.787570  120899 scheduling_queue.go:821] About to try and schedule pod preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-38
I0111 23:42:24.787644  120899 scheduler.go:450] Skip schedule deleting pod: preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-38
I0111 23:42:24.791365  120899 wrap.go:47] DELETE /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-38: (8.075372ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39628]
I0111 23:42:24.791878  120899 wrap.go:47] POST /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/events: (2.884871ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39622]
I0111 23:42:24.796429  120899 scheduling_queue.go:821] About to try and schedule pod preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-39
I0111 23:42:24.796595  120899 scheduler.go:450] Skip schedule deleting pod: preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-39
I0111 23:42:24.797186  120899 wrap.go:47] DELETE /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-39: (5.114489ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39628]
I0111 23:42:24.800652  120899 wrap.go:47] POST /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/events: (3.721596ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39622]
I0111 23:42:24.801010  120899 scheduling_queue.go:821] About to try and schedule pod preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-40
I0111 23:42:24.801043  120899 scheduler.go:450] Skip schedule deleting pod: preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-40
I0111 23:42:24.802531  120899 wrap.go:47] DELETE /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-40: (4.931422ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39628]
I0111 23:42:24.805979  120899 scheduling_queue.go:821] About to try and schedule pod preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-41
I0111 23:42:24.806475  120899 scheduler.go:450] Skip schedule deleting pod: preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-41
I0111 23:42:24.808054  120899 wrap.go:47] DELETE /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-41: (4.772619ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39628]
I0111 23:42:24.809859  120899 wrap.go:47] POST /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/events: (8.200236ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39622]
I0111 23:42:24.811320  120899 scheduling_queue.go:821] About to try and schedule pod preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-42
I0111 23:42:24.811357  120899 scheduler.go:450] Skip schedule deleting pod: preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-42
I0111 23:42:24.813203  120899 wrap.go:47] DELETE /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-42: (4.731639ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39628]
I0111 23:42:24.813204  120899 wrap.go:47] POST /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/events: (2.346086ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39622]
I0111 23:42:24.815677  120899 wrap.go:47] POST /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/events: (1.770602ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39622]
I0111 23:42:24.817788  120899 scheduling_queue.go:821] About to try and schedule pod preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-43
I0111 23:42:24.818668  120899 scheduler.go:450] Skip schedule deleting pod: preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-43
I0111 23:42:24.818491  120899 wrap.go:47] DELETE /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-43: (4.702518ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39628]
I0111 23:42:24.821098  120899 wrap.go:47] POST /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/events: (2.02313ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39628]
I0111 23:42:24.822790  120899 scheduling_queue.go:821] About to try and schedule pod preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-44
I0111 23:42:24.822898  120899 scheduler.go:450] Skip schedule deleting pod: preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-44
I0111 23:42:24.824393  120899 wrap.go:47] DELETE /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-44: (5.284454ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39622]
I0111 23:42:24.826125  120899 wrap.go:47] POST /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/events: (1.644609ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39628]
I0111 23:42:24.828234  120899 scheduling_queue.go:821] About to try and schedule pod preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-45
I0111 23:42:24.828399  120899 scheduler.go:450] Skip schedule deleting pod: preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-45
I0111 23:42:24.830043  120899 wrap.go:47] DELETE /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-45: (5.143996ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39622]
I0111 23:42:24.831533  120899 wrap.go:47] POST /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/events: (2.715353ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39628]
I0111 23:42:24.834835  120899 scheduling_queue.go:821] About to try and schedule pod preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-46
I0111 23:42:24.834926  120899 scheduler.go:450] Skip schedule deleting pod: preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-46
I0111 23:42:24.836964  120899 wrap.go:47] POST /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/events: (1.737257ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39628]
I0111 23:42:24.837063  120899 wrap.go:47] DELETE /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-46: (5.330852ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39622]
I0111 23:42:24.841593  120899 scheduling_queue.go:821] About to try and schedule pod preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-47
I0111 23:42:24.841692  120899 scheduler.go:450] Skip schedule deleting pod: preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-47
I0111 23:42:24.841894  120899 wrap.go:47] DELETE /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-47: (4.46165ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39622]
I0111 23:42:24.843779  120899 wrap.go:47] POST /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/events: (1.534405ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39628]
I0111 23:42:24.846697  120899 scheduling_queue.go:821] About to try and schedule pod preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-48
I0111 23:42:24.847075  120899 scheduler.go:450] Skip schedule deleting pod: preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-48
I0111 23:42:24.850662  120899 wrap.go:47] DELETE /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-48: (7.710332ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39622]
I0111 23:42:24.851954  120899 wrap.go:47] POST /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/events: (3.779045ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39628]
I0111 23:42:24.856637  120899 scheduling_queue.go:821] About to try and schedule pod preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-49
I0111 23:42:24.856756  120899 scheduler.go:450] Skip schedule deleting pod: preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/ppod-49
I0111 23:42:24.857576  120899 wrap.go:47] DELETE /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-49: (6.478032ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39622]
I0111 23:42:24.857787  120899 reflector.go:215] k8s.io/client-go/informers/factory.go:132: forcing resync
I0111 23:42:24.859858  120899 wrap.go:47] POST /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/events: (2.679535ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39628]
I0111 23:42:24.863044  120899 wrap.go:47] DELETE /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/rpod-0: (4.689293ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39622]
I0111 23:42:24.863866  120899 reflector.go:215] k8s.io/client-go/informers/factory.go:132: forcing resync
E0111 23:42:24.865048  120899 rest.go:216] >>>> caught error : &errors.StatusError{ErrStatus:v1.Status{TypeMeta:v1.TypeMeta{Kind:"", APIVersion:""}, ListMeta:v1.ListMeta{SelfLink:"", ResourceVersion:"", Continue:""}, Status:"Failure", Message:"pods \"rpod-1\" not found", Reason:"NotFound", Details:(*v1.StatusDetails)(0xc012a3e420), Code:404}}
I0111 23:42:24.865098  120899 delete.go:145] Unable to delete from database : &errors.StatusError{ErrStatus:v1.Status{TypeMeta:v1.TypeMeta{Kind:"", APIVersion:""}, ListMeta:v1.ListMeta{SelfLink:"", ResourceVersion:"", Continue:""}, Status:"Failure", Message:"pods \"rpod-1\" not found", Reason:"NotFound", Details:(*v1.StatusDetails)(0xc012a3e420), Code:404}}
I0111 23:42:24.865226  120899 wrap.go:47] DELETE /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/rpod-1: (1.264128ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39622]
I0111 23:42:24.865556  120899 reflector.go:215] k8s.io/client-go/informers/factory.go:132: forcing resync
I0111 23:42:24.865593  120899 reflector.go:215] k8s.io/client-go/informers/factory.go:132: forcing resync
I0111 23:42:24.865936  120899 reflector.go:215] k8s.io/client-go/informers/factory.go:132: forcing resync
I0111 23:42:24.870403  120899 wrap.go:47] DELETE /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/preemptor-pod: (4.678716ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39622]
I0111 23:42:24.872863  120899 get.go:70] Unable to get resource : &errors.StatusError{ErrStatus:v1.Status{TypeMeta:v1.TypeMeta{Kind:"", APIVersion:""}, ListMeta:v1.ListMeta{SelfLink:"", ResourceVersion:"", Continue:""}, Status:"Failure", Message:"pods \"ppod-0\" not found", Reason:"NotFound", Details:(*v1.StatusDetails)(0xc0126dd800), Code:404}}
I0111 23:42:24.873089  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-0: (1.157612ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39622]
I0111 23:42:24.875690  120899 get.go:70] Unable to get resource : &errors.StatusError{ErrStatus:v1.Status{TypeMeta:v1.TypeMeta{Kind:"", APIVersion:""}, ListMeta:v1.ListMeta{SelfLink:"", ResourceVersion:"", Continue:""}, Status:"Failure", Message:"pods \"ppod-1\" not found", Reason:"NotFound", Details:(*v1.StatusDetails)(0xc0128e1920), Code:404}}
I0111 23:42:24.875999  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-1: (1.305836ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39622]
I0111 23:42:24.878707  120899 get.go:70] Unable to get resource : &errors.StatusError{ErrStatus:v1.Status{TypeMeta:v1.TypeMeta{Kind:"", APIVersion:""}, ListMeta:v1.ListMeta{SelfLink:"", ResourceVersion:"", Continue:""}, Status:"Failure", Message:"pods \"ppod-2\" not found", Reason:"NotFound", Details:(*v1.StatusDetails)(0xc0128bb8c0), Code:404}}
I0111 23:42:24.879071  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-2: (1.328876ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39622]
I0111 23:42:24.881693  120899 get.go:70] Unable to get resource : &errors.StatusError{ErrStatus:v1.Status{TypeMeta:v1.TypeMeta{Kind:"", APIVersion:""}, ListMeta:v1.ListMeta{SelfLink:"", ResourceVersion:"", Continue:""}, Status:"Failure", Message:"pods \"ppod-3\" not found", Reason:"NotFound", Details:(*v1.StatusDetails)(0xc0128bbda0), Code:404}}
I0111 23:42:24.881975  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-3: (1.29098ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39622]
I0111 23:42:24.884685  120899 get.go:70] Unable to get resource : &errors.StatusError{ErrStatus:v1.Status{TypeMeta:v1.TypeMeta{Kind:"", APIVersion:""}, ListMeta:v1.ListMeta{SelfLink:"", ResourceVersion:"", Continue:""}, Status:"Failure", Message:"pods \"ppod-4\" not found", Reason:"NotFound", Details:(*v1.StatusDetails)(0xc0129c2cc0), Code:404}}
I0111 23:42:24.884920  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-4: (1.294535ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39622]
I0111 23:42:24.888106  120899 get.go:70] Unable to get resource : &errors.StatusError{ErrStatus:v1.Status{TypeMeta:v1.TypeMeta{Kind:"", APIVersion:""}, ListMeta:v1.ListMeta{SelfLink:"", ResourceVersion:"", Continue:""}, Status:"Failure", Message:"pods \"ppod-5\" not found", Reason:"NotFound", Details:(*v1.StatusDetails)(0xc012a9e720), Code:404}}
I0111 23:42:24.888366  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-5: (1.250796ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39622]
I0111 23:42:24.890968  120899 get.go:70] Unable to get resource : &errors.StatusError{ErrStatus:v1.Status{TypeMeta:v1.TypeMeta{Kind:"", APIVersion:""}, ListMeta:v1.ListMeta{SelfLink:"", ResourceVersion:"", Continue:""}, Status:"Failure", Message:"pods \"ppod-6\" not found", Reason:"NotFound", Details:(*v1.StatusDetails)(0xc012a4ef00), Code:404}}
I0111 23:42:24.891199  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-6: (1.253398ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39622]
I0111 23:42:24.893986  120899 get.go:70] Unable to get resource : &errors.StatusError{ErrStatus:v1.Status{TypeMeta:v1.TypeMeta{Kind:"", APIVersion:""}, ListMeta:v1.ListMeta{SelfLink:"", ResourceVersion:"", Continue:""}, Status:"Failure", Message:"pods \"ppod-7\" not found", Reason:"NotFound", Details:(*v1.StatusDetails)(0xc012a4f080), Code:404}}
I0111 23:42:24.894220  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-7: (1.326258ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39622]
I0111 23:42:24.896853  120899 get.go:70] Unable to get resource : &errors.StatusError{ErrStatus:v1.Status{TypeMeta:v1.TypeMeta{Kind:"", APIVersion:""}, ListMeta:v1.ListMeta{SelfLink:"", ResourceVersion:"", Continue:""}, Status:"Failure", Message:"pods \"ppod-8\" not found", Reason:"NotFound", Details:(*v1.StatusDetails)(0xc012a4f1a0), Code:404}}
I0111 23:42:24.897000  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-8: (1.161465ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39622]
I0111 23:42:24.899533  120899 get.go:70] Unable to get resource : &errors.StatusError{ErrStatus:v1.Status{TypeMeta:v1.TypeMeta{Kind:"", APIVersion:""}, ListMeta:v1.ListMeta{SelfLink:"", ResourceVersion:"", Continue:""}, Status:"Failure", Message:"pods \"ppod-9\" not found", Reason:"NotFound", Details:(*v1.StatusDetails)(0xc012ef2660), Code:404}}
I0111 23:42:24.899796  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-9: (1.225792ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39622]
I0111 23:42:24.902631  120899 get.go:70] Unable to get resource : &errors.StatusError{ErrStatus:v1.Status{TypeMeta:v1.TypeMeta{Kind:"", APIVersion:""}, ListMeta:v1.ListMeta{SelfLink:"", ResourceVersion:"", Continue:""}, Status:"Failure", Message:"pods \"ppod-10\" not found", Reason:"NotFound", Details:(*v1.StatusDetails)(0xc00df2a2a0), Code:404}}
I0111 23:42:24.902864  120899 wrap.go:47] GET /api/v1/namespaces/preemption-race887eaa8f-15fa-11e9-86ce-0242ac110002/pods/ppod-10: (1.365192ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:39622]
I0111 23:42:24.905594  120899 get.go:70] Unable to get resource : &errors.StatusError{ErrStatus:v1.Status{TypeMeta:v1.TypeMeta{Kind:"", APIVersion:""}, ListMeta:v1.ListMe