PR | wzshiming: Fix staging/src/k8s.io/apiserver/pkg/server/filters flake test timeout
Result | FAILURE
Tests | 0 failed / 135 succeeded
Started |
Elapsed | 18m34s
Revision | 7cc1daf7682e5be1da328b4e185f4fe389bcac56
Refs | 99344
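A minimal local-repro sketch for the flake named in the PR title, assuming a standard kubernetes/kubernetes checkout; the specific failing test name is not shown in this report, so the whole filters package is exercised, and the -count/-timeout values are illustrative assumptions:

# Stress the apiserver filters package to surface the timeout flake
go test -race -count=50 -timeout=30m ./staging/src/k8s.io/apiserver/pkg/server/filters/...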
k8s.io/kubernetes/cmd/kubeadm/test/cmd TestCmdCompletion
k8s.io/kubernetes/cmd/kubeadm/test/cmd TestCmdCompletion/shell_not_expected
k8s.io/kubernetes/cmd/kubeadm/test/cmd TestCmdCompletion/unsupported_shell_type
k8s.io/kubernetes/cmd/kubeadm/test/cmd TestCmdInitAPIPort
k8s.io/kubernetes/cmd/kubeadm/test/cmd TestCmdInitAPIPort/accept_a_valid_port_number
k8s.io/kubernetes/cmd/kubeadm/test/cmd TestCmdInitAPIPort/fail_on_negative_port_number
k8s.io/kubernetes/cmd/kubeadm/test/cmd TestCmdInitAPIPort/fail_on_non-string_port
k8s.io/kubernetes/cmd/kubeadm/test/cmd TestCmdInitAPIPort/fail_on_too_large_port_number
k8s.io/kubernetes/cmd/kubeadm/test/cmd TestCmdInitCertPhaseCSR
k8s.io/kubernetes/cmd/kubeadm/test/cmd TestCmdInitCertPhaseCSR/fails_on_CSR
k8s.io/kubernetes/cmd/kubeadm/test/cmd TestCmdInitCertPhaseCSR/fails_on_all
k8s.io/kubernetes/cmd/kubeadm/test/cmd TestCmdInitCertPhaseCSR/generate_CSR
k8s.io/kubernetes/cmd/kubeadm/test/cmd TestCmdInitConfig
k8s.io/kubernetes/cmd/kubeadm/test/cmd TestCmdInitConfig/can't_load_old_component_config
k8s.io/kubernetes/cmd/kubeadm/test/cmd TestCmdInitConfig/can't_load_old_v1alpha1_config
k8s.io/kubernetes/cmd/kubeadm/test/cmd TestCmdInitConfig/can't_load_old_v1alpha2_config
k8s.io/kubernetes/cmd/kubeadm/test/cmd TestCmdInitConfig/can't_load_old_v1alpha3_config
k8s.io/kubernetes/cmd/kubeadm/test/cmd TestCmdInitConfig/can_load_current_component_config
k8s.io/kubernetes/cmd/kubeadm/test/cmd TestCmdInitConfig/can_load_v1beta1_config
k8s.io/kubernetes/cmd/kubeadm/test/cmd TestCmdInitConfig/can_load_v1beta2_config
k8s.io/kubernetes/cmd/kubeadm/test/cmd TestCmdInitConfig/don't_allow_mixed_arguments_v1beta1
k8s.io/kubernetes/cmd/kubeadm/test/cmd TestCmdInitConfig/don't_allow_mixed_arguments_v1beta2
k8s.io/kubernetes/cmd/kubeadm/test/cmd TestCmdInitConfig/fail_on_non_existing_path
k8s.io/kubernetes/cmd/kubeadm/test/cmd TestCmdInitFeatureGates
k8s.io/kubernetes/cmd/kubeadm/test/cmd TestCmdInitFeatureGates/feature_gate_IPv6DualStack=true
k8s.io/kubernetes/cmd/kubeadm/test/cmd TestCmdInitFeatureGates/feature_gate_PublicKeysECDSA=true
k8s.io/kubernetes/cmd/kubeadm/test/cmd TestCmdInitFeatureGates/no_feature_gates_passed
k8s.io/kubernetes/cmd/kubeadm/test/cmd TestCmdInitKubernetesVersion
k8s.io/kubernetes/cmd/kubeadm/test/cmd TestCmdInitKubernetesVersion/invalid_semantic_version_string_is_detected
k8s.io/kubernetes/cmd/kubeadm/test/cmd TestCmdInitKubernetesVersion/valid_version_is_accepted
k8s.io/kubernetes/cmd/kubeadm/test/cmd TestCmdInitToken
k8s.io/kubernetes/cmd/kubeadm/test/cmd TestCmdInitToken/invalid_token_non-lowercase
k8s.io/kubernetes/cmd/kubeadm/test/cmd TestCmdInitToken/invalid_token_size
k8s.io/kubernetes/cmd/kubeadm/test/cmd TestCmdInitToken/valid_token_is_accepted
k8s.io/kubernetes/cmd/kubeadm/test/cmd TestCmdJoinArgsMixed
k8s.io/kubernetes/cmd/kubeadm/test/cmd TestCmdJoinArgsMixed/discovery-token_and_config
k8s.io/kubernetes/cmd/kubeadm/test/cmd TestCmdJoinBadArgs
k8s.io/kubernetes/cmd/kubeadm/test/cmd TestCmdJoinBadArgs/discovery-token_and_discovery-file_can't_both_be_set
k8s.io/kubernetes/cmd/kubeadm/test/cmd TestCmdJoinBadArgs/discovery-token_or_discovery-file_must_be_set
k8s.io/kubernetes/cmd/kubeadm/test/cmd TestCmdJoinConfig
k8s.io/kubernetes/cmd/kubeadm/test/cmd TestCmdJoinConfig/config
k8s.io/kubernetes/cmd/kubeadm/test/cmd TestCmdJoinConfig/config_path
k8s.io/kubernetes/cmd/kubeadm/test/cmd TestCmdJoinDiscoveryFile
k8s.io/kubernetes/cmd/kubeadm/test/cmd TestCmdJoinDiscoveryFile/invalid_discovery_file
k8s.io/kubernetes/cmd/kubeadm/test/cmd TestCmdJoinDiscoveryFile/valid_discovery_file
k8s.io/kubernetes/cmd/kubeadm/test/cmd TestCmdJoinDiscoveryToken
k8s.io/kubernetes/cmd/kubeadm/test/cmd TestCmdJoinDiscoveryToken/valid_discovery_token
k8s.io/kubernetes/cmd/kubeadm/test/cmd TestCmdJoinDiscoveryToken/valid_discovery_token_url
k8s.io/kubernetes/cmd/kubeadm/test/cmd TestCmdJoinNodeName
k8s.io/kubernetes/cmd/kubeadm/test/cmd TestCmdJoinNodeName/valid_node_name
k8s.io/kubernetes/cmd/kubeadm/test/cmd TestCmdJoinTLSBootstrapToken
k8s.io/kubernetes/cmd/kubeadm/test/cmd TestCmdJoinTLSBootstrapToken/valid_bootstrap_token
k8s.io/kubernetes/cmd/kubeadm/test/cmd TestCmdJoinTLSBootstrapToken/valid_bootstrap_token_url
k8s.io/kubernetes/cmd/kubeadm/test/cmd TestCmdJoinToken
k8s.io/kubernetes/cmd/kubeadm/test/cmd TestCmdJoinToken/valid_token
k8s.io/kubernetes/cmd/kubeadm/test/cmd TestCmdJoinToken/valid_token_url
k8s.io/kubernetes/cmd/kubeadm/test/cmd TestCmdTokenDelete
k8s.io/kubernetes/cmd/kubeadm/test/cmd TestCmdTokenDelete/invalid_token
k8s.io/kubernetes/cmd/kubeadm/test/cmd TestCmdTokenDelete/no_token_provided
k8s.io/kubernetes/cmd/kubeadm/test/cmd TestCmdTokenGenerate
k8s.io/kubernetes/cmd/kubeadm/test/cmd TestCmdTokenGenerateTypoError
k8s.io/kubernetes/cmd/kubeadm/test/cmd TestCmdVersion
k8s.io/kubernetes/cmd/kubeadm/test/cmd TestCmdVersion/default_output
k8s.io/kubernetes/cmd/kubeadm/test/cmd TestCmdVersion/invalid_output_option
k8s.io/kubernetes/cmd/kubeadm/test/cmd TestCmdVersion/short_output
k8s.io/kubernetes/cmd/kubeadm/test/cmd TestCmdVersionOutputJsonOrYaml
k8s.io/kubernetes/cmd/kubeadm/test/cmd TestCmdVersionOutputJsonOrYaml/json_output
k8s.io/kubernetes/cmd/kubeadm/test/cmd TestCmdVersionOutputJsonOrYaml/yaml_output
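The kubeadm CLI cases above run as a single Go package inside the job's test-cmd step (the log below shows it passing). A minimal sketch for re-running one case locally, assuming a standard checkout and a freshly built kubeadm binary, since these tests drive the real binary; the -run pattern is illustrative:

# Build kubeadm, then run one case from the list above
make WHAT=cmd/kubeadm
go test -run TestCmdVersion ./cmd/kubeadm/test/cmd/...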
test-cmd run_RESTMapper_evaluation_tests
test-cmd run_assert_categories_tests
test-cmd run_assert_short_name_tests
test-cmd run_authorization_tests
test-cmd run_certificates_tests
test-cmd run_client_config_tests
test-cmd run_cluster_management_tests
test-cmd run_clusterroles_tests
test-cmd run_configmap_tests
test-cmd run_convert_tests
test-cmd run_crd_tests
test-cmd run_create_job_tests
test-cmd run_create_secret_tests
test-cmd run_daemonset_history_tests
test-cmd run_daemonset_tests
test-cmd run_deployment_tests
test-cmd run_exec_credentials_tests
test-cmd run_impersonation_tests
test-cmd run_job_tests
test-cmd run_kubectl_all_namespace_tests
test-cmd run_kubectl_apply_deployments_tests
test-cmd run_kubectl_apply_tests
test-cmd run_kubectl_config_set_cluster_tests
test-cmd run_kubectl_config_set_credentials_tests
test-cmd run_kubectl_config_set_tests
test-cmd run_kubectl_create_error_tests
test-cmd run_kubectl_create_filter_tests
test-cmd run_kubectl_create_kustomization_directory_tests
test-cmd run_kubectl_debug_node_tests
test-cmd run_kubectl_debug_pod_tests
test-cmd run_kubectl_delete_allnamespaces_tests
test-cmd run_kubectl_diff_same_names
test-cmd run_kubectl_diff_tests
test-cmd run_kubectl_exec_pod_tests
test-cmd run_kubectl_exec_resource_name_tests
test-cmd run_kubectl_explain_tests
test-cmd run_kubectl_get_tests
test-cmd run_kubectl_local_proxy_tests
test-cmd run_kubectl_request_timeout_tests
test-cmd run_kubectl_run_tests
test-cmd run_kubectl_server_side_apply_tests
test-cmd run_kubectl_sort_by_tests
test-cmd run_kubectl_version_tests
test-cmd run_lists_tests
test-cmd run_multi_resources_tests
test-cmd run_namespace_tests
test-cmd run_nodes_tests
test-cmd run_persistent_volume_claims_tests
test-cmd run_persistent_volumes_tests
test-cmd run_plugins_tests
test-cmd run_pod_templates_tests
test-cmd run_pod_tests
test-cmd run_rc_tests
test-cmd run_resource_aliasing_tests
test-cmd run_retrieve_multiple_tests
test-cmd run_role_tests
test-cmd run_rs_tests
test-cmd run_save_config_tests
test-cmd run_secrets_test
test-cmd run_service_accounts_tests
test-cmd run_service_tests
test-cmd run_stateful_set_tests
test-cmd run_statefulset_history_tests
test-cmd run_storage_class_tests
test-cmd run_swagger_tests
test-cmd run_template_output_tests
test-cmd run_wait_tests
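The run_* suites listed above are the shell-based CLI tests under test/cmd; the log that follows shows hack/make-rules/test-cmd.sh driving them against a locally started apiserver and controller-manager. A minimal sketch for reproducing this job step, assuming a standard checkout with etcd installed (e.g. via hack/install-etcd.sh):

# Run the full kubectl/test-cmd suite the same way this job does
make test-cmd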
... skipping 70 lines ...
Recording: record_command_canary
Running command: record_command_canary
+++ Running case: test-cmd.record_command_canary
+++ working dir: /home/prow/go/src/k8s.io/kubernetes
+++ command: record_command_canary
/home/prow/go/src/k8s.io/kubernetes/test/cmd/legacy-script.sh: line 156: bogus-expected-to-fail: command not found
!!! [0223 09:42:23] Call tree:
!!! [0223 09:42:23] 1: /home/prow/go/src/k8s.io/kubernetes/test/cmd/../../third_party/forked/shell2junit/sh2ju.sh:47 record_command_canary(...)
!!! [0223 09:42:23] 2: /home/prow/go/src/k8s.io/kubernetes/test/cmd/../../third_party/forked/shell2junit/sh2ju.sh:112 eVal(...)
!!! [0223 09:42:23] 3: /home/prow/go/src/k8s.io/kubernetes/test/cmd/legacy-script.sh:132 juLog(...)
!!! [0223 09:42:23] 4: /home/prow/go/src/k8s.io/kubernetes/test/cmd/legacy-script.sh:160 record_command(...)
!!! [0223 09:42:23] 5: hack/make-rules/test-cmd.sh:35 source(...)
+++ exit code: 1
+++ error: 1
+++ [0223 09:42:23] Running kubeadm tests
+++ [0223 09:42:28] Building go targets for linux/amd64: cmd/kubeadm
+++ [0223 09:43:12] Running tests without code coverage
{"Time":"2021-02-23T09:44:38.098126338Z","Action":"output","Package":"k8s.io/kubernetes/cmd/kubeadm/test/cmd","Output":"ok \tk8s.io/kubernetes/cmd/kubeadm/test/cmd\t49.828s\n"}
✓ cmd/kubeadm/test/cmd (49.831s)
... skipping 372 lines ...
I0223 09:46:59.360213 56228 client.go:360] parsed scheme: "passthrough"
I0223 09:46:59.360330 56228 passthrough.go:48] ccResolverWrapper: sending update to cc: {[{http://127.0.0.1:2379 <nil> 0 <nil>}] <nil> <nil>}
I0223 09:46:59.360345 56228 clientconn.go:948] ClientConn switching balancer to "pick_first"
+++ [0223 09:47:05] Generate kubeconfig for controller-manager
+++ [0223 09:47:05] Starting controller-manager
I0223 09:47:05.852544 59929 serving.go:347] Generated self-signed cert in-memory
W0223 09:47:06.744670 59929 authentication.go:410] failed to read in-cluster kubeconfig for delegated authentication: open /var/run/secrets/kubernetes.io/serviceaccount/token: no such file or directory
W0223 09:47:06.744732 59929 authentication.go:307] No authentication-kubeconfig provided in order to lookup client-ca-file in configmap/extension-apiserver-authentication in kube-system, so client certificate authentication won't work.
W0223 09:47:06.744741 59929 authentication.go:331] No authentication-kubeconfig provided in order to lookup requestheader-client-ca-file in configmap/extension-apiserver-authentication in kube-system, so request-header client certificate authentication won't work.
W0223 09:47:06.744755 59929 authorization.go:216] failed to read in-cluster kubeconfig for delegated authorization: open /var/run/secrets/kubernetes.io/serviceaccount/token: no such file or directory
W0223 09:47:06.744778 59929 authorization.go:184] No authorization-kubeconfig provided, so SubjectAccessReview of authorization tokens won't work.
I0223 09:47:06.744808 59929 controllermanager.go:174] Version: v1.21.0-alpha.3.453+90a973ca91c4e0
I0223 09:47:06.746643 59929 secure_serving.go:197] Serving securely on [::]:10257
I0223 09:47:06.746814 59929 tlsconfig.go:240] Starting DynamicServingCertificateController
I0223 09:47:06.748755 59929 deprecated_insecure_serving.go:53] Serving insecurely on [::]:10252
I0223 09:47:06.749255 59929 leaderelection.go:243] attempting to acquire leader lease kube-system/kube-controller-manager...
... skipping 9 lines ...
W0223 09:47:07.230472 59929 mutation_detector.go:53] Mutation detector is enabled, this will result in memory leakage.
W0223 09:47:07.230538 59929 mutation_detector.go:53] Mutation detector is enabled, this will result in memory leakage. I0223 09:47:07.230666 59929 controllermanager.go:540] Started "statefulset" I0223 09:47:07.230817 59929 stateful_set.go:146] Starting stateful set controller I0223 09:47:07.230840 59929 shared_informer.go:240] Waiting for caches to sync for stateful set I0223 09:47:07.230980 59929 node_lifecycle_controller.go:76] Sending events to api server E0223 09:47:07.231009 59929 core.go:231] failed to start cloud node lifecycle controller: no cloud provider provided W0223 09:47:07.231019 59929 controllermanager.go:532] Skipping "cloud-node-lifecycle" W0223 09:47:07.231309 59929 mutation_detector.go:53] Mutation detector is enabled, this will result in memory leakage. I0223 09:47:07.231598 59929 controllermanager.go:540] Started "persistentvolume-expander" I0223 09:47:07.231645 59929 expand_controller.go:310] Starting expand controller I0223 09:47:07.231663 59929 shared_informer.go:240] Waiting for caches to sync for expand W0223 09:47:07.231819 59929 mutation_detector.go:53] Mutation detector is enabled, this will result in memory leakage. ... skipping 2 lines ... I0223 09:47:07.231955 59929 controllermanager.go:540] Started "csrcleaner" I0223 09:47:07.232001 59929 ttlafterfinished_controller.go:109] Starting TTL after finished controller I0223 09:47:07.232014 59929 shared_informer.go:240] Waiting for caches to sync for TTL after finished I0223 09:47:07.232041 59929 cleaner.go:82] Starting CSR cleaner controller W0223 09:47:07.232220 59929 mutation_detector.go:53] Mutation detector is enabled, this will result in memory leakage. W0223 09:47:07.232281 59929 mutation_detector.go:53] Mutation detector is enabled, this will result in memory leakage. E0223 09:47:07.232313 59929 core.go:91] Failed to start service controller: WARNING: no cloud provider provided, services of type LoadBalancer will fail W0223 09:47:07.232323 59929 controllermanager.go:532] Skipping "service" W0223 09:47:07.232720 59929 mutation_detector.go:53] Mutation detector is enabled, this will result in memory leakage. I0223 09:47:07.232739 59929 controllermanager.go:540] Started "endpoint" I0223 09:47:07.232883 59929 endpoints_controller.go:184] Starting endpoint controller I0223 09:47:07.232900 59929 shared_informer.go:240] Waiting for caches to sync for endpoint W0223 09:47:07.240037 59929 mutation_detector.go:53] Mutation detector is enabled, this will result in memory leakage. ... skipping 57 lines ... I0223 09:47:07.253901 59929 controllermanager.go:540] Started "replicationcontroller" I0223 09:47:07.254086 59929 replica_set.go:182] Starting replicationcontroller controller I0223 09:47:07.254109 59929 shared_informer.go:240] Waiting for caches to sync for ReplicationController W0223 09:47:07.254184 59929 mutation_detector.go:53] Mutation detector is enabled, this will result in memory leakage. 
Client Version: version.Info{Major:"1", Minor:"21+", GitVersion:"v1.21.0-alpha.3.453+90a973ca91c4e0", GitCommit:"90a973ca91c4e0cb1fde9333942f45532cf4ced8", GitTreeState:"clean", BuildDate:"2021-02-23T08:42:04Z", GoVersion:"go1.15.8", Compiler:"gc", Platform:"linux/amd64"} Server Version: version.Info{Major:"1", Minor:"21+", GitVersion:"v1.21.0-alpha.3.453+90a973ca91c4e0", GitCommit:"90a973ca91c4e0cb1fde9333942f45532cf4ced8", GitTreeState:"clean", BuildDate:"2021-02-23T08:42:04Z", GoVersion:"go1.15.8", Compiler:"gc", Platform:"linux/amd64"} The Service "kubernetes" is invalid: spec.clusterIPs: Invalid value: []string{"10.0.0.1"}: failed to allocated ip:10.0.0.1 with error:provided IP is already allocated I0223 09:47:07.706450 59929 resource_quota_monitor.go:229] QuotaMonitor created object count evaluator for controllerrevisions.apps I0223 09:47:07.706509 59929 resource_quota_monitor.go:229] QuotaMonitor created object count evaluator for statefulsets.apps W0223 09:47:07.706616 59929 mutation_detector.go:53] Mutation detector is enabled, this will result in memory leakage. I0223 09:47:07.706638 59929 resource_quota_monitor.go:229] QuotaMonitor created object count evaluator for ingresses.networking.k8s.io W0223 09:47:07.706695 59929 mutation_detector.go:53] Mutation detector is enabled, this will result in memory leakage. I0223 09:47:07.706719 59929 resource_quota_monitor.go:229] QuotaMonitor created object count evaluator for ingresses.extensions ... skipping 82 lines ... I0223 09:47:07.808538 59929 shared_informer.go:247] Caches are synced for deployment I0223 09:47:07.808906 59929 shared_informer.go:247] Caches are synced for certificate-csrapproving I0223 09:47:07.811663 59929 shared_informer.go:247] Caches are synced for cronjob I0223 09:47:07.811998 59929 shared_informer.go:247] Caches are synced for ClusterRoleAggregator I0223 09:47:07.812405 59929 shared_informer.go:247] Caches are synced for PV protection I0223 09:47:07.813161 59929 shared_informer.go:247] Caches are synced for PVC protection E0223 09:47:07.823013 59929 clusterroleaggregation_controller.go:181] admin failed with : Operation cannot be fulfilled on clusterroles.rbac.authorization.k8s.io "admin": the object has been modified; please apply your changes to the latest version and try again I0223 09:47:07.831088 59929 shared_informer.go:247] Caches are synced for stateful set I0223 09:47:07.831717 59929 shared_informer.go:247] Caches are synced for expand I0223 09:47:07.832802 59929 shared_informer.go:247] Caches are synced for TTL after finished I0223 09:47:07.833088 59929 shared_informer.go:247] Caches are synced for endpoint E0223 09:47:07.837431 59929 clusterroleaggregation_controller.go:181] admin failed with : Operation cannot be fulfilled on clusterroles.rbac.authorization.k8s.io "admin": the object has been modified; please apply your changes to the latest version and try again I0223 09:47:07.842100 59929 shared_informer.go:247] Caches are synced for job { "major": "1", "minor": "21+", "gitVersion": "v1.21.0-alpha.3.453+90a973ca91c4e0", "gitCommit": "90a973ca91c4e0cb1fde9333942f45532cf4ced8", ... skipping 2 lines ... 
"goVersion": "go1.15.8", "compiler": "gc", "platform": "linux/amd64" }I0223 09:47:07.940845 59929 shared_informer.go:247] Caches are synced for service account I0223 09:47:07.945226 56228 controller.go:611] quota admission added evaluator for: serviceaccounts +++ [0223 09:47:08] Testing kubectl version: check client only output matches expected output W0223 09:47:08.068494 59929 actual_state_of_world.go:534] Failed to update statusUpdateNeeded field in actual state of world: Failed to set statusUpdateNeeded to needed true, because nodeName="127.0.0.1" does not exist I0223 09:47:08.107974 59929 shared_informer.go:247] Caches are synced for resource quota I0223 09:47:08.109162 59929 shared_informer.go:247] Caches are synced for TTL I0223 09:47:08.109817 59929 shared_informer.go:247] Caches are synced for persistent volume I0223 09:47:08.110977 59929 shared_informer.go:247] Caches are synced for attach detach I0223 09:47:08.111242 59929 shared_informer.go:247] Caches are synced for GC I0223 09:47:08.142157 59929 shared_informer.go:247] Caches are synced for daemon sets ... skipping 107 lines ... +++ working dir: /home/prow/go/src/k8s.io/kubernetes +++ command: run_RESTMapper_evaluation_tests +++ [0223 09:47:12] Creating namespace namespace-1614073632-1422 namespace/namespace-1614073632-1422 created Context "test" modified. +++ [0223 09:47:12] Testing RESTMapper +++ [0223 09:47:13] "kubectl get unknownresourcetype" returns error as expected: error: the server doesn't have a resource type "unknownresourcetype" +++ exit code: 0 NAME SHORTNAMES APIVERSION NAMESPACED KIND bindings v1 true Binding componentstatuses cs v1 false ComponentStatus configmaps cm v1 true ConfigMap endpoints ep v1 true Endpoints ... skipping 62 lines ... namespace/namespace-1614073637-2138 created Context "test" modified. +++ [0223 09:47:17] Testing clusterroles [32mrbac.sh:29: Successful get clusterroles/cluster-admin {{.metadata.name}}: cluster-admin (B[m[32mrbac.sh:30: Successful get clusterrolebindings/cluster-admin {{.metadata.name}}: cluster-admin (B[mSuccessful message:Error from server (NotFound): clusterroles.rbac.authorization.k8s.io "pod-admin" not found has:clusterroles.rbac.authorization.k8s.io "pod-admin" not found clusterrole.rbac.authorization.k8s.io/pod-admin created (dry run) clusterrole.rbac.authorization.k8s.io/pod-admin created (server dry run) Successful message:Error from server (NotFound): clusterroles.rbac.authorization.k8s.io "pod-admin" not found has:clusterroles.rbac.authorization.k8s.io "pod-admin" not found clusterrole.rbac.authorization.k8s.io/pod-admin created [32mrbac.sh:42: Successful get clusterrole/pod-admin {{range.rules}}{{range.verbs}}{{.}}:{{end}}{{end}}: *: (B[mSuccessful message:warning: deleting cluster-scoped resources, not scoped to the provided namespace clusterrole.rbac.authorization.k8s.io "pod-admin" deleted ... skipping 18 lines ... 
(B[mclusterrole.rbac.authorization.k8s.io/url-reader created [32mrbac.sh:61: Successful get clusterrole/url-reader {{range.rules}}{{range.verbs}}{{.}}:{{end}}{{end}}: get: (B[m[32mrbac.sh:62: Successful get clusterrole/url-reader {{range.rules}}{{range.nonResourceURLs}}{{.}}:{{end}}{{end}}: /logs/*:/healthz/*: (B[mclusterrole.rbac.authorization.k8s.io/aggregation-reader created [32mrbac.sh:64: Successful get clusterrole/aggregation-reader {{.metadata.name}}: aggregation-reader (B[mSuccessful message:Error from server (NotFound): clusterrolebindings.rbac.authorization.k8s.io "super-admin" not found has:clusterrolebindings.rbac.authorization.k8s.io "super-admin" not found clusterrolebinding.rbac.authorization.k8s.io/super-admin created (dry run) clusterrolebinding.rbac.authorization.k8s.io/super-admin created (server dry run) Successful message:Error from server (NotFound): clusterrolebindings.rbac.authorization.k8s.io "super-admin" not found has:clusterrolebindings.rbac.authorization.k8s.io "super-admin" not found clusterrolebinding.rbac.authorization.k8s.io/super-admin created [32mrbac.sh:77: Successful get clusterrolebinding/super-admin {{range.subjects}}{{.name}}:{{end}}: super-admin: (B[mclusterrolebinding.rbac.authorization.k8s.io/super-admin subjects updated (dry run) clusterrolebinding.rbac.authorization.k8s.io/super-admin subjects updated (server dry run) [32mrbac.sh:80: Successful get clusterrolebinding/super-admin {{range.subjects}}{{.name}}:{{end}}: super-admin: ... skipping 63 lines ... [32mrbac.sh:102: Successful get clusterrolebinding/super-admin {{range.subjects}}{{.name}}:{{end}}: super-admin:foo:test-all-user: (B[m[32mrbac.sh:103: Successful get clusterrolebinding/super-group {{range.subjects}}{{.name}}:{{end}}: the-group:foo:test-all-user: (B[m[32mrbac.sh:104: Successful get clusterrolebinding/super-sa {{range.subjects}}{{.name}}:{{end}}: sa-name:foo:test-all-user: (B[mrolebinding.rbac.authorization.k8s.io/admin created (dry run) rolebinding.rbac.authorization.k8s.io/admin created (server dry run) Successful message:Error from server (NotFound): rolebindings.rbac.authorization.k8s.io "admin" not found has: not found rolebinding.rbac.authorization.k8s.io/admin created [32mrbac.sh:113: Successful get rolebinding/admin {{.roleRef.kind}}: ClusterRole (B[m[32mrbac.sh:114: Successful get rolebinding/admin {{range.subjects}}{{.name}}:{{end}}: default-admin: (B[mrolebinding.rbac.authorization.k8s.io/admin subjects updated [32mrbac.sh:116: Successful get rolebinding/admin {{range.subjects}}{{.name}}:{{end}}: default-admin:foo: ... skipping 29 lines ... message:Warning: rbac.authorization.k8s.io/v1beta1 Role is deprecated in v1.17+, unavailable in v1.22+; use rbac.authorization.k8s.io/v1 Role No resources found in namespace-1614073645-30821 namespace. has:Role is deprecated Successful message:Warning: rbac.authorization.k8s.io/v1beta1 Role is deprecated in v1.17+, unavailable in v1.22+; use rbac.authorization.k8s.io/v1 Role No resources found in namespace-1614073645-30821 namespace. Error: 1 warning received has:Role is deprecated Successful message:Warning: rbac.authorization.k8s.io/v1beta1 Role is deprecated in v1.17+, unavailable in v1.22+; use rbac.authorization.k8s.io/v1 Role No resources found in namespace-1614073645-30821 namespace. 
Error: 1 warning received has:Error: 1 warning received role.rbac.authorization.k8s.io/pod-admin created (dry run) role.rbac.authorization.k8s.io/pod-admin created (server dry run) Successful message:Error from server (NotFound): roles.rbac.authorization.k8s.io "pod-admin" not found has: not found role.rbac.authorization.k8s.io/pod-admin created [32mrbac.sh:163: Successful get role/pod-admin {{range.rules}}{{range.verbs}}{{.}}:{{end}}{{end}}: *: (B[m[32mrbac.sh:164: Successful get role/pod-admin {{range.rules}}{{range.resources}}{{.}}:{{end}}{{end}}: pods: (B[m[32mrbac.sh:165: Successful get role/pod-admin {{range.rules}}{{range.apiGroups}}{{.}}:{{end}}{{end}}: : (B[mSuccessful ... skipping 412 lines ... has:valid-pod Successful message:NAME READY STATUS RESTARTS AGE valid-pod 0/1 Pending 0 1s has:valid-pod [32mcore.sh:190: Successful get pods {{range.items}}{{.metadata.name}}:{{end}}: valid-pod: (B[merror: resource(s) were provided, but no name, label selector, or --all flag specified [32mcore.sh:194: Successful get pods {{range.items}}{{.metadata.name}}:{{end}}: valid-pod: (B[m[32mcore.sh:198: Successful get pods {{range.items}}{{.metadata.name}}:{{end}}: valid-pod: (B[merror: setting 'all' parameter but found a non empty selector. [32mcore.sh:202: Successful get pods {{range.items}}{{.metadata.name}}:{{end}}: valid-pod: (B[m[32mcore.sh:206: Successful get pods {{range.items}}{{.metadata.name}}:{{end}}: valid-pod: (B[mwarning: Immediate deletion does not wait for confirmation that the running resource has been terminated. The resource may continue to run on the cluster indefinitely. pod "valid-pod" force deleted [32mcore.sh:210: Successful get pods -l'name in (valid-pod)' {{range.items}}{{.metadata.name}}:{{end}}: (B[m[32mcore.sh:215: Successful get namespaces {{range.items}}{{ if eq .metadata.name \"test-kubectl-describe-pod\" }}found{{end}}{{end}}:: : ... skipping 19 lines ... (B[mpoddisruptionbudget.policy/test-pdb-2 created [32mcore.sh:259: Successful get pdb/test-pdb-2 --namespace=test-kubectl-describe-pod {{.spec.minAvailable}}: 50% (B[mpoddisruptionbudget.policy/test-pdb-3 created [32mcore.sh:265: Successful get pdb/test-pdb-3 --namespace=test-kubectl-describe-pod {{.spec.maxUnavailable}}: 2 (B[mpoddisruptionbudget.policy/test-pdb-4 created [32mcore.sh:269: Successful get pdb/test-pdb-4 --namespace=test-kubectl-describe-pod {{.spec.maxUnavailable}}: 50% (B[merror: min-available and max-unavailable cannot be both specified [32mcore.sh:275: Successful get pods --namespace=test-kubectl-describe-pod {{range.items}}{{.metadata.name}}:{{end}}: (B[mpod/env-test-pod created matched TEST_CMD_1 matched <set to the key 'key-1' in secret 'test-secret'> matched TEST_CMD_2 matched <set to the key 'key-2' of config map 'test-configmap'> ... skipping 224 lines ... 
[32mcore.sh:534: Successful get pods {{range.items}}{{(index .spec.containers 0).image}}:{{end}}: k8s.gcr.io/pause:3.4.1: (B[mSuccessful message:kubectl-create kubectl-patch has:kubectl-patch pod/valid-pod patched [32mcore.sh:554: Successful get pods {{range.items}}{{(index .spec.containers 0).image}}:{{end}}: nginx: (B[m+++ [0223 09:47:57] "kubectl patch with resourceVersion 591" returns error as expected: Error from server (Conflict): Operation cannot be fulfilled on pods "valid-pod": the object has been modified; please apply your changes to the latest version and try again pod "valid-pod" deleted pod/valid-pod replaced [32mcore.sh:578: Successful get pod valid-pod {{(index .spec.containers 0).name}}: replaced-k8s-serve-hostname (B[mSuccessful message:kubectl-replace has:kubectl-replace Successful message:error: --grace-period must have --force specified has:\-\-grace-period must have \-\-force specified Successful message:error: --timeout must have --force specified has:\-\-timeout must have \-\-force specified node/node-v1-test created W0223 09:47:58.866555 59929 actual_state_of_world.go:534] Failed to update statusUpdateNeeded field in actual state of world: Failed to set statusUpdateNeeded to needed true, because nodeName="node-v1-test" does not exist [32mcore.sh:606: Successful get node node-v1-test {{range.items}}{{if .metadata.annotations.a}}found{{end}}{{end}}:: : (B[mnode/node-v1-test replaced (server dry run) node/node-v1-test replaced (dry run) [32mcore.sh:631: Successful get node node-v1-test {{range.items}}{{if .metadata.annotations.a}}found{{end}}{{end}}:: : (B[mnode/node-v1-test replaced [32mcore.sh:647: Successful get node node-v1-test {{.metadata.annotations.a}}: b ... skipping 29 lines ... spec: containers: - image: k8s.gcr.io/pause:3.4.1 name: kubernetes-pause has:localonlyvalue [32mcore.sh:683: Successful get pod valid-pod {{.metadata.labels.name}}: valid-pod (B[merror: 'name' already has a value (valid-pod), and --overwrite is false [32mcore.sh:687: Successful get pod valid-pod {{.metadata.labels.name}}: valid-pod (B[m[32mcore.sh:691: Successful get pod valid-pod {{.metadata.labels.name}}: valid-pod (B[mpod/valid-pod labeled [32mcore.sh:695: Successful get pod valid-pod {{.metadata.labels.name}}: valid-pod-super-sayan (B[m[32mcore.sh:699: Successful get pods {{range.items}}{{.metadata.name}}:{{end}}: valid-pod: (B[mwarning: Immediate deletion does not wait for confirmation that the running resource has been terminated. The resource may continue to run on the cluster indefinitely. ... skipping 83 lines ... +++ Running case: test-cmd.run_kubectl_create_error_tests +++ working dir: /home/prow/go/src/k8s.io/kubernetes +++ command: run_kubectl_create_error_tests +++ [0223 09:48:10] Creating namespace namespace-1614073690-14840 namespace/namespace-1614073690-14840 created Context "test" modified. +++ [0223 09:48:10] Testing kubectl create with error Error: must specify one of -f and -k Create a resource from a file or from stdin. JSON and YAML formats are accepted. Examples: ... skipping 44 lines ... Usage: kubectl create -f FILENAME [options] Use "kubectl <command> --help" for more information about a given command. Use "kubectl options" for a list of global command-line options (applies to all commands). 
+++ [0223 09:48:10] "kubectl create with empty string list returns error as expected: error: error validating "hack/testdata/invalid-rc-with-empty-args.yaml": error validating data: ValidationError(ReplicationController.spec.template.spec.containers[0].args): unknown object type "nil" in ReplicationController.spec.template.spec.containers[0].args[0]; if you choose to ignore these errors, turn validation off with --validate=false +++ exit code: 0 Recording: run_kubectl_apply_tests Running command: run_kubectl_apply_tests +++ Running case: test-cmd.run_kubectl_apply_tests +++ working dir: /home/prow/go/src/k8s.io/kubernetes ... skipping 32 lines ... I0223 09:48:14.159813 59929 event.go:291] "Event occurred" object="namespace-1614073691-30133/test-deployment-retainkeys-8695b756f8" kind="ReplicaSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulCreate" message="Created pod: test-deployment-retainkeys-8695b756f8-bk2cl" deployment.apps "test-deployment-retainkeys" deleted [32mapply.sh:88: Successful get pods {{range.items}}{{.metadata.name}}:{{end}}: (B[mpod/selector-test-pod created [32mapply.sh:92: Successful get pods selector-test-pod {{.metadata.labels.name}}: selector-test-pod (B[mSuccessful message:Error from server (NotFound): pods "selector-test-pod-dont-apply" not found has:pods "selector-test-pod-dont-apply" not found pod "selector-test-pod" deleted [32mapply.sh:101: Successful get pods {{range.items}}{{.metadata.name}}:{{end}}: (B[mW0223 09:48:15.364137 68331 helpers.go:567] --dry-run=true is deprecated (boolean value) and can be replaced with --dry-run=client. pod/test-pod created (dry run) pod/test-pod created (dry run) ... skipping 34 lines ... (B[mpod/b created [32mapply.sh:196: Successful get pods a {{.metadata.name}}: a (B[m[32mapply.sh:197: Successful get pods b -n nsb {{.metadata.name}}: b (B[mpod "a" deleted pod "b" deleted Successful message:error: all resources selected for prune without explicitly passing --all. To prune all resources, pass the --all flag. If you did not mean to prune all resources, specify a label selector has:all resources selected for prune without explicitly passing --all pod/a created pod/b created service/prune-svc created Warning: extensions/v1beta1 Ingress is deprecated in v1.14+, unavailable in v1.22+; use networking.k8s.io/v1 Ingress I0223 09:48:24.639204 59929 horizontal.go:359] Horizontal Pod Autoscaler frontend has been deleted in namespace-1614073687-2616 ... skipping 41 lines ... (B[mpod/b unchanged pod/a pruned Warning: extensions/v1beta1 Ingress is deprecated in v1.14+, unavailable in v1.22+; use networking.k8s.io/v1 Ingress [32mapply.sh:254: Successful get pods -n nsb {{range.items}}{{.metadata.name}}:{{end}}: b: (B[mnamespace "nsb" deleted Successful message:error: the namespace from the provided object "nsb" does not match the namespace "foo". You must pass '--namespace=nsb' to perform this operation. has:the namespace from the provided object "nsb" does not match the namespace "foo". [32mapply.sh:265: Successful get services {{range.items}}{{.metadata.name}}:{{end}}: (B[mservice/a created [32mapply.sh:269: Successful get services a {{.metadata.name}}: a (B[mSuccessful message:The Service "a" is invalid: spec.clusterIPs[0]: Invalid value: []string{"10.0.0.12"}: may not change once set ... skipping 25 lines ... 
(B[m[32mapply.sh:291: Successful get deployment test-the-deployment {{.metadata.name}}: test-the-deployment (B[m[32mapply.sh:292: Successful get service test-the-service {{.metadata.name}}: test-the-service (B[mconfigmap "test-the-map" deleted service "test-the-service" deleted deployment.apps "test-the-deployment" deleted Successful message:Error from server (NotFound): namespaces "multi-resource-ns" not found has:namespaces "multi-resource-ns" not found [32mapply.sh:300: Successful get pods {{range.items}}{{.metadata.name}}:{{end}}: (B[mSuccessful message:namespace/multi-resource-ns created Error from server (NotFound): error when creating "hack/testdata/multi-resource-1.yaml": namespaces "multi-resource-ns" not found has:namespaces "multi-resource-ns" not found Successful message:Error from server (NotFound): pods "test-pod" not found has:pods "test-pod" not found pod/test-pod created namespace/multi-resource-ns unchanged [32mapply.sh:308: Successful get pods test-pod -n multi-resource-ns {{.metadata.name}}: test-pod (B[mpod "test-pod" deleted namespace "multi-resource-ns" deleted I0223 09:48:54.000296 56228 client.go:360] parsed scheme: "passthrough" I0223 09:48:54.000396 56228 passthrough.go:48] ccResolverWrapper: sending update to cc: {[{http://127.0.0.1:2379 <nil> 0 <nil>}] <nil> <nil>} I0223 09:48:54.000410 56228 clientconn.go:948] ClientConn switching balancer to "pick_first" I0223 09:48:54.019289 59929 namespace_controller.go:185] Namespace has been deleted nsb [32mapply.sh:314: Successful get configmaps --field-selector=metadata.name=foo {{range.items}}{{.metadata.name}}:{{end}}: (B[mSuccessful message:configmap/foo created error: unable to recognize "hack/testdata/multi-resource-2.yaml": no matches for kind "Bogus" in version "example.com/v1" has:no matches for kind "Bogus" in version "example.com/v1" [32mapply.sh:320: Successful get configmaps foo {{.metadata.name}}: foo (B[mconfigmap "foo" deleted [32mapply.sh:326: Successful get pods {{range.items}}{{.metadata.name}}:{{end}}: (B[mSuccessful message:pod/pod-a created ... skipping 6 lines ... pod "pod-c" deleted [32mapply.sh:334: Successful get pods {{range.items}}{{.metadata.name}}:{{end}}: (B[m[32mapply.sh:338: Successful get crds {{range.items}}{{.metadata.name}}:{{end}}: (B[mSuccessful message:Warning: apiextensions.k8s.io/v1beta1 CustomResourceDefinition is deprecated in v1.16+, unavailable in v1.22+; use apiextensions.k8s.io/v1 CustomResourceDefinition customresourcedefinition.apiextensions.k8s.io/widgets.example.com created error: unable to recognize "hack/testdata/multi-resource-4.yaml": no matches for kind "Widget" in version "example.com/v1" has:no matches for kind "Widget" in version "example.com/v1" I0223 09:48:59.797533 56228 client.go:360] parsed scheme: "endpoint" I0223 09:48:59.797601 56228 endpoint.go:68] ccResolverWrapper: sending new addresses to cc: [{http://127.0.0.1:2379 <nil> 0 <nil>}] Successful message:Error from server (NotFound): widgets.example.com "foo" not found has:widgets.example.com "foo" not found [32mapply.sh:344: Successful get crds widgets.example.com {{.metadata.name}}: widgets.example.com (B[mI0223 09:49:02.125117 56228 controller.go:611] quota admission added evaluator for: widgets.example.com widget.example.com/foo created Warning: apiextensions.k8s.io/v1beta1 CustomResourceDefinition is deprecated in v1.16+, unavailable in v1.22+; use apiextensions.k8s.io/v1 CustomResourceDefinition customresourcedefinition.apiextensions.k8s.io/widgets.example.com unchanged ... skipping 34 lines ... 
message:871 has:871 pod "test-pod" deleted [32mapply.sh:403: Successful get pods {{range.items}}{{.metadata.name}}:{{end}}: (B[m+++ [0223 09:49:05] Testing upgrade kubectl client-side apply to server-side apply pod/test-pod created error: Apply failed with 1 conflict: conflict with "kubectl-client-side-apply" using v1: .metadata.labels.name Please review the fields above--they currently have other managers. Here are the ways you can resolve this warning: * If you intend to manage all of these fields, please re-run the apply command with the `--force-conflicts` flag. * If you do not intend to manage all of the fields, please edit your manifest to remove references to the fields that should keep their ... skipping 79 lines ... (B[mpod "nginx-extensions" deleted Successful message:pod/test1 created has:pod/test1 created pod "test1" deleted Successful message:error: Invalid image name "InvalidImageName": invalid reference format has:error: Invalid image name "InvalidImageName": invalid reference format +++ exit code: 0 Recording: run_kubectl_create_filter_tests Running command: run_kubectl_create_filter_tests +++ Running case: test-cmd.run_kubectl_create_filter_tests +++ working dir: /home/prow/go/src/k8s.io/kubernetes ... skipping 3 lines ... Context "test" modified. +++ [0223 09:49:09] Testing kubectl create filter [32mcreate.sh:50: Successful get pods {{range.items}}{{.metadata.name}}:{{end}}: (B[mpod/selector-test-pod created [32mcreate.sh:54: Successful get pods selector-test-pod {{.metadata.labels.name}}: selector-test-pod (B[mSuccessful message:Error from server (NotFound): pods "selector-test-pod-dont-apply" not found has:pods "selector-test-pod-dont-apply" not found pod "selector-test-pod" deleted +++ exit code: 0 Recording: run_kubectl_apply_deployments_tests Running command: run_kubectl_apply_deployments_tests ... skipping 29 lines ... 
I0223 09:49:13.300437 59929 event.go:291] "Event occurred" object="namespace-1614073750-10427/nginx" kind="Deployment" apiVersion="apps/v1" type="Normal" reason="ScalingReplicaSet" message="Scaled up replica set nginx-9bb9c4878 to 3" I0223 09:49:13.304344 59929 event.go:291] "Event occurred" object="namespace-1614073750-10427/nginx-9bb9c4878" kind="ReplicaSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulCreate" message="Created pod: nginx-9bb9c4878-swsl7" I0223 09:49:13.312034 59929 event.go:291] "Event occurred" object="namespace-1614073750-10427/nginx-9bb9c4878" kind="ReplicaSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulCreate" message="Created pod: nginx-9bb9c4878-8km48" I0223 09:49:13.312076 59929 event.go:291] "Event occurred" object="namespace-1614073750-10427/nginx-9bb9c4878" kind="ReplicaSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulCreate" message="Created pod: nginx-9bb9c4878-2kprs" [32mapps.sh:152: Successful get deployment nginx {{.metadata.name}}: nginx (B[mSuccessful message:Error from server (Conflict): error when applying patch: {"metadata":{"annotations":{"kubectl.kubernetes.io/last-applied-configuration":"{\"apiVersion\":\"apps/v1\",\"kind\":\"Deployment\",\"metadata\":{\"annotations\":{},\"labels\":{\"name\":\"nginx\"},\"name\":\"nginx\",\"namespace\":\"namespace-1614073750-10427\",\"resourceVersion\":\"99\"},\"spec\":{\"replicas\":3,\"selector\":{\"matchLabels\":{\"name\":\"nginx2\"}},\"template\":{\"metadata\":{\"labels\":{\"name\":\"nginx2\"}},\"spec\":{\"containers\":[{\"image\":\"k8s.gcr.io/nginx:test-cmd\",\"name\":\"nginx\",\"ports\":[{\"containerPort\":80}]}]}}}}\n"},"resourceVersion":"99"},"spec":{"selector":{"matchLabels":{"name":"nginx2"}},"template":{"metadata":{"labels":{"name":"nginx2"}}}}} to: Resource: "apps/v1, Resource=deployments", GroupVersionKind: "apps/v1, Kind=Deployment" Name: "nginx", Namespace: "namespace-1614073750-10427" for: "hack/testdata/deployment-label-change2.yaml": Operation cannot be fulfilled on deployments.apps "nginx": the object has been modified; please apply your changes to the latest version and try again has:Error from server (Conflict) deployment.apps/nginx configured I0223 09:49:21.923114 59929 event.go:291] "Event occurred" object="namespace-1614073750-10427/nginx" kind="Deployment" apiVersion="apps/v1" type="Normal" reason="ScalingReplicaSet" message="Scaled up replica set nginx-6dd6cfdb57 to 3" I0223 09:49:21.932137 59929 event.go:291] "Event occurred" object="namespace-1614073750-10427/nginx-6dd6cfdb57" kind="ReplicaSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulCreate" message="Created pod: nginx-6dd6cfdb57-hbdmw" I0223 09:49:21.940155 59929 event.go:291] "Event occurred" object="namespace-1614073750-10427/nginx-6dd6cfdb57" kind="ReplicaSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulCreate" message="Created pod: nginx-6dd6cfdb57-n48pt" I0223 09:49:21.940192 59929 event.go:291] "Event occurred" object="namespace-1614073750-10427/nginx-6dd6cfdb57" kind="ReplicaSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulCreate" message="Created pod: nginx-6dd6cfdb57-bp4ps" Successful ... skipping 300 lines ... +++ [0223 09:49:30] Creating namespace namespace-1614073770-25993 namespace/namespace-1614073770-25993 created Context "test" modified. 
+++ [0223 09:49:31] Testing kubectl get [32mget.sh:29: Successful get pods {{range.items}}{{.metadata.name}}:{{end}}: (B[mSuccessful message:Error from server (NotFound): pods "abc" not found has:pods "abc" not found [32mget.sh:37: Successful get pods {{range.items}}{{.metadata.name}}:{{end}}: (B[mSuccessful message:Error from server (NotFound): pods "abc" not found has:pods "abc" not found [32mget.sh:45: Successful get pods {{range.items}}{{.metadata.name}}:{{end}}: (B[mSuccessful message:{ "apiVersion": "v1", "items": [], ... skipping 23 lines ... has not:No resources found Successful message:NAME has not:No resources found [32mget.sh:73: Successful get pods {{range.items}}{{.metadata.name}}:{{end}}: (B[mSuccessful message:error: the server doesn't have a resource type "foobar" has not:No resources found Successful message:No resources found in namespace-1614073770-25993 namespace. has:No resources found Successful message: has not:No resources found Successful message:No resources found in namespace-1614073770-25993 namespace. has:No resources found [32mget.sh:93: Successful get pods {{range.items}}{{.metadata.name}}:{{end}}: (B[mSuccessful message:Error from server (NotFound): pods "abc" not found has:pods "abc" not found Successful message:Error from server (NotFound): pods "abc" not found has not:List Successful message:I0223 09:49:32.999838 71823 loader.go:372] Config loaded from file: /tmp/tmp.oSgmtXVaz9/.kube/config I0223 09:49:33.074407 71823 round_trippers.go:454] GET https://127.0.0.1:6443/version?timeout=32s 200 OK in 73 milliseconds I0223 09:49:33.099878 71823 round_trippers.go:454] GET https://127.0.0.1:6443/api/v1/namespaces/default/pods 200 OK in 2 milliseconds I0223 09:49:33.102089 71823 round_trippers.go:454] GET https://127.0.0.1:6443/api/v1/namespaces/default/replicationcontrollers 200 OK in 1 milliseconds ... skipping 594 lines ... } [32mget.sh:158: Successful get pods {{range.items}}{{.metadata.name}}:{{end}}: valid-pod: (B[m<no value>Successful message:valid-pod: has:valid-pod: Successful message:error: error executing jsonpath "{.missing}": Error executing template: missing is not found. 
Printing more information for debugging the template: template was: {.missing} object given to jsonpath engine was: map[string]interface {}{"apiVersion":"v1", "kind":"Pod", "metadata":map[string]interface {}{"creationTimestamp":"2021-02-23T09:49:40Z", "labels":map[string]interface {}{"name":"valid-pod"}, "managedFields":[]interface {}{map[string]interface {}{"apiVersion":"v1", "fieldsType":"FieldsV1", "fieldsV1":map[string]interface {}{"f:metadata":map[string]interface {}{"f:labels":map[string]interface {}{".":map[string]interface {}{}, "f:name":map[string]interface {}{}}}, "f:spec":map[string]interface {}{"f:containers":map[string]interface {}{"k:{\"name\":\"kubernetes-serve-hostname\"}":map[string]interface {}{".":map[string]interface {}{}, "f:image":map[string]interface {}{}, "f:imagePullPolicy":map[string]interface {}{}, "f:name":map[string]interface {}{}, "f:resources":map[string]interface {}{".":map[string]interface {}{}, "f:limits":map[string]interface {}{".":map[string]interface {}{}, "f:cpu":map[string]interface {}{}, "f:memory":map[string]interface {}{}}, "f:requests":map[string]interface {}{".":map[string]interface {}{}, "f:cpu":map[string]interface {}{}, "f:memory":map[string]interface {}{}}}, "f:terminationMessagePath":map[string]interface {}{}, "f:terminationMessagePolicy":map[string]interface {}{}}}, "f:dnsPolicy":map[string]interface {}{}, "f:enableServiceLinks":map[string]interface {}{}, "f:restartPolicy":map[string]interface {}{}, "f:schedulerName":map[string]interface {}{}, "f:securityContext":map[string]interface {}{}, "f:terminationGracePeriodSeconds":map[string]interface {}{}}}, "manager":"kubectl-create", "operation":"Update", "time":"2021-02-23T09:49:40Z"}}, "name":"valid-pod", "namespace":"namespace-1614073780-15500", "resourceVersion":"1040", "uid":"22d3a619-b105-40e9-977f-e854a7c12aa7"}, "spec":map[string]interface {}{"containers":[]interface {}{map[string]interface {}{"image":"k8s.gcr.io/serve_hostname", "imagePullPolicy":"Always", "name":"kubernetes-serve-hostname", "resources":map[string]interface {}{"limits":map[string]interface {}{"cpu":"1", "memory":"512Mi"}, "requests":map[string]interface {}{"cpu":"1", "memory":"512Mi"}}, "terminationMessagePath":"/dev/termination-log", "terminationMessagePolicy":"File"}}, "dnsPolicy":"ClusterFirst", "enableServiceLinks":true, "preemptionPolicy":"PreemptLowerPriority", "priority":0, "restartPolicy":"Always", "schedulerName":"default-scheduler", "securityContext":map[string]interface {}{}, "terminationGracePeriodSeconds":30}, "status":map[string]interface {}{"phase":"Pending", "qosClass":"Guaranteed"}} has:missing is not found error: error executing template "{{.missing}}": template: output:1:2: executing "output" at <.missing>: map has no entry for key "missing" Successful message:Error executing template: template: output:1:2: executing "output" at <.missing>: map has no entry for key "missing". 
Printing more information for debugging the template: template was: {{.missing}} raw data was: {"apiVersion":"v1","kind":"Pod","metadata":{"creationTimestamp":"2021-02-23T09:49:40Z","labels":{"name":"valid-pod"},"managedFields":[{"apiVersion":"v1","fieldsType":"FieldsV1","fieldsV1":{"f:metadata":{"f:labels":{".":{},"f:name":{}}},"f:spec":{"f:containers":{"k:{\"name\":\"kubernetes-serve-hostname\"}":{".":{},"f:image":{},"f:imagePullPolicy":{},"f:name":{},"f:resources":{".":{},"f:limits":{".":{},"f:cpu":{},"f:memory":{}},"f:requests":{".":{},"f:cpu":{},"f:memory":{}}},"f:terminationMessagePath":{},"f:terminationMessagePolicy":{}}},"f:dnsPolicy":{},"f:enableServiceLinks":{},"f:restartPolicy":{},"f:schedulerName":{},"f:securityContext":{},"f:terminationGracePeriodSeconds":{}}},"manager":"kubectl-create","operation":"Update","time":"2021-02-23T09:49:40Z"}],"name":"valid-pod","namespace":"namespace-1614073780-15500","resourceVersion":"1040","uid":"22d3a619-b105-40e9-977f-e854a7c12aa7"},"spec":{"containers":[{"image":"k8s.gcr.io/serve_hostname","imagePullPolicy":"Always","name":"kubernetes-serve-hostname","resources":{"limits":{"cpu":"1","memory":"512Mi"},"requests":{"cpu":"1","memory":"512Mi"}},"terminationMessagePath":"/dev/termination-log","terminationMessagePolicy":"File"}],"dnsPolicy":"ClusterFirst","enableServiceLinks":true,"preemptionPolicy":"PreemptLowerPriority","priority":0,"restartPolicy":"Always","schedulerName":"default-scheduler","securityContext":{},"terminationGracePeriodSeconds":30},"status":{"phase":"Pending","qosClass":"Guaranteed"}} object given to template engine was: map[apiVersion:v1 kind:Pod metadata:map[creationTimestamp:2021-02-23T09:49:40Z labels:map[name:valid-pod] managedFields:[map[apiVersion:v1 fieldsType:FieldsV1 fieldsV1:map[f:metadata:map[f:labels:map[.:map[] f:name:map[]]] f:spec:map[f:containers:map[k:{"name":"kubernetes-serve-hostname"}:map[.:map[] f:image:map[] f:imagePullPolicy:map[] f:name:map[] f:resources:map[.:map[] f:limits:map[.:map[] f:cpu:map[] f:memory:map[]] f:requests:map[.:map[] f:cpu:map[] f:memory:map[]]] f:terminationMessagePath:map[] f:terminationMessagePolicy:map[]]] f:dnsPolicy:map[] f:enableServiceLinks:map[] f:restartPolicy:map[] f:schedulerName:map[] f:securityContext:map[] f:terminationGracePeriodSeconds:map[]]] manager:kubectl-create operation:Update time:2021-02-23T09:49:40Z]] name:valid-pod namespace:namespace-1614073780-15500 resourceVersion:1040 uid:22d3a619-b105-40e9-977f-e854a7c12aa7] spec:map[containers:[map[image:k8s.gcr.io/serve_hostname imagePullPolicy:Always name:kubernetes-serve-hostname resources:map[limits:map[cpu:1 memory:512Mi] requests:map[cpu:1 memory:512Mi]] terminationMessagePath:/dev/termination-log terminationMessagePolicy:File]] dnsPolicy:ClusterFirst enableServiceLinks:true preemptionPolicy:PreemptLowerPriority priority:0 restartPolicy:Always schedulerName:default-scheduler securityContext:map[] terminationGracePeriodSeconds:30] status:map[phase:Pending qosClass:Guaranteed]] ... skipping 84 lines ... terminationGracePeriodSeconds: 30 status: phase: Pending qosClass: Guaranteed has:name: valid-pod Successful message:Error from server (NotFound): pods "invalid-pod" not found has:"invalid-pod" not found pod "valid-pod" deleted [32mget.sh:196: Successful get pods {{range.items}}{{.metadata.name}}:{{end}}: (B[mpod/redis-master created pod/valid-pod created Successful ... skipping 36 lines ... 
+++ [0223 09:49:46] Creating namespace namespace-1614073786-14490 namespace/namespace-1614073786-14490 created Context "test" modified. +++ [0223 09:49:46] Testing kubectl exec POD COMMAND Successful message:kubectl exec [POD] [COMMAND] is DEPRECATED and will be removed in a future version. Use kubectl exec [POD] -- [COMMAND] instead. Error from server (NotFound): pods "abc" not found has:pods "abc" not found pod/test-pod created Successful message:kubectl exec [POD] [COMMAND] is DEPRECATED and will be removed in a future version. Use kubectl exec [POD] -- [COMMAND] instead. Error from server (BadRequest): pod test-pod does not have a host assigned has not:pods "test-pod" not found Successful message:kubectl exec [POD] [COMMAND] is DEPRECATED and will be removed in a future version. Use kubectl exec [POD] -- [COMMAND] instead. Error from server (BadRequest): pod test-pod does not have a host assigned has not:pod or type/name must be specified pod "test-pod" deleted +++ exit code: 0 Recording: run_kubectl_exec_resource_name_tests Running command: run_kubectl_exec_resource_name_tests ... skipping 3 lines ... +++ [0223 09:49:47] Creating namespace namespace-1614073787-6547 namespace/namespace-1614073787-6547 created Context "test" modified. +++ [0223 09:49:47] Testing kubectl exec TYPE/NAME COMMAND Successful message:kubectl exec [POD] [COMMAND] is DEPRECATED and will be removed in a future version. Use kubectl exec [POD] -- [COMMAND] instead. error: the server doesn't have a resource type "foo" has:error: Successful message:kubectl exec [POD] [COMMAND] is DEPRECATED and will be removed in a future version. Use kubectl exec [POD] -- [COMMAND] instead. Error from server (NotFound): deployments.apps "bar" not found has:"bar" not found pod/test-pod created replicaset.apps/frontend created I0223 09:49:48.154982 59929 event.go:291] "Event occurred" object="namespace-1614073787-6547/frontend" kind="ReplicaSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulCreate" message="Created pod: frontend-kr7pt" I0223 09:49:48.159080 59929 event.go:291] "Event occurred" object="namespace-1614073787-6547/frontend" kind="ReplicaSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulCreate" message="Created pod: frontend-wjmt2" I0223 09:49:48.159256 59929 event.go:291] "Event occurred" object="namespace-1614073787-6547/frontend" kind="ReplicaSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulCreate" message="Created pod: frontend-rjss8" configmap/test-set-env-config created Successful message:kubectl exec [POD] [COMMAND] is DEPRECATED and will be removed in a future version. Use kubectl exec [POD] -- [COMMAND] instead. error: cannot attach to *v1.ConfigMap: selector for *v1.ConfigMap not implemented has:not implemented Successful message:kubectl exec [POD] [COMMAND] is DEPRECATED and will be removed in a future version. Use kubectl exec [POD] -- [COMMAND] instead. Error from server (BadRequest): pod test-pod does not have a host assigned has not:not found Successful message:kubectl exec [POD] [COMMAND] is DEPRECATED and will be removed in a future version. Use kubectl exec [POD] -- [COMMAND] instead. Error from server (BadRequest): pod test-pod does not have a host assigned has not:pod, type/name or --filename must be specified Successful message:kubectl exec [POD] [COMMAND] is DEPRECATED and will be removed in a future version. Use kubectl exec [POD] -- [COMMAND] instead. 
Error from server (BadRequest): pod frontend-kr7pt does not have a host assigned has not:not found Successful message:kubectl exec [POD] [COMMAND] is DEPRECATED and will be removed in a future version. Use kubectl exec [POD] -- [COMMAND] instead. Error from server (BadRequest): pod frontend-kr7pt does not have a host assigned has not:pod, type/name or --filename must be specified pod "test-pod" deleted replicaset.apps "frontend" deleted configmap "test-set-env-config" deleted +++ exit code: 0 Recording: run_create_secret_tests Running command: run_create_secret_tests +++ Running case: test-cmd.run_create_secret_tests +++ working dir: /home/prow/go/src/k8s.io/kubernetes +++ command: run_create_secret_tests Successful message:Error from server (NotFound): secrets "mysecret" not found has:secrets "mysecret" not found Successful message:user-specified has:user-specified Successful message:Error from server (NotFound): secrets "mysecret" not found has:secrets "mysecret" not found Successful {"kind":"ConfigMap","apiVersion":"v1","metadata":{"name":"tester-update-cm","namespace":"default","uid":"bfeb6da0-c141-413a-b869-c167133f788a","resourceVersion":"1119","creationTimestamp":"2021-02-23T09:49:49Z"}} Successful message:{"kind":"ConfigMap","apiVersion":"v1","metadata":{"name":"tester-update-cm","namespace":"default","uid":"bfeb6da0-c141-413a-b869-c167133f788a","resourceVersion":"1121","creationTimestamp":"2021-02-23T09:49:49Z"},"data":{"key1":"config1"}} has:uid Successful message:{"kind":"ConfigMap","apiVersion":"v1","metadata":{"name":"tester-update-cm","namespace":"default","uid":"bfeb6da0-c141-413a-b869-c167133f788a","resourceVersion":"1121","creationTimestamp":"2021-02-23T09:49:49Z"},"data":{"key1":"config1"}} has:config1 {"kind":"Status","apiVersion":"v1","metadata":{},"status":"Success","details":{"name":"tester-update-cm","kind":"configmaps","uid":"bfeb6da0-c141-413a-b869-c167133f788a"}} Successful message:Error from server (NotFound): configmaps "tester-update-cm" not found has:configmaps "tester-update-cm" not found +++ exit code: 0 Recording: run_kubectl_create_kustomization_directory_tests Running command: run_kubectl_create_kustomization_directory_tests +++ Running case: test-cmd.run_kubectl_create_kustomization_directory_tests ... skipping 73 lines ... securityContext: {} terminationGracePeriodSeconds: 30 status: {} has:apps/v1beta1 deployment.apps "nginx" deleted Successful message:error: unable to decode "hack/testdata/recursive/pod/pod/busybox-broken.yaml": Object 'Kind' is missing in '{"apiVersion":"v1","ind":"Pod","metadata":{"labels":{"app":"busybox2"},"name":"busybox2"},"spec":{"containers":[{"command":["sleep","3600"],"image":"busybox","imagePullPolicy":"IfNotPresent","name":"busybox"}],"restartPolicy":"Always"}}' has:Object 'Kind' is missing Successful message:nginx: has:nginx: +++ exit code: 0 Recording: run_kubectl_delete_allnamespaces_tests ... skipping 104 lines ... has:Timeout Successful message:NAME READY STATUS RESTARTS AGE valid-pod 0/1 Pending 0 1s has:valid-pod Successful message:error: Invalid timeout value. Timeout must be a single integer in seconds, or an integer followed by a corresponding time unit (e.g. 1s | 2m | 3h) has:Invalid timeout value pod "valid-pod" deleted +++ exit code: 0 Recording: run_crd_tests Running command: run_crd_tests ... skipping 158 lines ... 
foo.company.com/test patched [32mcrd.sh:236: Successful get foos/test {{.patched}}: value1 (B[mfoo.company.com/test patched [32mcrd.sh:238: Successful get foos/test {{.patched}}: value2 (B[mfoo.company.com/test patched [32mcrd.sh:240: Successful get foos/test {{.patched}}: <no value> (B[m+++ [0223 09:50:02] "kubectl patch --local" returns error as expected for CustomResource: error: strategic merge patch is not supported for company.com/v1, Kind=Foo locally, try --type merge { "apiVersion": "company.com/v1", "kind": "Foo", "metadata": { "annotations": { "kubernetes.io/change-cause": "kubectl patch foos/test --server=https://127.0.0.1:6443 --insecure-skip-tls-verify=true --match-server-version=true --patch={\"patched\":null} --type=merge --record=true" ... skipping 293 lines ... (B[m[32mcrd.sh:450: Successful get bars {{range.items}}{{.metadata.name}}:{{end}}: (B[mnamespace/non-native-resources created bar.company.com/test created [32mcrd.sh:455: Successful get bars {{len .items}}: 1 (B[mnamespace "non-native-resources" deleted [32mcrd.sh:458: Successful get bars {{len .items}}: 0 (B[mError from server (NotFound): namespaces "non-native-resources" not found customresourcedefinition.apiextensions.k8s.io "foos.company.com" deleted customresourcedefinition.apiextensions.k8s.io "bars.company.com" deleted customresourcedefinition.apiextensions.k8s.io "resources.mygroup.example.com" deleted customresourcedefinition.apiextensions.k8s.io "validfoos.company.com" deleted +++ exit code: 0 +++ [0223 09:50:24] Testing recursive resources ... skipping 2 lines ... Context "test" modified. [32mgeneric-resources.sh:202: Successful get pods {{range.items}}{{.metadata.name}}:{{end}}: (B[m[32mgeneric-resources.sh:206: Successful get pods {{range.items}}{{.metadata.name}}:{{end}}: busybox0:busybox1: (B[mSuccessful message:pod/busybox0 created pod/busybox1 created error: error validating "hack/testdata/recursive/pod/pod/busybox-broken.yaml": error validating data: kind not set; if you choose to ignore these errors, turn validation off with --validate=false has:error validating data: kind not set W0223 09:50:25.591071 56228 cacher.go:148] Terminating all watchers from cacher *unstructured.Unstructured E0223 09:50:25.592986 59929 reflector.go:138] k8s.io/client-go/metadata/metadatainformer/informer.go:90: Failed to watch *v1.PartialObjectMetadata: the server could not find the requested resource [32mgeneric-resources.sh:211: Successful get pods {{range.items}}{{.metadata.name}}:{{end}}: busybox0:busybox1: (B[mW0223 09:50:25.698368 56228 cacher.go:148] Terminating all watchers from cacher *unstructured.Unstructured E0223 09:50:25.700042 59929 reflector.go:138] k8s.io/client-go/metadata/metadatainformer/informer.go:90: Failed to watch *v1.PartialObjectMetadata: the server could not find the requested resource W0223 09:50:25.797348 56228 cacher.go:148] Terminating all watchers from cacher *unstructured.Unstructured E0223 09:50:25.799235 59929 reflector.go:138] k8s.io/client-go/metadata/metadatainformer/informer.go:90: Failed to watch *v1.PartialObjectMetadata: the server could not find the requested resource [32mgeneric-resources.sh:220: Successful get pods {{range.items}}{{(index .spec.containers 0).image}}:{{end}}: busybox:busybox: (B[mSuccessful message:error: unable to decode "hack/testdata/recursive/pod/pod/busybox-broken.yaml": Object 'Kind' is missing in 
'{"apiVersion":"v1","ind":"Pod","metadata":{"labels":{"app":"busybox2"},"name":"busybox2"},"spec":{"containers":[{"command":["sleep","3600"],"image":"busybox","imagePullPolicy":"IfNotPresent","name":"busybox"}],"restartPolicy":"Always"}}' has:Object 'Kind' is missing W0223 09:50:25.902599 56228 cacher.go:148] Terminating all watchers from cacher *unstructured.Unstructured E0223 09:50:25.904281 59929 reflector.go:138] k8s.io/client-go/metadata/metadatainformer/informer.go:90: Failed to watch *v1.PartialObjectMetadata: the server could not find the requested resource [32mgeneric-resources.sh:227: Successful get pods {{range.items}}{{.metadata.name}}:{{end}}: busybox0:busybox1: (B[m[32mgeneric-resources.sh:231: Successful get pods {{range.items}}{{.metadata.labels.status}}:{{end}}: replaced:replaced: (B[mSuccessful message:pod/busybox0 replaced pod/busybox1 replaced error: error validating "hack/testdata/recursive/pod-modify/pod/busybox-broken.yaml": error validating data: kind not set; if you choose to ignore these errors, turn validation off with --validate=false has:error validating data: kind not set [32mgeneric-resources.sh:236: Successful get pods {{range.items}}{{.metadata.name}}:{{end}}: busybox0:busybox1: (B[mSuccessful message:Name: busybox0 Namespace: namespace-1614073824-4512 Priority: 0 Node: <none> ... skipping 155 lines ... Node-Selectors: <none> Tolerations: <none> Events: <none> unable to decode "hack/testdata/recursive/pod/pod/busybox-broken.yaml": Object 'Kind' is missing in '{"apiVersion":"v1","ind":"Pod","metadata":{"labels":{"app":"busybox2"},"name":"busybox2"},"spec":{"containers":[{"command":["sleep","3600"],"image":"busybox","imagePullPolicy":"IfNotPresent","name":"busybox"}],"restartPolicy":"Always"}}' has:Object 'Kind' is missing [32mgeneric-resources.sh:246: Successful get pods {{range.items}}{{.metadata.name}}:{{end}}: busybox0:busybox1: (B[mE0223 09:50:26.777744 59929 reflector.go:138] k8s.io/client-go/metadata/metadatainformer/informer.go:90: Failed to watch *v1.PartialObjectMetadata: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource [32mgeneric-resources.sh:250: Successful get pods {{range.items}}{{.metadata.annotations.annotatekey}}:{{end}}: annotatevalue:annotatevalue: (B[mSuccessful message:pod/busybox0 annotated pod/busybox1 annotated error: unable to decode "hack/testdata/recursive/pod/pod/busybox-broken.yaml": Object 'Kind' is missing in '{"apiVersion":"v1","ind":"Pod","metadata":{"labels":{"app":"busybox2"},"name":"busybox2"},"spec":{"containers":[{"command":["sleep","3600"],"image":"busybox","imagePullPolicy":"IfNotPresent","name":"busybox"}],"restartPolicy":"Always"}}' has:Object 'Kind' is missing E0223 09:50:26.952161 59929 reflector.go:138] k8s.io/client-go/metadata/metadatainformer/informer.go:90: Failed to watch *v1.PartialObjectMetadata: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource E0223 09:50:26.959986 59929 reflector.go:138] k8s.io/client-go/metadata/metadatainformer/informer.go:90: Failed to watch *v1.PartialObjectMetadata: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource [32mgeneric-resources.sh:255: Successful get pods {{range.items}}{{.metadata.name}}:{{end}}: busybox0:busybox1: (B[mE0223 09:50:27.096447 59929 reflector.go:138] k8s.io/client-go/metadata/metadatainformer/informer.go:90: Failed to watch *v1.PartialObjectMetadata: failed to list *v1.PartialObjectMetadata: the server could not find the requested 
resource [32mgeneric-resources.sh:259: Successful get pods {{range.items}}{{.metadata.labels.status}}:{{end}}: replaced:replaced: (B[mSuccessful message:Warning: resource pods/busybox0 is missing the kubectl.kubernetes.io/last-applied-configuration annotation which is required by kubectl apply. kubectl apply should only be used on resources created declaratively by either kubectl create --save-config or kubectl apply. The missing annotation will be patched automatically. pod/busybox0 configured Warning: resource pods/busybox1 is missing the kubectl.kubernetes.io/last-applied-configuration annotation which is required by kubectl apply. kubectl apply should only be used on resources created declaratively by either kubectl create --save-config or kubectl apply. The missing annotation will be patched automatically. pod/busybox1 configured error: error validating "hack/testdata/recursive/pod-modify/pod/busybox-broken.yaml": error validating data: kind not set; if you choose to ignore these errors, turn validation off with --validate=false has:error validating data: kind not set [32mgeneric-resources.sh:264: Successful get pods {{range.items}}{{.metadata.name}}:{{end}}: busybox0:busybox1: (B[mSuccessful message:busybox0:busybox1:error: unable to decode "hack/testdata/recursive/pod/pod/busybox-broken.yaml": Object 'Kind' is missing in '{"apiVersion":"v1","ind":"Pod","metadata":{"labels":{"app":"busybox2"},"name":"busybox2"},"spec":{"containers":[{"command":["sleep","3600"],"image":"busybox","imagePullPolicy":"IfNotPresent","name":"busybox"}],"restartPolicy":"Always"}}' has:busybox0:busybox1: Successful message:busybox0:busybox1:error: unable to decode "hack/testdata/recursive/pod/pod/busybox-broken.yaml": Object 'Kind' is missing in '{"apiVersion":"v1","ind":"Pod","metadata":{"labels":{"app":"busybox2"},"name":"busybox2"},"spec":{"containers":[{"command":["sleep","3600"],"image":"busybox","imagePullPolicy":"IfNotPresent","name":"busybox"}],"restartPolicy":"Always"}}' has:Object 'Kind' is missing [32mgeneric-resources.sh:273: Successful get pods {{range.items}}{{.metadata.name}}:{{end}}: busybox0:busybox1: (B[mpod/busybox0 labeled pod/busybox1 labeled error: unable to decode "hack/testdata/recursive/pod/pod/busybox-broken.yaml": Object 'Kind' is missing in '{"apiVersion":"v1","ind":"Pod","metadata":{"labels":{"app":"busybox2"},"name":"busybox2"},"spec":{"containers":[{"command":["sleep","3600"],"image":"busybox","imagePullPolicy":"IfNotPresent","name":"busybox"}],"restartPolicy":"Always"}}' [32mgeneric-resources.sh:278: Successful get pods {{range.items}}{{.metadata.labels.mylabel}}:{{end}}: myvalue:myvalue: (B[mSuccessful message:pod/busybox0 labeled pod/busybox1 labeled error: unable to decode "hack/testdata/recursive/pod/pod/busybox-broken.yaml": Object 'Kind' is missing in '{"apiVersion":"v1","ind":"Pod","metadata":{"labels":{"app":"busybox2"},"name":"busybox2"},"spec":{"containers":[{"command":["sleep","3600"],"image":"busybox","imagePullPolicy":"IfNotPresent","name":"busybox"}],"restartPolicy":"Always"}}' has:Object 'Kind' is missing [32mgeneric-resources.sh:283: Successful get pods {{range.items}}{{.metadata.name}}:{{end}}: busybox0:busybox1: (B[mpod/busybox0 patched pod/busybox1 patched error: unable to decode "hack/testdata/recursive/pod/pod/busybox-broken.yaml": Object 'Kind' is missing in 
'{"apiVersion":"v1","ind":"Pod","metadata":{"labels":{"app":"busybox2"},"name":"busybox2"},"spec":{"containers":[{"command":["sleep","3600"],"image":"busybox","imagePullPolicy":"IfNotPresent","name":"busybox"}],"restartPolicy":"Always"}}' [32mgeneric-resources.sh:288: Successful get pods {{range.items}}{{(index .spec.containers 0).image}}:{{end}}: prom/busybox:prom/busybox: (B[mSuccessful message:pod/busybox0 patched pod/busybox1 patched error: unable to decode "hack/testdata/recursive/pod/pod/busybox-broken.yaml": Object 'Kind' is missing in '{"apiVersion":"v1","ind":"Pod","metadata":{"labels":{"app":"busybox2"},"name":"busybox2"},"spec":{"containers":[{"command":["sleep","3600"],"image":"busybox","imagePullPolicy":"IfNotPresent","name":"busybox"}],"restartPolicy":"Always"}}' has:Object 'Kind' is missing [32mgeneric-resources.sh:293: Successful get pods {{range.items}}{{.metadata.name}}:{{end}}: busybox0:busybox1: (B[m[32mgeneric-resources.sh:297: Successful get pods {{range.items}}{{.metadata.name}}:{{end}}: (B[mSuccessful message:warning: Immediate deletion does not wait for confirmation that the running resource has been terminated. The resource may continue to run on the cluster indefinitely. pod "busybox0" force deleted pod "busybox1" force deleted error: unable to decode "hack/testdata/recursive/pod/pod/busybox-broken.yaml": Object 'Kind' is missing in '{"apiVersion":"v1","ind":"Pod","metadata":{"labels":{"app":"busybox2"},"name":"busybox2"},"spec":{"containers":[{"command":["sleep","3600"],"image":"busybox","imagePullPolicy":"IfNotPresent","name":"busybox"}],"restartPolicy":"Always"}}' has:Object 'Kind' is missing [32mgeneric-resources.sh:302: Successful get rc {{range.items}}{{.metadata.name}}:{{end}}: (B[mE0223 09:50:28.612821 59929 reflector.go:138] k8s.io/client-go/metadata/metadatainformer/informer.go:90: Failed to watch *v1.PartialObjectMetadata: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource replicationcontroller/busybox0 created I0223 09:50:28.676952 59929 event.go:291] "Event occurred" object="namespace-1614073824-4512/busybox0" kind="ReplicationController" apiVersion="v1" type="Normal" reason="SuccessfulCreate" message="Created pod: busybox0-zgx7r" replicationcontroller/busybox1 created error: error validating "hack/testdata/recursive/rc/rc/busybox-broken.yaml": error validating data: kind not set; if you choose to ignore these errors, turn validation off with --validate=false I0223 09:50:28.683606 59929 event.go:291] "Event occurred" object="namespace-1614073824-4512/busybox1" kind="ReplicationController" apiVersion="v1" type="Normal" reason="SuccessfulCreate" message="Created pod: busybox1-8ntmf" [32mgeneric-resources.sh:306: Successful get rc {{range.items}}{{.metadata.name}}:{{end}}: busybox0:busybox1: (B[m[32mgeneric-resources.sh:311: Successful get rc {{range.items}}{{.metadata.name}}:{{end}}: busybox0:busybox1: (B[m[32mgeneric-resources.sh:312: Successful get rc busybox0 {{.spec.replicas}}: 1 (B[m[32mgeneric-resources.sh:313: Successful get rc busybox1 {{.spec.replicas}}: 1 (B[m[32mgeneric-resources.sh:318: Successful get hpa busybox0 {{.spec.minReplicas}} {{.spec.maxReplicas}} {{.spec.targetCPUUtilizationPercentage}}: 1 2 80 (B[mI0223 09:50:29.300254 59929 namespace_controller.go:185] Namespace has been deleted non-native-resources [32mgeneric-resources.sh:319: Successful get hpa busybox1 {{.spec.minReplicas}} {{.spec.maxReplicas}} {{.spec.targetCPUUtilizationPercentage}}: 1 2 80 (B[mSuccessful 
message:horizontalpodautoscaler.autoscaling/busybox0 autoscaled horizontalpodautoscaler.autoscaling/busybox1 autoscaled error: unable to decode "hack/testdata/recursive/rc/rc/busybox-broken.yaml": Object 'Kind' is missing in '{"apiVersion":"v1","ind":"ReplicationController","metadata":{"labels":{"app":"busybox2"},"name":"busybox2"},"spec":{"replicas":1,"selector":{"app":"busybox2"},"template":{"metadata":{"labels":{"app":"busybox2"},"name":"busybox2"},"spec":{"containers":[{"command":["sleep","3600"],"image":"busybox","imagePullPolicy":"IfNotPresent","name":"busybox"}],"restartPolicy":"Always"}}}}' has:Object 'Kind' is missing E0223 09:50:29.383719 59929 reflector.go:138] k8s.io/client-go/metadata/metadatainformer/informer.go:90: Failed to watch *v1.PartialObjectMetadata: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource horizontalpodautoscaler.autoscaling "busybox0" deleted horizontalpodautoscaler.autoscaling "busybox1" deleted E0223 09:50:29.596003 59929 reflector.go:138] k8s.io/client-go/metadata/metadatainformer/informer.go:90: Failed to watch *v1.PartialObjectMetadata: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource [32mgeneric-resources.sh:327: Successful get rc {{range.items}}{{.metadata.name}}:{{end}}: busybox0:busybox1: (B[m[32mgeneric-resources.sh:328: Successful get rc busybox0 {{.spec.replicas}}: 1 (B[m[32mgeneric-resources.sh:329: Successful get rc busybox1 {{.spec.replicas}}: 1 (B[m[32mgeneric-resources.sh:333: Successful get service busybox0 {{(index .spec.ports 0).name}} {{(index .spec.ports 0).port}}: <no value> 80 (B[mE0223 09:50:30.074856 59929 reflector.go:138] k8s.io/client-go/metadata/metadatainformer/informer.go:90: Failed to watch *v1.PartialObjectMetadata: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource [32mgeneric-resources.sh:334: Successful get service busybox1 {{(index .spec.ports 0).name}} {{(index .spec.ports 0).port}}: <no value> 80 (B[mSuccessful message:service/busybox0 exposed service/busybox1 exposed error: unable to decode "hack/testdata/recursive/rc/rc/busybox-broken.yaml": Object 'Kind' is missing in '{"apiVersion":"v1","ind":"ReplicationController","metadata":{"labels":{"app":"busybox2"},"name":"busybox2"},"spec":{"replicas":1,"selector":{"app":"busybox2"},"template":{"metadata":{"labels":{"app":"busybox2"},"name":"busybox2"},"spec":{"containers":[{"command":["sleep","3600"],"image":"busybox","imagePullPolicy":"IfNotPresent","name":"busybox"}],"restartPolicy":"Always"}}}}' has:Object 'Kind' is missing [32mgeneric-resources.sh:340: Successful get rc {{range.items}}{{.metadata.name}}:{{end}}: busybox0:busybox1: (B[m[32mgeneric-resources.sh:341: Successful get rc busybox0 {{.spec.replicas}}: 1 (B[m[32mgeneric-resources.sh:342: Successful get rc busybox1 {{.spec.replicas}}: 1 (B[mI0223 09:50:30.505160 59929 event.go:291] "Event occurred" object="namespace-1614073824-4512/busybox0" kind="ReplicationController" apiVersion="v1" type="Normal" reason="SuccessfulCreate" message="Created pod: busybox0-97dxg" I0223 09:50:30.518896 59929 event.go:291] "Event occurred" object="namespace-1614073824-4512/busybox1" kind="ReplicationController" apiVersion="v1" type="Normal" reason="SuccessfulCreate" message="Created pod: busybox1-gn29s" [32mgeneric-resources.sh:346: Successful get rc busybox0 {{.spec.replicas}}: 2 (B[m[32mgeneric-resources.sh:347: Successful get rc busybox1 {{.spec.replicas}}: 2 (B[mSuccessful 
message:replicationcontroller/busybox0 scaled replicationcontroller/busybox1 scaled error: unable to decode "hack/testdata/recursive/rc/rc/busybox-broken.yaml": Object 'Kind' is missing in '{"apiVersion":"v1","ind":"ReplicationController","metadata":{"labels":{"app":"busybox2"},"name":"busybox2"},"spec":{"replicas":1,"selector":{"app":"busybox2"},"template":{"metadata":{"labels":{"app":"busybox2"},"name":"busybox2"},"spec":{"containers":[{"command":["sleep","3600"],"image":"busybox","imagePullPolicy":"IfNotPresent","name":"busybox"}],"restartPolicy":"Always"}}}}' has:Object 'Kind' is missing [32mgeneric-resources.sh:352: Successful get rc {{range.items}}{{.metadata.name}}:{{end}}: busybox0:busybox1: (B[m[32mgeneric-resources.sh:356: Successful get rc {{range.items}}{{.metadata.name}}:{{end}}: (B[mSuccessful message:warning: Immediate deletion does not wait for confirmation that the running resource has been terminated. The resource may continue to run on the cluster indefinitely. replicationcontroller "busybox0" force deleted replicationcontroller "busybox1" force deleted error: unable to decode "hack/testdata/recursive/rc/rc/busybox-broken.yaml": Object 'Kind' is missing in '{"apiVersion":"v1","ind":"ReplicationController","metadata":{"labels":{"app":"busybox2"},"name":"busybox2"},"spec":{"replicas":1,"selector":{"app":"busybox2"},"template":{"metadata":{"labels":{"app":"busybox2"},"name":"busybox2"},"spec":{"containers":[{"command":["sleep","3600"],"image":"busybox","imagePullPolicy":"IfNotPresent","name":"busybox"}],"restartPolicy":"Always"}}}}' has:Object 'Kind' is missing [32mgeneric-resources.sh:361: Successful get deployment {{range.items}}{{.metadata.name}}:{{end}}: (B[mdeployment.apps/nginx1-deployment created deployment.apps/nginx0-deployment created error: error validating "hack/testdata/recursive/deployment/deployment/nginx-broken.yaml": error validating data: kind not set; if you choose to ignore these errors, turn validation off with --validate=false I0223 09:50:31.353577 59929 event.go:291] "Event occurred" object="namespace-1614073824-4512/nginx1-deployment" kind="Deployment" apiVersion="apps/v1" type="Normal" reason="ScalingReplicaSet" message="Scaled up replica set nginx1-deployment-758b5949b6 to 2" I0223 09:50:31.358864 59929 event.go:291] "Event occurred" object="namespace-1614073824-4512/nginx1-deployment-758b5949b6" kind="ReplicaSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulCreate" message="Created pod: nginx1-deployment-758b5949b6-bwqgb" I0223 09:50:31.359013 59929 event.go:291] "Event occurred" object="namespace-1614073824-4512/nginx0-deployment" kind="Deployment" apiVersion="apps/v1" type="Normal" reason="ScalingReplicaSet" message="Scaled up replica set nginx0-deployment-75db9cdfd9 to 2" I0223 09:50:31.366867 59929 event.go:291] "Event occurred" object="namespace-1614073824-4512/nginx0-deployment-75db9cdfd9" kind="ReplicaSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulCreate" message="Created pod: nginx0-deployment-75db9cdfd9-kwvgf" I0223 09:50:31.367566 59929 event.go:291] "Event occurred" object="namespace-1614073824-4512/nginx1-deployment-758b5949b6" kind="ReplicaSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulCreate" message="Created pod: nginx1-deployment-758b5949b6-8m5z6" I0223 09:50:31.370338 59929 event.go:291] "Event occurred" object="namespace-1614073824-4512/nginx0-deployment-75db9cdfd9" kind="ReplicaSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulCreate" message="Created pod: nginx0-deployment-75db9cdfd9-46828" 
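Every "Object 'Kind' is missing" message in this block comes from the intentionally broken manifests under hack/testdata/recursive, where the kind field is misspelled as "ind"; the recursive flag keeps processing the valid sibling files and only reports the broken one. A sketch of the pattern under test (paths taken from the log, flags assumed from standard kubectl usage):

    # Walk the directory tree; valid manifests are created, the broken one is
    # reported without aborting the rest of the walk.
    kubectl apply -R -f hack/testdata/recursive/pod
    # Schema validation failures can be skipped explicitly, as the harness output notes.
    kubectl create -R -f hack/testdata/recursive/rc --validate=false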
[32mgeneric-resources.sh:365: Successful get deployment {{range.items}}{{.metadata.name}}:{{end}}: nginx0-deployment:nginx1-deployment: (B[m[32mgeneric-resources.sh:366: Successful get deployment {{range.items}}{{(index .spec.template.spec.containers 0).image}}:{{end}}: k8s.gcr.io/nginx:1.7.9:k8s.gcr.io/nginx:1.7.9: (B[m[32mgeneric-resources.sh:370: Successful get deployment {{range.items}}{{(index .spec.template.spec.containers 0).image}}:{{end}}: k8s.gcr.io/nginx:1.7.9:k8s.gcr.io/nginx:1.7.9: (B[mSuccessful message:deployment.apps/nginx1-deployment skipped rollback (current template already matches revision 1) deployment.apps/nginx0-deployment skipped rollback (current template already matches revision 1) error: unable to decode "hack/testdata/recursive/deployment/deployment/nginx-broken.yaml": Object 'Kind' is missing in '{"apiVersion":"apps/v1","ind":"Deployment","metadata":{"labels":{"app":"nginx2-deployment"},"name":"nginx2-deployment"},"spec":{"replicas":2,"selector":{"matchLabels":{"app":"nginx2"}},"template":{"metadata":{"labels":{"app":"nginx2"}},"spec":{"containers":[{"image":"k8s.gcr.io/nginx:1.7.9","name":"nginx","ports":[{"containerPort":80}]}]}}}}' has:Object 'Kind' is missing deployment.apps/nginx1-deployment paused deployment.apps/nginx0-deployment paused [32mgeneric-resources.sh:378: Successful get deployment {{range.items}}{{.spec.paused}}:{{end}}: true:true: (B[mSuccessful message:unable to decode "hack/testdata/recursive/deployment/deployment/nginx-broken.yaml": Object 'Kind' is missing in '{"apiVersion":"apps/v1","ind":"Deployment","metadata":{"labels":{"app":"nginx2-deployment"},"name":"nginx2-deployment"},"spec":{"replicas":2,"selector":{"matchLabels":{"app":"nginx2"}},"template":{"metadata":{"labels":{"app":"nginx2"}},"spec":{"containers":[{"image":"k8s.gcr.io/nginx:1.7.9","name":"nginx","ports":[{"containerPort":80}]}]}}}}' ... skipping 10 lines ... 
1 <none> deployment.apps/nginx0-deployment REVISION CHANGE-CAUSE 1 <none> error: unable to decode "hack/testdata/recursive/deployment/deployment/nginx-broken.yaml": Object 'Kind' is missing in '{"apiVersion":"apps/v1","ind":"Deployment","metadata":{"labels":{"app":"nginx2-deployment"},"name":"nginx2-deployment"},"spec":{"replicas":2,"selector":{"matchLabels":{"app":"nginx2"}},"template":{"metadata":{"labels":{"app":"nginx2"}},"spec":{"containers":[{"image":"k8s.gcr.io/nginx:1.7.9","name":"nginx","ports":[{"containerPort":80}]}]}}}}' has:nginx0-deployment Successful message:deployment.apps/nginx1-deployment REVISION CHANGE-CAUSE 1 <none> deployment.apps/nginx0-deployment REVISION CHANGE-CAUSE 1 <none> error: unable to decode "hack/testdata/recursive/deployment/deployment/nginx-broken.yaml": Object 'Kind' is missing in '{"apiVersion":"apps/v1","ind":"Deployment","metadata":{"labels":{"app":"nginx2-deployment"},"name":"nginx2-deployment"},"spec":{"replicas":2,"selector":{"matchLabels":{"app":"nginx2"}},"template":{"metadata":{"labels":{"app":"nginx2"}},"spec":{"containers":[{"image":"k8s.gcr.io/nginx:1.7.9","name":"nginx","ports":[{"containerPort":80}]}]}}}}' has:nginx1-deployment Successful message:deployment.apps/nginx1-deployment REVISION CHANGE-CAUSE 1 <none> deployment.apps/nginx0-deployment REVISION CHANGE-CAUSE 1 <none> error: unable to decode "hack/testdata/recursive/deployment/deployment/nginx-broken.yaml": Object 'Kind' is missing in '{"apiVersion":"apps/v1","ind":"Deployment","metadata":{"labels":{"app":"nginx2-deployment"},"name":"nginx2-deployment"},"spec":{"replicas":2,"selector":{"matchLabels":{"app":"nginx2"}},"template":{"metadata":{"labels":{"app":"nginx2"}},"spec":{"containers":[{"image":"k8s.gcr.io/nginx:1.7.9","name":"nginx","ports":[{"containerPort":80}]}]}}}}' has:Object 'Kind' is missing warning: Immediate deletion does not wait for confirmation that the running resource has been terminated. The resource may continue to run on the cluster indefinitely. 
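The "REVISION  CHANGE-CAUSE" tables and the "skipped rollback (current template already matches revision 1)" lines above come from running the rollout subcommands against the same recursive directory, with the broken manifest again reported alongside the real output. Roughly, assuming the directory layout shown in the log:

    # Show rollout history for every deployment found under the directory.
    kubectl rollout history -R -f hack/testdata/recursive/deployment
    # Undoing to the revision the template already matches is a no-op,
    # hence "skipped rollback".
    kubectl rollout undo -R -f hack/testdata/recursive/deployment --to-revision=1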
deployment.apps "nginx1-deployment" force deleted deployment.apps "nginx0-deployment" force deleted error: unable to decode "hack/testdata/recursive/deployment/deployment/nginx-broken.yaml": Object 'Kind' is missing in '{"apiVersion":"apps/v1","ind":"Deployment","metadata":{"labels":{"app":"nginx2-deployment"},"name":"nginx2-deployment"},"spec":{"replicas":2,"selector":{"matchLabels":{"app":"nginx2"}},"template":{"metadata":{"labels":{"app":"nginx2"}},"spec":{"containers":[{"image":"k8s.gcr.io/nginx:1.7.9","name":"nginx","ports":[{"containerPort":80}]}]}}}}' E0223 09:50:33.072151 59929 reflector.go:138] k8s.io/client-go/metadata/metadatainformer/informer.go:90: Failed to watch *v1.PartialObjectMetadata: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource E0223 09:50:33.492747 59929 reflector.go:138] k8s.io/client-go/metadata/metadatainformer/informer.go:90: Failed to watch *v1.PartialObjectMetadata: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource [32mgeneric-resources.sh:400: Successful get rc {{range.items}}{{.metadata.name}}:{{end}}: (B[mreplicationcontroller/busybox0 created I0223 09:50:33.822424 59929 event.go:291] "Event occurred" object="namespace-1614073824-4512/busybox0" kind="ReplicationController" apiVersion="v1" type="Normal" reason="SuccessfulCreate" message="Created pod: busybox0-v7mqz" replicationcontroller/busybox1 created error: error validating "hack/testdata/recursive/rc/rc/busybox-broken.yaml": error validating data: kind not set; if you choose to ignore these errors, turn validation off with --validate=false I0223 09:50:33.829832 59929 event.go:291] "Event occurred" object="namespace-1614073824-4512/busybox1" kind="ReplicationController" apiVersion="v1" type="Normal" reason="SuccessfulCreate" message="Created pod: busybox1-84p47" [32mgeneric-resources.sh:404: Successful get rc {{range.items}}{{.metadata.name}}:{{end}}: busybox0:busybox1: (B[mSuccessful message:no rollbacker has been implemented for "ReplicationController" no rollbacker has been implemented for "ReplicationController" unable to decode "hack/testdata/recursive/rc/rc/busybox-broken.yaml": Object 'Kind' is missing in '{"apiVersion":"v1","ind":"ReplicationController","metadata":{"labels":{"app":"busybox2"},"name":"busybox2"},"spec":{"replicas":1,"selector":{"app":"busybox2"},"template":{"metadata":{"labels":{"app":"busybox2"},"name":"busybox2"},"spec":{"containers":[{"command":["sleep","3600"],"image":"busybox","imagePullPolicy":"IfNotPresent","name":"busybox"}],"restartPolicy":"Always"}}}}' ... skipping 2 lines ... 
message:no rollbacker has been implemented for "ReplicationController" no rollbacker has been implemented for "ReplicationController" unable to decode "hack/testdata/recursive/rc/rc/busybox-broken.yaml": Object 'Kind' is missing in '{"apiVersion":"v1","ind":"ReplicationController","metadata":{"labels":{"app":"busybox2"},"name":"busybox2"},"spec":{"replicas":1,"selector":{"app":"busybox2"},"template":{"metadata":{"labels":{"app":"busybox2"},"name":"busybox2"},"spec":{"containers":[{"command":["sleep","3600"],"image":"busybox","imagePullPolicy":"IfNotPresent","name":"busybox"}],"restartPolicy":"Always"}}}}' has:Object 'Kind' is missing Successful message:unable to decode "hack/testdata/recursive/rc/rc/busybox-broken.yaml": Object 'Kind' is missing in '{"apiVersion":"v1","ind":"ReplicationController","metadata":{"labels":{"app":"busybox2"},"name":"busybox2"},"spec":{"replicas":1,"selector":{"app":"busybox2"},"template":{"metadata":{"labels":{"app":"busybox2"},"name":"busybox2"},"spec":{"containers":[{"command":["sleep","3600"],"image":"busybox","imagePullPolicy":"IfNotPresent","name":"busybox"}],"restartPolicy":"Always"}}}}' error: replicationcontrollers "busybox0" pausing is not supported error: replicationcontrollers "busybox1" pausing is not supported has:Object 'Kind' is missing Successful message:unable to decode "hack/testdata/recursive/rc/rc/busybox-broken.yaml": Object 'Kind' is missing in '{"apiVersion":"v1","ind":"ReplicationController","metadata":{"labels":{"app":"busybox2"},"name":"busybox2"},"spec":{"replicas":1,"selector":{"app":"busybox2"},"template":{"metadata":{"labels":{"app":"busybox2"},"name":"busybox2"},"spec":{"containers":[{"command":["sleep","3600"],"image":"busybox","imagePullPolicy":"IfNotPresent","name":"busybox"}],"restartPolicy":"Always"}}}}' error: replicationcontrollers "busybox0" pausing is not supported error: replicationcontrollers "busybox1" pausing is not supported has:replicationcontrollers "busybox0" pausing is not supported Successful message:unable to decode "hack/testdata/recursive/rc/rc/busybox-broken.yaml": Object 'Kind' is missing in '{"apiVersion":"v1","ind":"ReplicationController","metadata":{"labels":{"app":"busybox2"},"name":"busybox2"},"spec":{"replicas":1,"selector":{"app":"busybox2"},"template":{"metadata":{"labels":{"app":"busybox2"},"name":"busybox2"},"spec":{"containers":[{"command":["sleep","3600"],"image":"busybox","imagePullPolicy":"IfNotPresent","name":"busybox"}],"restartPolicy":"Always"}}}}' error: replicationcontrollers "busybox0" pausing is not supported error: replicationcontrollers "busybox1" pausing is not supported has:replicationcontrollers "busybox1" pausing is not supported Successful message:unable to decode "hack/testdata/recursive/rc/rc/busybox-broken.yaml": Object 'Kind' is missing in '{"apiVersion":"v1","ind":"ReplicationController","metadata":{"labels":{"app":"busybox2"},"name":"busybox2"},"spec":{"replicas":1,"selector":{"app":"busybox2"},"template":{"metadata":{"labels":{"app":"busybox2"},"name":"busybox2"},"spec":{"containers":[{"command":["sleep","3600"],"image":"busybox","imagePullPolicy":"IfNotPresent","name":"busybox"}],"restartPolicy":"Always"}}}}' error: replicationcontrollers "busybox0" resuming is not supported error: replicationcontrollers "busybox1" resuming is not supported has:Object 'Kind' is missing Successful message:unable to decode "hack/testdata/recursive/rc/rc/busybox-broken.yaml": Object 'Kind' is missing in 
'{"apiVersion":"v1","ind":"ReplicationController","metadata":{"labels":{"app":"busybox2"},"name":"busybox2"},"spec":{"replicas":1,"selector":{"app":"busybox2"},"template":{"metadata":{"labels":{"app":"busybox2"},"name":"busybox2"},"spec":{"containers":[{"command":["sleep","3600"],"image":"busybox","imagePullPolicy":"IfNotPresent","name":"busybox"}],"restartPolicy":"Always"}}}}' error: replicationcontrollers "busybox0" resuming is not supported error: replicationcontrollers "busybox1" resuming is not supported has:replicationcontrollers "busybox0" resuming is not supported Successful message:unable to decode "hack/testdata/recursive/rc/rc/busybox-broken.yaml": Object 'Kind' is missing in '{"apiVersion":"v1","ind":"ReplicationController","metadata":{"labels":{"app":"busybox2"},"name":"busybox2"},"spec":{"replicas":1,"selector":{"app":"busybox2"},"template":{"metadata":{"labels":{"app":"busybox2"},"name":"busybox2"},"spec":{"containers":[{"command":["sleep","3600"],"image":"busybox","imagePullPolicy":"IfNotPresent","name":"busybox"}],"restartPolicy":"Always"}}}}' error: replicationcontrollers "busybox0" resuming is not supported error: replicationcontrollers "busybox1" resuming is not supported has:replicationcontrollers "busybox1" resuming is not supported warning: Immediate deletion does not wait for confirmation that the running resource has been terminated. The resource may continue to run on the cluster indefinitely. replicationcontroller "busybox0" force deleted replicationcontroller "busybox1" force deleted error: unable to decode "hack/testdata/recursive/rc/rc/busybox-broken.yaml": Object 'Kind' is missing in '{"apiVersion":"v1","ind":"ReplicationController","metadata":{"labels":{"app":"busybox2"},"name":"busybox2"},"spec":{"replicas":1,"selector":{"app":"busybox2"},"template":{"metadata":{"labels":{"app":"busybox2"},"name":"busybox2"},"spec":{"containers":[{"command":["sleep","3600"],"image":"busybox","imagePullPolicy":"IfNotPresent","name":"busybox"}],"restartPolicy":"Always"}}}}' E0223 09:50:34.346264 59929 reflector.go:138] k8s.io/client-go/metadata/metadatainformer/informer.go:90: Failed to watch *v1.PartialObjectMetadata: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource E0223 09:50:35.128733 59929 reflector.go:138] k8s.io/client-go/metadata/metadatainformer/informer.go:90: Failed to watch *v1.PartialObjectMetadata: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource Recording: run_namespace_tests Running command: run_namespace_tests +++ Running case: test-cmd.run_namespace_tests +++ working dir: /home/prow/go/src/k8s.io/kubernetes +++ command: run_namespace_tests +++ [0223 09:50:35] Testing kubectl(v1:namespaces) Successful message:Error from server (NotFound): namespaces "my-namespace" not found has: not found namespace/my-namespace created (dry run) namespace/my-namespace created (server dry run) Successful message:Error from server (NotFound): namespaces "my-namespace" not found has: not found namespace/my-namespace created [32mcore.sh:1459: Successful get namespaces/my-namespace {{.metadata.name}}: my-namespace (B[mnamespace "my-namespace" deleted E0223 09:50:40.291781 59929 reflector.go:138] k8s.io/client-go/metadata/metadatainformer/informer.go:90: Failed to watch *v1.PartialObjectMetadata: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource namespace/my-namespace condition met Successful message:Error from server (NotFound): namespaces "my-namespace" not 
found has: not found namespace/my-namespace created [32mcore.sh:1468: Successful get namespaces/my-namespace {{.metadata.name}}: my-namespace (B[mSuccessful message:warning: deleting cluster-scoped resources, not scoped to the provided namespace namespace "kube-node-lease" deleted ... skipping 31 lines ... namespace "namespace-1614073791-14146" deleted namespace "namespace-1614073791-29717" deleted namespace "namespace-1614073793-31145" deleted namespace "namespace-1614073795-4004" deleted namespace "namespace-1614073797-771" deleted namespace "namespace-1614073824-4512" deleted Error from server (Forbidden): namespaces "default" is forbidden: this namespace may not be deleted Error from server (Forbidden): namespaces "kube-public" is forbidden: this namespace may not be deleted Error from server (Forbidden): namespaces "kube-system" is forbidden: this namespace may not be deleted has:warning: deleting cluster-scoped resources Successful message:warning: deleting cluster-scoped resources, not scoped to the provided namespace namespace "kube-node-lease" deleted namespace "my-namespace" deleted namespace "namespace-1614073628-27395" deleted ... skipping 29 lines ... namespace "namespace-1614073791-14146" deleted namespace "namespace-1614073791-29717" deleted namespace "namespace-1614073793-31145" deleted namespace "namespace-1614073795-4004" deleted namespace "namespace-1614073797-771" deleted namespace "namespace-1614073824-4512" deleted Error from server (Forbidden): namespaces "default" is forbidden: this namespace may not be deleted Error from server (Forbidden): namespaces "kube-public" is forbidden: this namespace may not be deleted Error from server (Forbidden): namespaces "kube-system" is forbidden: this namespace may not be deleted has:namespace "my-namespace" deleted namespace/quotas created [32mcore.sh:1475: Successful get namespaces/quotas {{.metadata.name}}: quotas (B[m[32mcore.sh:1476: Successful get quota --namespace=quotas {{range.items}}{{ if eq .metadata.name \"test-quota\" }}found{{end}}{{end}}:: : (B[mresourcequota/test-quota created (dry run) I0223 09:50:42.151634 56228 client.go:360] parsed scheme: "passthrough" ... skipping 9 lines ... 
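"resourcequota/test-quota created (dry run)" just above is the client-side dry-run path: the object is validated and printed but never persisted. A sketch of that step; the namespace and quota names come from the log, while the --hard values are purely illustrative since this excerpt does not show them:

    # Client-side dry run: nothing is sent to be stored.
    kubectl create quota test-quota --namespace=quotas --hard=cpu=1,memory=1G --dry-run=client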
resourcequota "test-quota" deleted namespace "quotas" deleted I0223 09:50:42.922013 59929 shared_informer.go:240] Waiting for caches to sync for resource quota I0223 09:50:42.922071 59929 shared_informer.go:247] Caches are synced for resource quota I0223 09:50:44.151342 59929 horizontal.go:359] Horizontal Pod Autoscaler busybox0 has been deleted in namespace-1614073824-4512 I0223 09:50:44.155882 59929 horizontal.go:359] Horizontal Pod Autoscaler busybox1 has been deleted in namespace-1614073824-4512 E0223 09:50:45.360757 59929 reflector.go:138] k8s.io/client-go/metadata/metadatainformer/informer.go:90: Failed to watch *v1.PartialObjectMetadata: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource E0223 09:50:45.807026 59929 reflector.go:138] k8s.io/client-go/metadata/metadatainformer/informer.go:90: Failed to watch *v1.PartialObjectMetadata: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource E0223 09:50:47.044824 59929 reflector.go:138] k8s.io/client-go/metadata/metadatainformer/informer.go:90: Failed to watch *v1.PartialObjectMetadata: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource [32mcore.sh:1495: Successful get namespaces {{range.items}}{{ if eq .metadata.name \"other\" }}found{{end}}{{end}}:: : (B[mnamespace/other created [32mcore.sh:1499: Successful get namespaces/other {{.metadata.name}}: other (B[m[32mcore.sh:1503: Successful get pods --namespace=other {{range.items}}{{.metadata.name}}:{{end}}: (B[mpod/valid-pod created [32mcore.sh:1507: Successful get pods --namespace=other {{range.items}}{{.metadata.name}}:{{end}}: valid-pod: (B[m[32mcore.sh:1509: Successful get pods -n other {{range.items}}{{.metadata.name}}:{{end}}: valid-pod: (B[mSuccessful message:error: a resource cannot be retrieved by name across all namespaces has:a resource cannot be retrieved by name across all namespaces [32mcore.sh:1516: Successful get pods --namespace=other {{range.items}}{{.metadata.name}}:{{end}}: valid-pod: (B[mwarning: Immediate deletion does not wait for confirmation that the running resource has been terminated. The resource may continue to run on the cluster indefinitely. pod "valid-pod" force deleted [32mcore.sh:1520: Successful get pods --namespace=other {{range.items}}{{.metadata.name}}:{{end}}: (B[mnamespace "other" deleted ... skipping 117 lines ... (B[m[32mcore.sh:911: Successful get secret/secret-string-data --namespace=test-secrets {{.stringData}}: <no value> (B[msecret "secret-string-data" deleted [32mcore.sh:920: Successful get secrets --namespace=test-secrets {{range.items}}{{.metadata.name}}:{{end}}: (B[msecret "test-secret" deleted namespace "test-secrets" deleted I0223 09:50:59.131730 59929 namespace_controller.go:185] Namespace has been deleted other E0223 09:51:01.232412 59929 reflector.go:138] k8s.io/client-go/metadata/metadatainformer/informer.go:90: Failed to watch *v1.PartialObjectMetadata: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource E0223 09:51:02.284307 59929 reflector.go:138] k8s.io/client-go/metadata/metadatainformer/informer.go:90: Failed to watch *v1.PartialObjectMetadata: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource +++ exit code: 0 Recording: run_configmap_tests Running command: run_configmap_tests +++ Running case: test-cmd.run_configmap_tests +++ working dir: /home/prow/go/src/k8s.io/kubernetes ... skipping 17 lines ... 
configmap/test-binary-configmap created [32mcore.sh:51: Successful get configmap/test-configmap --namespace=test-configmaps {{.metadata.name}}: test-configmap (B[m[32mcore.sh:52: Successful get configmap/test-binary-configmap --namespace=test-configmaps {{.metadata.name}}: test-binary-configmap (B[mconfigmap "test-configmap" deleted configmap "test-binary-configmap" deleted namespace "test-configmaps" deleted E0223 09:51:06.019504 59929 reflector.go:138] k8s.io/client-go/metadata/metadatainformer/informer.go:90: Failed to watch *v1.PartialObjectMetadata: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource I0223 09:51:08.106179 59929 namespace_controller.go:185] Namespace has been deleted test-secrets E0223 09:51:10.338985 59929 reflector.go:138] k8s.io/client-go/metadata/metadatainformer/informer.go:90: Failed to watch *v1.PartialObjectMetadata: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource +++ exit code: 0 Recording: run_client_config_tests Running command: run_client_config_tests +++ Running case: test-cmd.run_client_config_tests +++ working dir: /home/prow/go/src/k8s.io/kubernetes +++ command: run_client_config_tests +++ [0223 09:51:10] Creating namespace namespace-1614073870-4687 namespace/namespace-1614073870-4687 created Context "test" modified. +++ [0223 09:51:10] Testing client config Successful message:error: stat missing: no such file or directory has:missing: no such file or directory Successful message:error: stat missing: no such file or directory has:missing: no such file or directory Successful message:error: stat missing: no such file or directory has:missing: no such file or directory Successful message:Error in configuration: context was not found for specified context: missing-context has:context was not found for specified context: missing-context Successful message:error: no server found for cluster "missing-cluster" has:no server found for cluster "missing-cluster" Successful message:error: auth info "missing-user" does not exist has:auth info "missing-user" does not exist Successful message:error: error loading config file "/tmp/newconfig.yaml": no kind "Config" is registered for version "v-1" in scheme "k8s.io/client-go/tools/clientcmd/api/latest/latest.go:50" has:error loading config file Successful message:error: stat missing-config: no such file or directory has:no such file or directory +++ exit code: 0 Recording: run_service_accounts_tests Running command: run_service_accounts_tests +++ Running case: test-cmd.run_service_accounts_tests ... skipping 43 lines ... Labels: <none> Annotations: <none> Schedule: 59 23 31 2 * Concurrency Policy: Allow Suspend: False Successful Job History Limit: 3 Failed Job History Limit: 1 Starting Deadline Seconds: <unset> Selector: <unset> Parallelism: <unset> Completions: <unset> Pod Template: Labels: <none> ... skipping 38 lines ... Labels: controller-uid=0359e883-5dde-4ac5-bd1b-f78e8caf07ac job-name=test-job Annotations: cronjob.kubernetes.io/instantiate: manual Parallelism: 1 Completions: 1 Start Time: Tue, 23 Feb 2021 09:51:19 +0000 Pods Statuses: 1 Running / 0 Succeeded / 0 Failed Pod Template: Labels: controller-uid=0359e883-5dde-4ac5-bd1b-f78e8caf07ac job-name=test-job Containers: pi: Image: k8s.gcr.io/perl ... skipping 419 lines ... type: ClusterIP status: loadBalancer: {} Successful message:kubectl-create kubectl-set has:kubectl-set error: you must specify resources by --filename when --local is set. 
Example resource specifications include: '-f rsrc.yaml' '--filename=rsrc.json' I0223 09:51:30.558790 59929 namespace_controller.go:185] Namespace has been deleted test-jobs [32mcore.sh:1020: Successful get services redis-master {{range.spec.selector}}{{.}}:{{end}}: redis:master:backend: (B[mservice/redis-master selector updated Successful message:Error from server (Conflict): Operation cannot be fulfilled on services "redis-master": the object has been modified; please apply your changes to the latest version and try again has:Conflict [32mcore.sh:1033: Successful get services {{range.items}}{{.metadata.name}}:{{end}}: kubernetes:redis-master: (B[mservice "redis-master" deleted [32mcore.sh:1040: Successful get services {{range.items}}{{.metadata.name}}:{{end}}: kubernetes: (B[m[32mcore.sh:1044: Successful get services {{range.items}}{{.metadata.name}}:{{end}}: kubernetes: (B[mservice/redis-master created [32mE0223 09:51:31.730935 59929 reflector.go:138] k8s.io/client-go/metadata/metadatainformer/informer.go:90: Failed to watch *v1.PartialObjectMetadata: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource core.sh:1048: Successful get services {{range.items}}{{.metadata.name}}:{{end}}: kubernetes:redis-master: (B[m[32mcore.sh:1052: Successful get services {{range.items}}{{.metadata.name}}:{{end}}: kubernetes:redis-master: (B[mservice/service-v1-test created [32mcore.sh:1073: Successful get services {{range.items}}{{.metadata.name}}:{{end}}: kubernetes:redis-master:service-v1-test: (B[mservice/service-v1-test replaced [32mcore.sh:1080: Successful get services {{range.items}}{{.metadata.name}}:{{end}}: kubernetes:redis-master:service-v1-test: ... skipping 39 lines ... (B[m[32mcore.sh:1153: Successful get service testmetadata {{.metadata.annotations}}: map[zone-context:home] (B[mSuccessful message:kubectl-run has:kubectl-run service/exposemetadata exposed [32mcore.sh:1162: Successful get service exposemetadata {{.metadata.annotations}}: map[zone-context:work] (B[mE0223 09:51:36.021841 59929 reflector.go:138] k8s.io/client-go/metadata/metadatainformer/informer.go:90: Failed to watch *v1.PartialObjectMetadata: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource Successful message:kubectl-expose has:kubectl-expose service "exposemetadata" deleted service "testmetadata" deleted pod "testmetadata" deleted ... skipping 65 lines ... 
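The client-config failures earlier in this block ("stat missing", the unknown context, cluster and user, and the unparseable config file) are produced by pointing the standard kubeconfig-loading flags at entries that do not exist. Roughly, with the placeholder names from the log:

    # A kubeconfig path that does not exist on disk.
    kubectl get pods --kubeconfig=missing
    # Entries that are not defined in the kubeconfig that does load.
    kubectl get pods --context=missing-context
    kubectl get pods --cluster=missing-cluster
    kubectl get pods --user=missing-user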
(dry run) daemonset.apps/bind rolled back (server dry run) [32mapps.sh:87: Successful get daemonset {{range.items}}{{(index .spec.template.spec.containers 0).image}}:{{end}}: k8s.gcr.io/pause:latest: (B[m[32mapps.sh:88: Successful get daemonset {{range.items}}{{(index .spec.template.spec.containers 1).image}}:{{end}}: k8s.gcr.io/nginx:test-cmd: (B[m[32mapps.sh:89: Successful get daemonset {{range.items}}{{(len .spec.template.spec.containers)}}{{end}}: 2 (B[mdaemonset.apps/bind rolled back E0223 09:51:40.773030 59929 daemon_controller.go:320] namespace-1614073898-5512/bind failed with : error storing status for daemon set &v1.DaemonSet{TypeMeta:v1.TypeMeta{Kind:"", APIVersion:""}, ObjectMeta:v1.ObjectMeta{Name:"bind", GenerateName:"", Namespace:"namespace-1614073898-5512", SelfLink:"", UID:"1770a8cf-0bb8-4831-9332-a95873f0f6ed", ResourceVersion:"1998", Generation:3, CreationTimestamp:v1.Time{Time:time.Time{wall:0x0, ext:63749670698, loc:(*time.Location)(0x6f815e0)}}, DeletionTimestamp:(*v1.Time)(nil), DeletionGracePeriodSeconds:(*int64)(nil), Labels:map[string]string{"service":"bind"}, Annotations:map[string]string{"deprecated.daemonset.template.generation":"3", "kubectl.kubernetes.io/last-applied-configuration":"{\"apiVersion\":\"apps/v1\",\"kind\":\"DaemonSet\",\"metadata\":{\"annotations\":{\"kubernetes.io/change-cause\":\"kubectl apply --filename=hack/testdata/rollingupdate-daemonset-rv2.yaml --record=true --server=https://127.0.0.1:6443 --insecure-skip-tls-verify=true --match-server-version=true\"},\"labels\":{\"service\":\"bind\"},\"name\":\"bind\",\"namespace\":\"namespace-1614073898-5512\"},\"spec\":{\"selector\":{\"matchLabels\":{\"service\":\"bind\"}},\"template\":{\"metadata\":{\"labels\":{\"service\":\"bind\"}},\"spec\":{\"affinity\":{\"podAntiAffinity\":{\"requiredDuringSchedulingIgnoredDuringExecution\":[{\"labelSelector\":{\"matchExpressions\":[{\"key\":\"service\",\"operator\":\"In\",\"values\":[\"bind\"]}]},\"namespaces\":[],\"topologyKey\":\"kubernetes.io/hostname\"}]}},\"containers\":[{\"image\":\"k8s.gcr.io/pause:latest\",\"name\":\"kubernetes-pause\"},{\"image\":\"k8s.gcr.io/nginx:test-cmd\",\"name\":\"app\"}]}},\"updateStrategy\":{\"rollingUpdate\":{\"maxUnavailable\":\"10%\"},\"type\":\"RollingUpdate\"}}}\n", "kubernetes.io/change-cause":"kubectl apply --filename=hack/testdata/rollingupdate-daemonset-rv2.yaml --record=true --server=https://127.0.0.1:6443 --insecure-skip-tls-verify=true --match-server-version=true"}, OwnerReferences:[]v1.OwnerReference(nil), Finalizers:[]string(nil), ClusterName:"", ManagedFields:[]v1.ManagedFieldsEntry{v1.ManagedFieldsEntry{Manager:"kube-controller-manager", Operation:"Update", APIVersion:"apps/v1", Time:(*v1.Time)(0xc001dcc180), FieldsType:"FieldsV1", FieldsV1:(*v1.FieldsV1)(0xc001dcc1c0)}, v1.ManagedFieldsEntry{Manager:"kubectl-client-side-apply", Operation:"Update", APIVersion:"apps/v1", Time:(*v1.Time)(0xc001dcc220), FieldsType:"FieldsV1", FieldsV1:(*v1.FieldsV1)(0xc001dcc4a0)}, v1.ManagedFieldsEntry{Manager:"kubectl", Operation:"Update", APIVersion:"apps/v1", Time:(*v1.Time)(0xc001dcc4e0), FieldsType:"FieldsV1", FieldsV1:(*v1.FieldsV1)(0xc001dcc520)}}}, Spec:v1.DaemonSetSpec{Selector:(*v1.LabelSelector)(0xc001dcc560), Template:v1.PodTemplateSpec{ObjectMeta:v1.ObjectMeta{Name:"", GenerateName:"", Namespace:"", SelfLink:"", UID:"", ResourceVersion:"", Generation:0, CreationTimestamp:v1.Time{Time:time.Time{wall:0x0, ext:0, loc:(*time.Location)(nil)}}, DeletionTimestamp:(*v1.Time)(nil), DeletionGracePeriodSeconds:(*int64)(nil), 
Labels:map[string]string{"service":"bind"}, Annotations:map[string]string(nil), OwnerReferences:[]v1.OwnerReference(nil), Finalizers:[]string(nil), ClusterName:"", ManagedFields:[]v1.ManagedFieldsEntry(nil)}, Spec:v1.PodSpec{Volumes:[]v1.Volume(nil), InitContainers:[]v1.Container(nil), Containers:[]v1.Container{v1.Container{Name:"kubernetes-pause", Image:"k8s.gcr.io/pause:2.0", Command:[]string(nil), Args:[]string(nil), WorkingDir:"", Ports:[]v1.ContainerPort(nil), EnvFrom:[]v1.EnvFromSource(nil), Env:[]v1.EnvVar(nil), Resources:v1.ResourceRequirements{Limits:v1.ResourceList(nil), Requests:v1.ResourceList(nil)}, VolumeMounts:[]v1.VolumeMount(nil), VolumeDevices:[]v1.VolumeDevice(nil), LivenessProbe:(*v1.Probe)(nil), ReadinessProbe:(*v1.Probe)(nil), StartupProbe:(*v1.Probe)(nil), Lifecycle:(*v1.Lifecycle)(nil), TerminationMessagePath:"/dev/termination-log", TerminationMessagePolicy:"File", ImagePullPolicy:"IfNotPresent", SecurityContext:(*v1.SecurityContext)(nil), Stdin:false, StdinOnce:false, TTY:false}}, EphemeralContainers:[]v1.EphemeralContainer(nil), RestartPolicy:"Always", TerminationGracePeriodSeconds:(*int64)(0xc0030244d8), ActiveDeadlineSeconds:(*int64)(nil), DNSPolicy:"ClusterFirst", NodeSelector:map[string]string(nil), ServiceAccountName:"", DeprecatedServiceAccount:"", AutomountServiceAccountToken:(*bool)(nil), NodeName:"", HostNetwork:false, HostPID:false, HostIPC:false, ShareProcessNamespace:(*bool)(nil), SecurityContext:(*v1.PodSecurityContext)(0xc0004e5a40), ImagePullSecrets:[]v1.LocalObjectReference(nil), Hostname:"", Subdomain:"", Affinity:(*v1.Affinity)(0xc001dcc5a0), SchedulerName:"default-scheduler", Tolerations:[]v1.Toleration(nil), HostAliases:[]v1.HostAlias(nil), PriorityClassName:"", Priority:(*int32)(nil), DNSConfig:(*v1.PodDNSConfig)(nil), ReadinessGates:[]v1.PodReadinessGate(nil), RuntimeClassName:(*string)(nil), EnableServiceLinks:(*bool)(nil), PreemptionPolicy:(*v1.PreemptionPolicy)(nil), Overhead:v1.ResourceList(nil), TopologySpreadConstraints:[]v1.TopologySpreadConstraint(nil), SetHostnameAsFQDN:(*bool)(nil)}}, UpdateStrategy:v1.DaemonSetUpdateStrategy{Type:"RollingUpdate", RollingUpdate:(*v1.RollingUpdateDaemonSet)(0xc003011fa0)}, MinReadySeconds:0, RevisionHistoryLimit:(*int32)(0xc00302452c)}, Status:v1.DaemonSetStatus{CurrentNumberScheduled:0, NumberMisscheduled:0, DesiredNumberScheduled:0, NumberReady:0, ObservedGeneration:2, UpdatedNumberScheduled:0, NumberAvailable:0, NumberUnavailable:0, CollisionCount:(*int32)(nil), Conditions:[]v1.DaemonSetCondition(nil)}}: Operation cannot be fulfilled on daemonsets.apps "bind": the object has been modified; please apply your changes to the latest version and try again [32mapps.sh:92: Successful get daemonset {{range.items}}{{(index .spec.template.spec.containers 0).image}}:{{end}}: k8s.gcr.io/pause:2.0: (B[m[32mapps.sh:93: Successful get daemonset {{range.items}}{{(len .spec.template.spec.containers)}}{{end}}: 1 (B[mSuccessful message:error: unable to find specified revision 1000000 in history has:unable to find specified revision [32mapps.sh:97: Successful get daemonset {{range.items}}{{(index .spec.template.spec.containers 0).image}}:{{end}}: k8s.gcr.io/pause:2.0: (B[m[32mapps.sh:98: Successful get daemonset {{range.items}}{{(len .spec.template.spec.containers)}}{{end}}: 1 (B[mdaemonset.apps/bind rolled back [32mapps.sh:101: Successful get daemonset {{range.items}}{{(index .spec.template.spec.containers 0).image}}:{{end}}: k8s.gcr.io/pause:latest: (B[m[32mapps.sh:102: Successful get daemonset 
{{range.items}}{{(index .spec.template.spec.containers 1).image}}:{{end}}: k8s.gcr.io/nginx:test-cmd: ... skipping 36 lines ... Namespace: namespace-1614073901-27825 Selector: app=guestbook,tier=frontend Labels: app=guestbook tier=frontend Annotations: <none> Replicas: 3 current / 3 desired Pods Status: 0 Running / 3 Waiting / 0 Succeeded / 0 Failed Pod Template: Labels: app=guestbook tier=frontend Containers: php-redis: Image: gcr.io/google_samples/gb-frontend:v4 ... skipping 17 lines ... Namespace: namespace-1614073901-27825 Selector: app=guestbook,tier=frontend Labels: app=guestbook tier=frontend Annotations: <none> Replicas: 3 current / 3 desired Pods Status: 0 Running / 3 Waiting / 0 Succeeded / 0 Failed Pod Template: Labels: app=guestbook tier=frontend Containers: php-redis: Image: gcr.io/google_samples/gb-frontend:v4 ... skipping 18 lines ... Namespace: namespace-1614073901-27825 Selector: app=guestbook,tier=frontend Labels: app=guestbook tier=frontend Annotations: <none> Replicas: 3 current / 3 desired Pods Status: 0 Running / 3 Waiting / 0 Succeeded / 0 Failed Pod Template: Labels: app=guestbook tier=frontend Containers: php-redis: Image: gcr.io/google_samples/gb-frontend:v4 ... skipping 12 lines ... Namespace: namespace-1614073901-27825 Selector: app=guestbook,tier=frontend Labels: app=guestbook tier=frontend Annotations: <none> Replicas: 3 current / 3 desired Pods Status: 0 Running / 3 Waiting / 0 Succeeded / 0 Failed Pod Template: Labels: app=guestbook tier=frontend Containers: php-redis: Image: gcr.io/google_samples/gb-frontend:v4 ... skipping 27 lines ... Namespace: namespace-1614073901-27825 Selector: app=guestbook,tier=frontend Labels: app=guestbook tier=frontend Annotations: <none> Replicas: 3 current / 3 desired Pods Status: 0 Running / 3 Waiting / 0 Succeeded / 0 Failed Pod Template: Labels: app=guestbook tier=frontend Containers: php-redis: Image: gcr.io/google_samples/gb-frontend:v4 ... skipping 17 lines ... Namespace: namespace-1614073901-27825 Selector: app=guestbook,tier=frontend Labels: app=guestbook tier=frontend Annotations: <none> Replicas: 3 current / 3 desired Pods Status: 0 Running / 3 Waiting / 0 Succeeded / 0 Failed Pod Template: Labels: app=guestbook tier=frontend Containers: php-redis: Image: gcr.io/google_samples/gb-frontend:v4 ... skipping 17 lines ... Namespace: namespace-1614073901-27825 Selector: app=guestbook,tier=frontend Labels: app=guestbook tier=frontend Annotations: <none> Replicas: 3 current / 3 desired Pods Status: 0 Running / 3 Waiting / 0 Succeeded / 0 Failed Pod Template: Labels: app=guestbook tier=frontend Containers: php-redis: Image: gcr.io/google_samples/gb-frontend:v4 ... skipping 3 lines ... cpu: 100m memory: 100Mi Environment: GET_HOSTS_FROM: dns Mounts: <none> Volumes: <none> (B[mE0223 09:51:43.919320 59929 reflector.go:138] k8s.io/client-go/metadata/metadatainformer/informer.go:90: Failed to watch *v1.PartialObjectMetadata: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource [32mSuccessful describe Name: frontend Namespace: namespace-1614073901-27825 Selector: app=guestbook,tier=frontend Labels: app=guestbook tier=frontend Annotations: <none> Replicas: 3 current / 3 desired Pods Status: 0 Running / 3 Waiting / 0 Succeeded / 0 Failed Pod Template: Labels: app=guestbook tier=frontend Containers: php-redis: Image: gcr.io/google_samples/gb-frontend:v4 ... skipping 15 lines ... 
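The DaemonSet rollback sequence above (the apps.sh:87-102 checks) exercises rollout undo in both directions, including the expected failure for a revision that never existed. A sketch, with the DaemonSet name taken from the log:

    # Roll back one revision (dry-run variants appear above as well).
    kubectl rollout undo daemonset/bind
    # A revision that is not in the history is rejected:
    # "error: unable to find specified revision 1000000 in history".
    kubectl rollout undo daemonset/bind --to-revision=1000000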
(B[m[32mcore.sh:1224: Successful get rc frontend {{.spec.replicas}}: 3 (B[mreplicationcontroller/frontend scaled E0223 09:51:44.147373 59929 replica_set.go:201] ReplicaSet has no controller: &ReplicaSet{ObjectMeta:{frontend namespace-1614073901-27825 16546636-79e4-4f85-b91c-e82908f37787 2037 2 2021-02-23 09:51:42 +0000 UTC <nil> <nil> map[app:guestbook tier:frontend] map[] [] [] [{kube-controller-manager Update v1 2021-02-23 09:51:42 +0000 UTC FieldsV1 {"f:status":{"f:fullyLabeledReplicas":{},"f:observedGeneration":{},"f:replicas":{}}}} {kubectl-create Update v1 2021-02-23 09:51:42 +0000 UTC FieldsV1 {"f:metadata":{"f:labels":{".":{},"f:app":{},"f:tier":{}}},"f:spec":{"f:replicas":{},"f:selector":{".":{},"f:app":{},"f:tier":{}},"f:template":{".":{},"f:metadata":{".":{},"f:creationTimestamp":{},"f:labels":{".":{},"f:app":{},"f:tier":{}}},"f:spec":{".":{},"f:containers":{".":{},"k:{\"name\":\"php-redis\"}":{".":{},"f:env":{".":{},"k:{\"name\":\"GET_HOSTS_FROM\"}":{".":{},"f:name":{},"f:value":{}}},"f:image":{},"f:imagePullPolicy":{},"f:name":{},"f:ports":{".":{},"k:{\"containerPort\":80,\"protocol\":\"TCP\"}":{".":{},"f:containerPort":{},"f:protocol":{}}},"f:resources":{".":{},"f:requests":{".":{},"f:cpu":{},"f:memory":{}}},"f:terminationMessagePath":{},"f:terminationMessagePolicy":{}}},"f:dnsPolicy":{},"f:restartPolicy":{},"f:schedulerName":{},"f:securityContext":{},"f:terminationGracePeriodSeconds":{}}}}}}]},Spec:ReplicaSetSpec{Replicas:*2,Selector:&v1.LabelSelector{MatchLabels:map[string]string{app: guestbook,tier: frontend,},MatchExpressions:[]LabelSelectorRequirement{},},Template:{{ 0 0001-01-01 00:00:00 +0000 UTC <nil> <nil> map[app:guestbook tier:frontend] map[] [] [] []} {[] [] [{php-redis gcr.io/google_samples/gb-frontend:v4 [] [] [{ 0 80 TCP }] [] [{GET_HOSTS_FROM dns nil}] {map[] map[cpu:{{100 -3} {<nil>} 100m DecimalSI} memory:{{104857600 0} {<nil>} 100Mi BinarySI}]} [] [] nil nil nil nil /dev/termination-log File IfNotPresent nil false false false}] [] Always 0xc002ff1f98 <nil> ClusterFirst map[] <nil> false false false <nil> PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[]Sysctl{},WindowsOptions:nil,FSGroupChangePolicy:nil,SeccompProfile:nil,} [] nil default-scheduler [] [] <nil> nil [] <nil> <nil> <nil> map[] [] <nil>}},MinReadySeconds:0,},Status:ReplicaSetStatus{Replicas:3,FullyLabeledReplicas:3,ObservedGeneration:1,ReadyReplicas:0,AvailableReplicas:0,Conditions:[]ReplicaSetCondition{},},} I0223 09:51:44.154360 59929 event.go:291] "Event occurred" object="namespace-1614073901-27825/frontend" kind="ReplicationController" apiVersion="v1" type="Normal" reason="SuccessfulDelete" message="Deleted pod: frontend-zg97f" [32mcore.sh:1228: Successful get rc frontend {{.spec.replicas}}: 2 (B[m[32mcore.sh:1232: Successful get rc frontend {{.spec.replicas}}: 2 (B[merror: Expected replicas to be 3, was 2 [32mcore.sh:1236: Successful get rc frontend {{.spec.replicas}}: 2 (B[m[32mcore.sh:1240: Successful get rc frontend {{.spec.replicas}}: 2 (B[mreplicationcontroller/frontend scaled I0223 09:51:44.815638 59929 event.go:291] "Event occurred" object="namespace-1614073901-27825/frontend" kind="ReplicationController" apiVersion="v1" type="Normal" reason="SuccessfulCreate" message="Created pod: frontend-zs952" [32mcore.sh:1244: Successful get rc frontend {{.spec.replicas}}: 3 (B[m[32mcore.sh:1248: Successful get rc frontend {{.spec.replicas}}: 3 ... skipping 31 lines ... 
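The "error: Expected replicas to be 3, was 2" line above is the scale precondition check rather than a test failure: kubectl scale refuses to act when --current-replicas does not match the live object. A sketch of the unconditional and conditional forms, using the controller name from the log:

    # Unconditional scale.
    kubectl scale rc frontend --replicas=2
    # Conditional scale; fails if the live replica count is not exactly 3.
    kubectl scale rc frontend --current-replicas=3 --replicas=2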
(B[mdeployment.apps "nginx-deployment" deleted Successful message:service/expose-test-deployment exposed has:service/expose-test-deployment exposed service "expose-test-deployment" deleted Successful message:error: couldn't retrieve selectors via --selector flag or introspection: invalid deployment: no selectors, therefore cannot be exposed See 'kubectl expose -h' for help and examples has:invalid deployment: no selectors deployment.apps/nginx-deployment created I0223 09:51:47.138998 59929 event.go:291] "Event occurred" object="namespace-1614073901-27825/nginx-deployment" kind="Deployment" apiVersion="apps/v1" type="Normal" reason="ScalingReplicaSet" message="Scaled up replica set nginx-deployment-76b5cd66f5 to 3" I0223 09:51:47.145103 59929 event.go:291] "Event occurred" object="namespace-1614073901-27825/nginx-deployment-76b5cd66f5" kind="ReplicaSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulCreate" message="Created pod: nginx-deployment-76b5cd66f5-pb6wp" I0223 09:51:47.152247 59929 event.go:291] "Event occurred" object="namespace-1614073901-27825/nginx-deployment-76b5cd66f5" kind="ReplicaSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulCreate" message="Created pod: nginx-deployment-76b5cd66f5-hj79d" ... skipping 23 lines ... service "frontend" deleted service "frontend-2" deleted service "frontend-3" deleted service "frontend-4" deleted service "frontend-5" deleted Successful message:error: cannot expose a Node has:cannot expose Successful message:The Service "invalid-large-service-name-that-has-more-than-sixty-three-characters" is invalid: metadata.name: Invalid value: "invalid-large-service-name-that-has-more-than-sixty-three-characters": must be no more than 63 characters has:metadata.name: Invalid value Successful message:service/kubernetes-serve-hostname-testing-sixty-three-characters-in-len exposed ... skipping 30 lines ... (B[mhorizontalpodautoscaler.autoscaling/frontend autoscaled [32mcore.sh:1391: Successful get hpa frontend {{.spec.minReplicas}} {{.spec.maxReplicas}} {{.spec.targetCPUUtilizationPercentage}}: 1 2 70 (B[mhorizontalpodautoscaler.autoscaling "frontend" deleted horizontalpodautoscaler.autoscaling/frontend autoscaled [32mcore.sh:1395: Successful get hpa frontend {{.spec.minReplicas}} {{.spec.maxReplicas}} {{.spec.targetCPUUtilizationPercentage}}: 2 3 80 (B[mhorizontalpodautoscaler.autoscaling "frontend" deleted Error: required flag(s) "max" not set E0223 09:51:52.583374 59929 reflector.go:138] k8s.io/client-go/metadata/metadatainformer/informer.go:90: Failed to watch *v1.PartialObjectMetadata: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource replicationcontroller "frontend" deleted [32mcore.sh:1404: Successful get deployment {{range.items}}{{.metadata.name}}:{{end}}: (B[mapiVersion: apps/v1 kind: Deployment metadata: creationTimestamp: null ... skipping 24 lines ... 
limits: cpu: 300m requests: cpu: 300m terminationGracePeriodSeconds: 0 status: {} Error from server (NotFound): deployments.apps "nginx-deployment-resources" not found deployment.apps/nginx-deployment-resources created I0223 09:51:53.077214 59929 event.go:291] "Event occurred" object="namespace-1614073901-27825/nginx-deployment-resources" kind="Deployment" apiVersion="apps/v1" type="Normal" reason="ScalingReplicaSet" message="Scaled up replica set nginx-deployment-resources-748ddcb48b to 3" I0223 09:51:53.081118 59929 event.go:291] "Event occurred" object="namespace-1614073901-27825/nginx-deployment-resources-748ddcb48b" kind="ReplicaSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulCreate" message="Created pod: nginx-deployment-resources-748ddcb48b-r8b8f" I0223 09:51:53.086572 59929 event.go:291] "Event occurred" object="namespace-1614073901-27825/nginx-deployment-resources-748ddcb48b" kind="ReplicaSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulCreate" message="Created pod: nginx-deployment-resources-748ddcb48b-tb7dd" I0223 09:51:53.087174 59929 event.go:291] "Event occurred" object="namespace-1614073901-27825/nginx-deployment-resources-748ddcb48b" kind="ReplicaSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulCreate" message="Created pod: nginx-deployment-resources-748ddcb48b-fkqxh" [32mcore.sh:1410: Successful get deployment {{range.items}}{{.metadata.name}}:{{end}}: nginx-deployment-resources: (B[m[32mcore.sh:1411: Successful get deployment {{range.items}}{{(index .spec.template.spec.containers 0).image}}:{{end}}: k8s.gcr.io/nginx:test-cmd: (B[m[32mcore.sh:1412: Successful get deployment {{range.items}}{{(index .spec.template.spec.containers 1).image}}:{{end}}: k8s.gcr.io/perl: (B[mdeployment.apps/nginx-deployment-resources resource requirements updated I0223 09:51:53.481015 59929 event.go:291] "Event occurred" object="namespace-1614073901-27825/nginx-deployment-resources" kind="Deployment" apiVersion="apps/v1" type="Normal" reason="ScalingReplicaSet" message="Scaled up replica set nginx-deployment-resources-7bfb7d56b6 to 1" I0223 09:51:53.494042 59929 event.go:291] "Event occurred" object="namespace-1614073901-27825/nginx-deployment-resources-7bfb7d56b6" kind="ReplicaSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulCreate" message="Created pod: nginx-deployment-resources-7bfb7d56b6-78sq2" [32mcore.sh:1415: Successful get deployment {{range.items}}{{(index .spec.template.spec.containers 0).resources.limits.cpu}}:{{end}}: 100m: (B[m[32mcore.sh:1416: Successful get deployment {{range.items}}{{(index .spec.template.spec.containers 1).resources.limits.cpu}}:{{end}}: 100m: (B[merror: unable to find container named redis I0223 09:51:53.815368 56228 client.go:360] parsed scheme: "passthrough" I0223 09:51:53.815462 56228 passthrough.go:48] ccResolverWrapper: sending update to cc: {[{http://127.0.0.1:2379 <nil> 0 <nil>}] <nil> <nil>} I0223 09:51:53.815477 56228 clientconn.go:948] ClientConn switching balancer to "pick_first" deployment.apps/nginx-deployment-resources resource requirements updated I0223 09:51:53.913054 59929 event.go:291] "Event occurred" object="namespace-1614073901-27825/nginx-deployment-resources" kind="Deployment" apiVersion="apps/v1" type="Normal" reason="ScalingReplicaSet" message="Scaled down replica set nginx-deployment-resources-748ddcb48b to 2" I0223 09:51:53.920238 59929 event.go:291] "Event occurred" object="namespace-1614073901-27825/nginx-deployment-resources-748ddcb48b" kind="ReplicaSet" apiVersion="apps/v1" type="Normal" 
reason="SuccessfulDelete" message="Deleted pod: nginx-deployment-resources-748ddcb48b-r8b8f" ... skipping 158 lines ... status: "True" type: Progressing observedGeneration: 4 replicas: 4 unavailableReplicas: 4 updatedReplicas: 1 error: you must specify resources by --filename when --local is set. Example resource specifications include: '-f rsrc.yaml' '--filename=rsrc.json' [32mcore.sh:1432: Successful get deployment {{range.items}}{{(index .spec.template.spec.containers 0).resources.limits.cpu}}:{{end}}: 200m: (B[m[32mcore.sh:1433: Successful get deployment {{range.items}}{{(index .spec.template.spec.containers 1).resources.limits.cpu}}:{{end}}: 300m: (B[m[32mcore.sh:1434: Successful get deployment {{range.items}}{{(index .spec.template.spec.containers 1).resources.requests.cpu}}:{{end}}: 300m: ... skipping 46 lines ... pod-template-hash=69dd6dcd84 Annotations: deployment.kubernetes.io/desired-replicas: 1 deployment.kubernetes.io/max-replicas: 2 deployment.kubernetes.io/revision: 1 Controlled By: Deployment/test-nginx-apps Replicas: 1 current / 1 desired Pods Status: 0 Running / 1 Waiting / 0 Succeeded / 0 Failed Pod Template: Labels: app=test-nginx-apps pod-template-hash=69dd6dcd84 Containers: nginx: Image: k8s.gcr.io/nginx:test-cmd ... skipping 102 lines ... [32mapps.sh:305: Successful get deployment.apps {{range.items}}{{(index .spec.template.spec.containers 0).image}}:{{end}}: k8s.gcr.io/nginx:1.7.9: (B[m Image: k8s.gcr.io/nginx:test-cmd deployment.apps/nginx rolled back (server dry run) [32mapps.sh:309: Successful get deployment.apps {{range.items}}{{(index .spec.template.spec.containers 0).image}}:{{end}}: k8s.gcr.io/nginx:1.7.9: (B[mdeployment.apps/nginx rolled back [32mapps.sh:313: Successful get deployment {{range.items}}{{(index .spec.template.spec.containers 0).image}}:{{end}}: k8s.gcr.io/nginx:test-cmd: (B[merror: unable to find specified revision 1000000 in history [32mapps.sh:316: Successful get deployment {{range.items}}{{(index .spec.template.spec.containers 0).image}}:{{end}}: k8s.gcr.io/nginx:test-cmd: (B[mdeployment.apps/nginx rolled back [32mapps.sh:320: Successful get deployment {{range.items}}{{(index .spec.template.spec.containers 0).image}}:{{end}}: k8s.gcr.io/nginx:1.7.9: (B[mdeployment.apps/nginx paused error: you cannot rollback a paused deployment; resume it first with 'kubectl rollout resume deployment/nginx' and try again error: deployments.apps "nginx" can't restart paused deployment (run rollout resume first) deployment.apps/nginx resumed deployment.apps/nginx rolled back deployment.kubernetes.io/revision-history: 1,3 error: desired revision (3) is different from the running revision (5) deployment.apps/nginx restarted I0223 09:52:06.130779 59929 event.go:291] "Event occurred" object="namespace-1614073915-15965/nginx" kind="Deployment" apiVersion="apps/v1" type="Normal" reason="ScalingReplicaSet" message="Scaled down replica set nginx-54785cbcb8 to 2" I0223 09:52:06.137245 59929 event.go:291] "Event occurred" object="namespace-1614073915-15965/nginx-54785cbcb8" kind="ReplicaSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulDelete" message="Deleted pod: nginx-54785cbcb8-ph7df" I0223 09:52:06.143679 59929 event.go:291] "Event occurred" object="namespace-1614073915-15965/nginx" kind="Deployment" apiVersion="apps/v1" type="Normal" reason="ScalingReplicaSet" message="Scaled up replica set nginx-7fcbc46b4f to 1" I0223 09:52:06.147731 59929 event.go:291] "Event occurred" object="namespace-1614073915-15965/nginx-7fcbc46b4f" kind="ReplicaSet" 
apiVersion="apps/v1" type="Normal" reason="SuccessfulCreate" message="Created pod: nginx-7fcbc46b4f-wpr8z" I0223 09:52:06.957116 59929 horizontal.go:359] Horizontal Pod Autoscaler frontend has been deleted in namespace-1614073901-27825 ... skipping 81 lines ... (B[m[32mapps.sh:364: Successful get deployment {{range.items}}{{(index .spec.template.spec.containers 1).image}}:{{end}}: k8s.gcr.io/perl: (B[mdeployment.apps/nginx-deployment image updated I0223 09:52:09.110382 59929 event.go:291] "Event occurred" object="namespace-1614073915-15965/nginx-deployment" kind="Deployment" apiVersion="apps/v1" type="Normal" reason="ScalingReplicaSet" message="Scaled up replica set nginx-deployment-6dd48b9849 to 1" I0223 09:52:09.115180 59929 event.go:291] "Event occurred" object="namespace-1614073915-15965/nginx-deployment-6dd48b9849" kind="ReplicaSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulCreate" message="Created pod: nginx-deployment-6dd48b9849-lrtxn" [32mapps.sh:367: Successful get deployment {{range.items}}{{(index .spec.template.spec.containers 0).image}}:{{end}}: k8s.gcr.io/nginx:1.7.9: (B[m[32mapps.sh:368: Successful get deployment {{range.items}}{{(index .spec.template.spec.containers 1).image}}:{{end}}: k8s.gcr.io/perl: (B[merror: unable to find container named "redis" deployment.apps/nginx-deployment image updated [32mapps.sh:373: Successful get deployment {{range.items}}{{(index .spec.template.spec.containers 0).image}}:{{end}}: k8s.gcr.io/nginx:test-cmd: (B[m[32mapps.sh:374: Successful get deployment {{range.items}}{{(index .spec.template.spec.containers 1).image}}:{{end}}: k8s.gcr.io/perl: (B[mdeployment.apps/nginx-deployment image updated [32mapps.sh:377: Successful get deployment {{range.items}}{{(index .spec.template.spec.containers 0).image}}:{{end}}: k8s.gcr.io/nginx:1.7.9: (B[m[32mapps.sh:378: Successful get deployment {{range.items}}{{(index .spec.template.spec.containers 1).image}}:{{end}}: k8s.gcr.io/perl: ... skipping 23 lines ... 
(B[mdeployment.apps/nginx-deployment env updated I0223 09:52:12.290931 59929 event.go:291] "Event occurred" object="namespace-1614073915-15965/nginx-deployment" kind="Deployment" apiVersion="apps/v1" type="Normal" reason="ScalingReplicaSet" message="Scaled up replica set nginx-deployment-59b7fccd97 to 1" I0223 09:52:12.296767 59929 event.go:291] "Event occurred" object="namespace-1614073915-15965/nginx-deployment-59b7fccd97" kind="ReplicaSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulCreate" message="Created pod: nginx-deployment-59b7fccd97-d525l" [32mapps.sh:408: Successful get deploy nginx-deployment {{ (index (index .spec.template.spec.containers 0).env 0).name}}: KEY_2 (B[m[32mapps.sh:410: Successful get deploy nginx-deployment {{ len (index .spec.template.spec.containers 0).env }}: 1 (B[mdeployment.apps/nginx-deployment env updated (dry run) E0223 09:52:12.693626 59929 reflector.go:138] k8s.io/client-go/metadata/metadatainformer/informer.go:90: Failed to watch *v1.PartialObjectMetadata: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource deployment.apps/nginx-deployment env updated (server dry run) [32mapps.sh:414: Successful get deploy nginx-deployment {{ len (index .spec.template.spec.containers 0).env }}: 1 (B[mdeployment.apps/nginx-deployment env updated I0223 09:52:13.057996 59929 event.go:291] "Event occurred" object="namespace-1614073915-15965/nginx-deployment" kind="Deployment" apiVersion="apps/v1" type="Normal" reason="ScalingReplicaSet" message="Scaled down replica set nginx-deployment-b8c4df945 to 2" I0223 09:52:13.067277 59929 event.go:291] "Event occurred" object="namespace-1614073915-15965/nginx-deployment-b8c4df945" kind="ReplicaSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulDelete" message="Deleted pod: nginx-deployment-b8c4df945-4xk6m" I0223 09:52:13.071954 59929 event.go:291] "Event occurred" object="namespace-1614073915-15965/nginx-deployment" kind="Deployment" apiVersion="apps/v1" type="Normal" reason="ScalingReplicaSet" message="Scaled up replica set nginx-deployment-7f789d7c5f to 1" ... skipping 14 lines ... 
I0223 09:52:13.577610 59929 event.go:291] "Event occurred" object="namespace-1614073915-15965/nginx-deployment" kind="Deployment" apiVersion="apps/v1" type="Normal" reason="ScalingReplicaSet" message="Scaled up replica set nginx-deployment-7584fc66fd to 1" deployment.apps/nginx-deployment env updated deployment.apps/nginx-deployment env updated I0223 09:52:13.804722 59929 event.go:291] "Event occurred" object="namespace-1614073915-15965/nginx-deployment-59b7fccd97" kind="ReplicaSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulDelete" message="Deleted pod: nginx-deployment-59b7fccd97-d525l" deployment.apps "nginx-deployment" deleted configmap "test-set-env-config" deleted E0223 09:52:13.999932 59929 replica_set.go:532] sync "namespace-1614073915-15965/nginx-deployment-7584fc66fd" failed with replicasets.apps "nginx-deployment-7584fc66fd" not found secret "test-set-env-secret" deleted +++ exit code: 0 Recording: run_rs_tests Running command: run_rs_tests +++ Running case: test-cmd.run_rs_tests +++ working dir: /home/prow/go/src/k8s.io/kubernetes +++ command: run_rs_tests E0223 09:52:14.149800 59929 replica_set.go:532] sync "namespace-1614073915-15965/nginx-deployment-68d657fb6" failed with replicasets.apps "nginx-deployment-68d657fb6" not found +++ [0223 09:52:14] Creating namespace namespace-1614073934-8780 E0223 09:52:14.200149 59929 replica_set.go:532] sync "namespace-1614073915-15965/nginx-deployment-b8c4df945" failed with replicasets.apps "nginx-deployment-b8c4df945" not found namespace/namespace-1614073934-8780 created E0223 09:52:14.249619 59929 replica_set.go:532] sync "namespace-1614073915-15965/nginx-deployment-57ddd474c4" failed with replicasets.apps "nginx-deployment-57ddd474c4" not found E0223 09:52:14.299693 59929 replica_set.go:532] sync "namespace-1614073915-15965/nginx-deployment-59b7fccd97" failed with replicasets.apps "nginx-deployment-59b7fccd97" not found Context "test" modified. 
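The env checks above (apps.sh:408-414) verify kubectl set env on a deployment, including dry runs that must leave the live object unchanged. A minimal sketch, assuming the nginx-deployment object from the log; KEY_3 and its value are illustrative names, not taken from the log:

    # set an environment variable on every container in the pod template
    kubectl set env deployment/nginx-deployment KEY_2=value
    # dry runs: purely local (client) or validated by the server, with no persisted change
    kubectl set env deployment/nginx-deployment KEY_3=value --dry-run=client
    kubectl set env deployment/nginx-deployment KEY_3=value --dry-run=server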
+++ [0223 09:52:14] Testing kubectl(v1:replicasets) [32mapps.sh:541: Successful get rs {{range.items}}{{.metadata.name}}:{{end}}: (B[mreplicaset.apps/frontend created I0223 09:52:14.627826 59929 event.go:291] "Event occurred" object="namespace-1614073934-8780/frontend" kind="ReplicaSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulCreate" message="Created pod: frontend-v9bqt" +++ [0223 09:52:14] Deleting rs I0223 09:52:14.631652 59929 event.go:291] "Event occurred" object="namespace-1614073934-8780/frontend" kind="ReplicaSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulCreate" message="Created pod: frontend-x5qbv" I0223 09:52:14.634213 59929 event.go:291] "Event occurred" object="namespace-1614073934-8780/frontend" kind="ReplicaSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulCreate" message="Created pod: frontend-brff9" replicaset.apps "frontend" deleted E0223 09:52:14.748678 59929 replica_set.go:532] sync "namespace-1614073934-8780/frontend" failed with replicasets.apps "frontend" not found [32mapps.sh:547: Successful get pods -l "tier=frontend" {{range.items}}{{.metadata.name}}:{{end}}: (B[m[32mapps.sh:551: Successful get rs {{range.items}}{{.metadata.name}}:{{end}}: (B[mreplicaset.apps/frontend created I0223 09:52:15.141055 59929 event.go:291] "Event occurred" object="namespace-1614073934-8780/frontend" kind="ReplicaSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulCreate" message="Created pod: frontend-245mf" I0223 09:52:15.147404 59929 event.go:291] "Event occurred" object="namespace-1614073934-8780/frontend" kind="ReplicaSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulCreate" message="Created pod: frontend-qgpgq" I0223 09:52:15.149140 59929 event.go:291] "Event occurred" object="namespace-1614073934-8780/frontend" kind="ReplicaSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulCreate" message="Created pod: frontend-58h8k" [32mapps.sh:555: Successful get pods -l "tier=frontend" {{range.items}}{{(index .spec.containers 0).name}}:{{end}}: php-redis:php-redis:php-redis: (B[m+++ [0223 09:52:15] Deleting rs replicaset.apps "frontend" deleted E0223 09:52:15.449332 59929 replica_set.go:532] sync "namespace-1614073934-8780/frontend" failed with replicasets.apps "frontend" not found [32mapps.sh:559: Successful get rs {{range.items}}{{.metadata.name}}:{{end}}: (B[m[32mapps.sh:561: Successful get pods -l "tier=frontend" {{range.items}}{{(index .spec.containers 0).name}}:{{end}}: php-redis:php-redis:php-redis: (B[mI0223 09:52:15.580122 59929 horizontal.go:359] Horizontal Pod Autoscaler nginx-deployment has been deleted in namespace-1614073915-15965 pod "frontend-245mf" deleted pod "frontend-58h8k" deleted pod "frontend-qgpgq" deleted ... skipping 16 lines ... Namespace: namespace-1614073934-8780 Selector: app=guestbook,tier=frontend Labels: app=guestbook tier=frontend Annotations: <none> Replicas: 3 current / 3 desired Pods Status: 0 Running / 3 Waiting / 0 Succeeded / 0 Failed Pod Template: Labels: app=guestbook tier=frontend Containers: php-redis: Image: gcr.io/google_samples/gb-frontend:v3 ... skipping 17 lines ... Namespace: namespace-1614073934-8780 Selector: app=guestbook,tier=frontend Labels: app=guestbook tier=frontend Annotations: <none> Replicas: 3 current / 3 desired Pods Status: 0 Running / 3 Waiting / 0 Succeeded / 0 Failed Pod Template: Labels: app=guestbook tier=frontend Containers: php-redis: Image: gcr.io/google_samples/gb-frontend:v3 ... skipping 18 lines ... 
Namespace: namespace-1614073934-8780 Selector: app=guestbook,tier=frontend Labels: app=guestbook tier=frontend Annotations: <none> Replicas: 3 current / 3 desired Pods Status: 0 Running / 3 Waiting / 0 Succeeded / 0 Failed Pod Template: Labels: app=guestbook tier=frontend Containers: php-redis: Image: gcr.io/google_samples/gb-frontend:v3 ... skipping 12 lines ... Namespace: namespace-1614073934-8780 Selector: app=guestbook,tier=frontend Labels: app=guestbook tier=frontend Annotations: <none> Replicas: 3 current / 3 desired Pods Status: 0 Running / 3 Waiting / 0 Succeeded / 0 Failed Pod Template: Labels: app=guestbook tier=frontend Containers: php-redis: Image: gcr.io/google_samples/gb-frontend:v3 ... skipping 25 lines ... Namespace: namespace-1614073934-8780 Selector: app=guestbook,tier=frontend Labels: app=guestbook tier=frontend Annotations: <none> Replicas: 3 current / 3 desired Pods Status: 0 Running / 3 Waiting / 0 Succeeded / 0 Failed Pod Template: Labels: app=guestbook tier=frontend Containers: php-redis: Image: gcr.io/google_samples/gb-frontend:v3 ... skipping 17 lines ... Namespace: namespace-1614073934-8780 Selector: app=guestbook,tier=frontend Labels: app=guestbook tier=frontend Annotations: <none> Replicas: 3 current / 3 desired Pods Status: 0 Running / 3 Waiting / 0 Succeeded / 0 Failed Pod Template: Labels: app=guestbook tier=frontend Containers: php-redis: Image: gcr.io/google_samples/gb-frontend:v3 ... skipping 17 lines ... Namespace: namespace-1614073934-8780 Selector: app=guestbook,tier=frontend Labels: app=guestbook tier=frontend Annotations: <none> Replicas: 3 current / 3 desired Pods Status: 0 Running / 3 Waiting / 0 Succeeded / 0 Failed Pod Template: Labels: app=guestbook tier=frontend Containers: php-redis: Image: gcr.io/google_samples/gb-frontend:v3 ... skipping 11 lines ... Namespace: namespace-1614073934-8780 Selector: app=guestbook,tier=frontend Labels: app=guestbook tier=frontend Annotations: <none> Replicas: 3 current / 3 desired Pods Status: 0 Running / 3 Waiting / 0 Succeeded / 0 Failed Pod Template: Labels: app=guestbook tier=frontend Containers: php-redis: Image: gcr.io/google_samples/gb-frontend:v3 ... skipping 156 lines ... (B[m[32mapps.sh:625: Successful get deploy scale-2 {{.spec.replicas}}: 3 (B[m[32mapps.sh:626: Successful get deploy scale-3 {{.spec.replicas}}: 3 (B[mreplicaset.apps "frontend" deleted deployment.apps "scale-1" deleted deployment.apps "scale-2" deleted deployment.apps "scale-3" deleted E0223 09:52:20.438125 59929 reflector.go:138] k8s.io/client-go/metadata/metadatainformer/informer.go:90: Failed to watch *v1.PartialObjectMetadata: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource replicaset.apps/frontend created I0223 09:52:20.658606 59929 event.go:291] "Event occurred" object="namespace-1614073934-8780/frontend" kind="ReplicaSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulCreate" message="Created pod: frontend-47jdr" I0223 09:52:20.662754 59929 event.go:291] "Event occurred" object="namespace-1614073934-8780/frontend" kind="ReplicaSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulCreate" message="Created pod: frontend-lhk69" I0223 09:52:20.667271 59929 event.go:291] "Event occurred" object="namespace-1614073934-8780/frontend" kind="ReplicaSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulCreate" message="Created pod: frontend-n69x7" [32mapps.sh:634: Successful get rs frontend {{.spec.replicas}}: 3 (B[mservice/frontend exposed ... skipping 43 lines ... 
I0223 09:52:24.844690 59929 event.go:291] "Event occurred" object="namespace-1614073934-8780/frontend" kind="ReplicaSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulCreate" message="Created pod: frontend-858c7" [32mapps.sh:699: Successful get rs {{range.items}}{{.metadata.name}}:{{end}}: frontend: (B[mhorizontalpodautoscaler.autoscaling/frontend autoscaled [32mapps.sh:702: Successful get hpa frontend {{.spec.minReplicas}} {{.spec.maxReplicas}} {{.spec.targetCPUUtilizationPercentage}}: 1 2 70 (B[mhorizontalpodautoscaler.autoscaling "frontend" deleted horizontalpodautoscaler.autoscaling/frontend autoscaled E0223 09:52:25.380054 59929 reflector.go:138] k8s.io/client-go/metadata/metadatainformer/informer.go:90: Failed to watch *v1.PartialObjectMetadata: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource [32mapps.sh:706: Successful get hpa frontend {{.spec.minReplicas}} {{.spec.maxReplicas}} {{.spec.targetCPUUtilizationPercentage}}: 2 3 80 (B[mSuccessful message:kubectl-autoscale has:kubectl-autoscale horizontalpodautoscaler.autoscaling "frontend" deleted Error: required flag(s) "max" not set replicaset.apps "frontend" deleted +++ exit code: 0 Recording: run_stateful_set_tests Running command: run_stateful_set_tests +++ Running case: test-cmd.run_stateful_set_tests ... skipping 61 lines ... (B[m[32mapps.sh:466: Successful get statefulset {{range.items}}{{(index .spec.template.spec.containers 1).image}}:{{end}}: k8s.gcr.io/pause:2.0: (B[m[32mapps.sh:467: Successful get statefulset {{range.items}}{{(len .spec.template.spec.containers)}}{{end}}: 2 (B[mstatefulset.apps/nginx rolled back [32mapps.sh:470: Successful get statefulset {{range.items}}{{(index .spec.template.spec.containers 0).image}}:{{end}}: k8s.gcr.io/nginx-slim:0.7: (B[m[32mapps.sh:471: Successful get statefulset {{range.items}}{{(len .spec.template.spec.containers)}}{{end}}: 1 (B[mSuccessful message:error: unable to find specified revision 1000000 in history has:unable to find specified revision [32mapps.sh:475: Successful get statefulset {{range.items}}{{(index .spec.template.spec.containers 0).image}}:{{end}}: k8s.gcr.io/nginx-slim:0.7: (B[m[32mapps.sh:476: Successful get statefulset {{range.items}}{{(len .spec.template.spec.containers)}}{{end}}: 1 (B[mstatefulset.apps/nginx rolled back [32mapps.sh:479: Successful get statefulset {{range.items}}{{(index .spec.template.spec.containers 0).image}}:{{end}}: k8s.gcr.io/nginx-slim:0.8: (B[m[32mapps.sh:480: Successful get statefulset {{range.items}}{{(index .spec.template.spec.containers 1).image}}:{{end}}: k8s.gcr.io/pause:2.0: ... skipping 61 lines ... Name: mock Namespace: namespace-1614073951-13485 Selector: app=mock Labels: app=mock Annotations: <none> Replicas: 1 current / 1 desired Pods Status: 0 Running / 1 Waiting / 0 Succeeded / 0 Failed Pod Template: Labels: app=mock Containers: mock-container: Image: k8s.gcr.io/pause:3.4.1 Port: 9949/TCP ... skipping 59 lines ... Name: mock Namespace: namespace-1614073951-13485 Selector: app=mock Labels: app=mock Annotations: <none> Replicas: 1 current / 1 desired Pods Status: 0 Running / 1 Waiting / 0 Succeeded / 0 Failed Pod Template: Labels: app=mock Containers: mock-container: Image: k8s.gcr.io/pause:3.4.1 Port: 9949/TCP ... skipping 59 lines ... 
Name: mock Namespace: namespace-1614073951-13485 Selector: app=mock Labels: app=mock Annotations: <none> Replicas: 1 current / 1 desired Pods Status: 0 Running / 1 Waiting / 0 Succeeded / 0 Failed Pod Template: Labels: app=mock Containers: mock-container: Image: k8s.gcr.io/pause:3.4.1 Port: 9949/TCP ... skipping 45 lines ... Namespace: namespace-1614073951-13485 Selector: app=mock Labels: app=mock status=replaced Annotations: <none> Replicas: 1 current / 1 desired Pods Status: 0 Running / 1 Waiting / 0 Succeeded / 0 Failed Pod Template: Labels: app=mock Containers: mock-container: Image: k8s.gcr.io/pause:3.4.1 Port: 9949/TCP ... skipping 11 lines ... Namespace: namespace-1614073951-13485 Selector: app=mock2 Labels: app=mock2 status=replaced Annotations: <none> Replicas: 1 current / 1 desired Pods Status: 0 Running / 1 Waiting / 0 Succeeded / 0 Failed Pod Template: Labels: app=mock2 Containers: mock-container: Image: k8s.gcr.io/pause:3.4.1 Port: 9949/TCP ... skipping 109 lines ... +++ [0223 09:52:45] Creating namespace namespace-1614073965-10974 namespace/namespace-1614073965-10974 created Context "test" modified. +++ [0223 09:52:45] Testing persistent volumes [32mstorage.sh:30: Successful get pv {{range.items}}{{.metadata.name}}:{{end}}: (B[mpersistentvolume/pv0001 created E0223 09:52:46.308279 59929 pv_protection_controller.go:118] PV pv0001 failed with : Operation cannot be fulfilled on persistentvolumes "pv0001": the object has been modified; please apply your changes to the latest version and try again [32mstorage.sh:33: Successful get pv {{range.items}}{{.metadata.name}}:{{end}}: pv0001: (B[mpersistentvolume "pv0001" deleted persistentvolume/pv0002 created E0223 09:52:46.719345 59929 pv_protection_controller.go:118] PV pv0002 failed with : Operation cannot be fulfilled on persistentvolumes "pv0002": the object has been modified; please apply your changes to the latest version and try again [32mstorage.sh:36: Successful get pv {{range.items}}{{.metadata.name}}:{{end}}: pv0002: (B[mpersistentvolume "pv0002" deleted persistentvolume/pv0003 created [32mstorage.sh:39: Successful get pv {{range.items}}{{.metadata.name}}:{{end}}: pv0003: (B[mpersistentvolume "pv0003" deleted [32mstorage.sh:42: Successful get pv {{range.items}}{{.metadata.name}}:{{end}}: ... skipping 32 lines ... 
[32mstorage.sh:71: Successful get pvc {{range.items}}{{.metadata.name}}:{{end}}: myclaim-2: (B[mpersistentvolumeclaim "myclaim-2" deleted I0223 09:52:49.102596 59929 event.go:291] "Event occurred" object="namespace-1614073968-24637/myclaim-2" kind="PersistentVolumeClaim" apiVersion="v1" type="Normal" reason="FailedBinding" message="no persistent volumes available for this claim and no storage class is set" persistentvolumeclaim/myclaim-3 created I0223 09:52:49.328814 59929 event.go:291] "Event occurred" object="namespace-1614073968-24637/myclaim-3" kind="PersistentVolumeClaim" apiVersion="v1" type="Normal" reason="FailedBinding" message="no persistent volumes available for this claim and no storage class is set" I0223 09:52:49.333340 59929 event.go:291] "Event occurred" object="namespace-1614073968-24637/myclaim-3" kind="PersistentVolumeClaim" apiVersion="v1" type="Normal" reason="FailedBinding" message="no persistent volumes available for this claim and no storage class is set" E0223 09:52:49.408947 59929 reflector.go:138] k8s.io/client-go/metadata/metadatainformer/informer.go:90: Failed to watch *v1.PartialObjectMetadata: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource [32mstorage.sh:75: Successful get pvc {{range.items}}{{.metadata.name}}:{{end}}: myclaim-3: (B[mI0223 09:52:49.524602 59929 event.go:291] "Event occurred" object="namespace-1614073968-24637/myclaim-3" kind="PersistentVolumeClaim" apiVersion="v1" type="Normal" reason="FailedBinding" message="no persistent volumes available for this claim and no storage class is set" persistentvolumeclaim "myclaim-3" deleted [32mstorage.sh:78: Successful get pvc {{range.items}}{{.metadata.name}}:{{end}}: (B[m+++ exit code: 0 Recording: run_storage_class_tests ... skipping 14 lines ... Running command: run_nodes_tests +++ Running case: test-cmd.run_nodes_tests +++ working dir: /home/prow/go/src/k8s.io/kubernetes +++ command: run_nodes_tests +++ [0223 09:52:50] Testing kubectl(v1:nodes) E0223 09:52:50.625207 59929 reflector.go:138] k8s.io/client-go/metadata/metadatainformer/informer.go:90: Failed to watch *v1.PartialObjectMetadata: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource [32mcore.sh:1535: Successful get nodes {{range.items}}{{.metadata.name}}:{{end}}: 127.0.0.1: (B[mmatched Name: matched Labels: matched CreationTimestamp: matched Conditions: matched Addresses: ... skipping 4 lines ... Roles: <none> Labels: <none> Annotations: node.alpha.kubernetes.io/ttl: 0 CreationTimestamp: Tue, 23 Feb 2021 09:47:07 +0000 Taints: node.kubernetes.io/unreachable:NoSchedule Unschedulable: false Lease: Failed to get lease: leases.coordination.k8s.io "127.0.0.1" not found Conditions: Type Status LastHeartbeatTime LastTransitionTime Reason Message ---- ------ ----------------- ------------------ ------ ------- Ready Unknown Tue, 23 Feb 2021 09:47:07 +0000 Tue, 23 Feb 2021 09:48:08 +0000 NodeStatusNeverUpdated Kubelet never posted node status. MemoryPressure Unknown Tue, 23 Feb 2021 09:47:07 +0000 Tue, 23 Feb 2021 09:48:08 +0000 NodeStatusNeverUpdated Kubelet never posted node status. DiskPressure Unknown Tue, 23 Feb 2021 09:47:07 +0000 Tue, 23 Feb 2021 09:48:08 +0000 NodeStatusNeverUpdated Kubelet never posted node status. ... skipping 30 lines ... 
Roles: <none> Labels: <none> Annotations: node.alpha.kubernetes.io/ttl: 0 CreationTimestamp: Tue, 23 Feb 2021 09:47:07 +0000 Taints: node.kubernetes.io/unreachable:NoSchedule Unschedulable: false Lease: Failed to get lease: leases.coordination.k8s.io "127.0.0.1" not found Conditions: Type Status LastHeartbeatTime LastTransitionTime Reason Message ---- ------ ----------------- ------------------ ------ ------- Ready Unknown Tue, 23 Feb 2021 09:47:07 +0000 Tue, 23 Feb 2021 09:48:08 +0000 NodeStatusNeverUpdated Kubelet never posted node status. MemoryPressure Unknown Tue, 23 Feb 2021 09:47:07 +0000 Tue, 23 Feb 2021 09:48:08 +0000 NodeStatusNeverUpdated Kubelet never posted node status. DiskPressure Unknown Tue, 23 Feb 2021 09:47:07 +0000 Tue, 23 Feb 2021 09:48:08 +0000 NodeStatusNeverUpdated Kubelet never posted node status. ... skipping 31 lines ... Roles: <none> Labels: <none> Annotations: node.alpha.kubernetes.io/ttl: 0 CreationTimestamp: Tue, 23 Feb 2021 09:47:07 +0000 Taints: node.kubernetes.io/unreachable:NoSchedule Unschedulable: false Lease: Failed to get lease: leases.coordination.k8s.io "127.0.0.1" not found Conditions: Type Status LastHeartbeatTime LastTransitionTime Reason Message ---- ------ ----------------- ------------------ ------ ------- Ready Unknown Tue, 23 Feb 2021 09:47:07 +0000 Tue, 23 Feb 2021 09:48:08 +0000 NodeStatusNeverUpdated Kubelet never posted node status. MemoryPressure Unknown Tue, 23 Feb 2021 09:47:07 +0000 Tue, 23 Feb 2021 09:48:08 +0000 NodeStatusNeverUpdated Kubelet never posted node status. DiskPressure Unknown Tue, 23 Feb 2021 09:47:07 +0000 Tue, 23 Feb 2021 09:48:08 +0000 NodeStatusNeverUpdated Kubelet never posted node status. ... skipping 30 lines ... Roles: <none> Labels: <none> Annotations: node.alpha.kubernetes.io/ttl: 0 CreationTimestamp: Tue, 23 Feb 2021 09:47:07 +0000 Taints: node.kubernetes.io/unreachable:NoSchedule Unschedulable: false Lease: Failed to get lease: leases.coordination.k8s.io "127.0.0.1" not found Conditions: Type Status LastHeartbeatTime LastTransitionTime Reason Message ---- ------ ----------------- ------------------ ------ ------- Ready Unknown Tue, 23 Feb 2021 09:47:07 +0000 Tue, 23 Feb 2021 09:48:08 +0000 NodeStatusNeverUpdated Kubelet never posted node status. MemoryPressure Unknown Tue, 23 Feb 2021 09:47:07 +0000 Tue, 23 Feb 2021 09:48:08 +0000 NodeStatusNeverUpdated Kubelet never posted node status. DiskPressure Unknown Tue, 23 Feb 2021 09:47:07 +0000 Tue, 23 Feb 2021 09:48:08 +0000 NodeStatusNeverUpdated Kubelet never posted node status. ... skipping 38 lines ... Roles: <none> Labels: <none> Annotations: node.alpha.kubernetes.io/ttl: 0 CreationTimestamp: Tue, 23 Feb 2021 09:47:07 +0000 Taints: node.kubernetes.io/unreachable:NoSchedule Unschedulable: false Lease: Failed to get lease: leases.coordination.k8s.io "127.0.0.1" not found Conditions: Type Status LastHeartbeatTime LastTransitionTime Reason Message ---- ------ ----------------- ------------------ ------ ------- Ready Unknown Tue, 23 Feb 2021 09:47:07 +0000 Tue, 23 Feb 2021 09:48:08 +0000 NodeStatusNeverUpdated Kubelet never posted node status. MemoryPressure Unknown Tue, 23 Feb 2021 09:47:07 +0000 Tue, 23 Feb 2021 09:48:08 +0000 NodeStatusNeverUpdated Kubelet never posted node status. DiskPressure Unknown Tue, 23 Feb 2021 09:47:07 +0000 Tue, 23 Feb 2021 09:48:08 +0000 NodeStatusNeverUpdated Kubelet never posted node status. ... skipping 30 lines ... 
Roles: <none> Labels: <none> Annotations: node.alpha.kubernetes.io/ttl: 0 CreationTimestamp: Tue, 23 Feb 2021 09:47:07 +0000 Taints: node.kubernetes.io/unreachable:NoSchedule Unschedulable: false Lease: Failed to get lease: leases.coordination.k8s.io "127.0.0.1" not found Conditions: Type Status LastHeartbeatTime LastTransitionTime Reason Message ---- ------ ----------------- ------------------ ------ ------- Ready Unknown Tue, 23 Feb 2021 09:47:07 +0000 Tue, 23 Feb 2021 09:48:08 +0000 NodeStatusNeverUpdated Kubelet never posted node status. MemoryPressure Unknown Tue, 23 Feb 2021 09:47:07 +0000 Tue, 23 Feb 2021 09:48:08 +0000 NodeStatusNeverUpdated Kubelet never posted node status. DiskPressure Unknown Tue, 23 Feb 2021 09:47:07 +0000 Tue, 23 Feb 2021 09:48:08 +0000 NodeStatusNeverUpdated Kubelet never posted node status. ... skipping 30 lines ... Roles: <none> Labels: <none> Annotations: node.alpha.kubernetes.io/ttl: 0 CreationTimestamp: Tue, 23 Feb 2021 09:47:07 +0000 Taints: node.kubernetes.io/unreachable:NoSchedule Unschedulable: false Lease: Failed to get lease: leases.coordination.k8s.io "127.0.0.1" not found Conditions: Type Status LastHeartbeatTime LastTransitionTime Reason Message ---- ------ ----------------- ------------------ ------ ------- Ready Unknown Tue, 23 Feb 2021 09:47:07 +0000 Tue, 23 Feb 2021 09:48:08 +0000 NodeStatusNeverUpdated Kubelet never posted node status. MemoryPressure Unknown Tue, 23 Feb 2021 09:47:07 +0000 Tue, 23 Feb 2021 09:48:08 +0000 NodeStatusNeverUpdated Kubelet never posted node status. DiskPressure Unknown Tue, 23 Feb 2021 09:47:07 +0000 Tue, 23 Feb 2021 09:48:08 +0000 NodeStatusNeverUpdated Kubelet never posted node status. ... skipping 29 lines ... Roles: <none> Labels: <none> Annotations: node.alpha.kubernetes.io/ttl: 0 CreationTimestamp: Tue, 23 Feb 2021 09:47:07 +0000 Taints: node.kubernetes.io/unreachable:NoSchedule Unschedulable: false Lease: Failed to get lease: leases.coordination.k8s.io "127.0.0.1" not found Conditions: Type Status LastHeartbeatTime LastTransitionTime Reason Message ---- ------ ----------------- ------------------ ------ ------- Ready Unknown Tue, 23 Feb 2021 09:47:07 +0000 Tue, 23 Feb 2021 09:48:08 +0000 NodeStatusNeverUpdated Kubelet never posted node status. MemoryPressure Unknown Tue, 23 Feb 2021 09:47:07 +0000 Tue, 23 Feb 2021 09:48:08 +0000 NodeStatusNeverUpdated Kubelet never posted node status. DiskPressure Unknown Tue, 23 Feb 2021 09:47:07 +0000 Tue, 23 Feb 2021 09:48:08 +0000 NodeStatusNeverUpdated Kubelet never posted node status. ... skipping 132 lines ... yes has:the server doesn't have a resource type Successful message:yes has:yes Successful message:error: --subresource can not be used with NonResourceURL has:subresource can not be used with NonResourceURL Successful Successful message:yes 0 has:0 ... skipping 59 lines ... 
{Verbs:[get list watch] APIGroups:[] Resources:[configmaps] ResourceNames:[] NonResourceURLs:[]} [32mlegacy-script.sh:846: Successful get rolebindings -n some-other-random -l test-cmd=auth {{range.items}}{{.metadata.name}}:{{end}}: testing-RB: (B[m[32mlegacy-script.sh:847: Successful get roles -n some-other-random -l test-cmd=auth {{range.items}}{{.metadata.name}}:{{end}}: testing-R: (B[m[32mlegacy-script.sh:848: Successful get clusterrolebindings -l test-cmd=auth {{range.items}}{{.metadata.name}}:{{end}}: testing-CRB: (B[m[32mlegacy-script.sh:849: Successful get clusterroles -l test-cmd=auth {{range.items}}{{.metadata.name}}:{{end}}: testing-CR: (B[mSuccessful message:error: only rbac.authorization.k8s.io/v1 is supported: not *v1beta1.ClusterRole has:only rbac.authorization.k8s.io/v1 is supported rolebinding.rbac.authorization.k8s.io "testing-RB" deleted role.rbac.authorization.k8s.io "testing-R" deleted warning: deleting cluster-scoped resources, not scoped to the provided namespace clusterrole.rbac.authorization.k8s.io "testing-CR" deleted clusterrolebinding.rbac.authorization.k8s.io "testing-CRB" deleted ... skipping 24 lines ... [32mdiscovery.sh:91: Successful get all -l'app=cassandra' {{range.items}}{{range .metadata.labels}}{{.}}:{{end}}{{end}}: cassandra:cassandra:cassandra:cassandra: (B[mpod "cassandra-m87zx" deleted I0223 09:52:57.060844 59929 event.go:291] "Event occurred" object="namespace-1614073976-15414/cassandra" kind="ReplicationController" apiVersion="v1" type="Normal" reason="SuccessfulCreate" message="Created pod: cassandra-flqg8" pod "cassandra-xspcm" deleted I0223 09:52:57.073064 59929 event.go:291] "Event occurred" object="namespace-1614073976-15414/cassandra" kind="ReplicationController" apiVersion="v1" type="Normal" reason="SuccessfulCreate" message="Created pod: cassandra-dk2nd" replicationcontroller "cassandra" deleted E0223 09:52:57.079909 59929 replica_set.go:532] sync "namespace-1614073976-15414/cassandra" failed with replicationcontrollers "cassandra" not found service "cassandra" deleted +++ exit code: 0 Recording: run_kubectl_explain_tests Running command: run_kubectl_explain_tests +++ Running case: test-cmd.run_kubectl_explain_tests ... skipping 362 lines ... namespace-1614073968-24637 default 0 14s namespace-1614073976-15414 default 0 6s some-other-random default 0 7s has:all-ns-test-2 namespace "all-ns-test-1" deleted namespace "all-ns-test-2" deleted E0223 09:53:09.435389 59929 reflector.go:138] k8s.io/client-go/metadata/metadatainformer/informer.go:90: Failed to watch *v1.PartialObjectMetadata: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource I0223 09:53:12.800107 59929 namespace_controller.go:185] Namespace has been deleted all-ns-test-1 E0223 09:53:13.099650 59929 reflector.go:138] k8s.io/client-go/metadata/metadatainformer/informer.go:90: Failed to watch *v1.PartialObjectMetadata: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource [32mget.sh:376: Successful get pods {{range.items}}{{.metadata.name}}:{{end}}: valid-pod: (B[mwarning: Immediate deletion does not wait for confirmation that the running resource has been terminated. The resource may continue to run on the cluster indefinitely. pod "valid-pod" force deleted [32mget.sh:380: Successful get pods {{range.items}}{{.metadata.name}}:{{end}}: (B[m[32mget.sh:384: Successful get nodes {{range.items}}{{.metadata.name}}:{{end}}: 127.0.0.1: (B[mSuccessful ... skipping 570 lines ... 
message:node/127.0.0.1 already uncordoned (server dry run) has:already uncordoned [32mnode-management.sh:145: Successful get nodes 127.0.0.1 {{.spec.unschedulable}}: <no value> (B[mnode/127.0.0.1 labeled [32mnode-management.sh:150: Successful get nodes 127.0.0.1 {{.metadata.labels.test}}: label (B[mSuccessful message:error: cannot specify both a node name and a --selector option See 'kubectl drain -h' for help and examples has:cannot specify both a node name Successful message:error: USAGE: cordon NODE [flags] See 'kubectl cordon -h' for help and examples has:error\: USAGE\: cordon NODE node/127.0.0.1 already uncordoned Successful message:error: You must provide one or more resources by argument or filename. Example resource specifications include: '-f rsrc.yaml' '--filename=rsrc.json' '<resource> <name>' '<resource>' has:must provide one or more resources ... skipping 14 lines ... +++ [0223 09:53:28] Testing kubectl plugins Successful message:The following compatible plugins are available: test/fixtures/pkg/kubectl/plugins/version/kubectl-version - warning: kubectl-version overwrites existing command: "kubectl version" error: one plugin warning was found has:kubectl-version overwrites existing command: "kubectl version" Successful message:The following compatible plugins are available: test/fixtures/pkg/kubectl/plugins/kubectl-foo test/fixtures/pkg/kubectl/plugins/foo/kubectl-foo - warning: test/fixtures/pkg/kubectl/plugins/foo/kubectl-foo is overshadowed by a similarly named plugin: test/fixtures/pkg/kubectl/plugins/kubectl-foo error: one plugin warning was found has:test/fixtures/pkg/kubectl/plugins/foo/kubectl-foo is overshadowed by a similarly named plugin Successful message:The following compatible plugins are available: test/fixtures/pkg/kubectl/plugins/kubectl-foo has:plugins are available Successful message:Unable to read directory "test/fixtures/pkg/kubectl/plugins/empty" from your PATH: open test/fixtures/pkg/kubectl/plugins/empty: no such file or directory. Skipping... error: unable to find any kubectl plugins in your PATH has:unable to find any kubectl plugins in your PATH Successful message:I am plugin foo has:plugin foo Successful message:I am plugin bar called with args test/fixtures/pkg/kubectl/plugins/bar/kubectl-bar arg1 ... skipping 10 lines ... 
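The node-management checks above exercise cordon, drain, and uncordon against the single 127.0.0.1 node, including the rejection of a node name combined with --selector. A minimal sketch of those invocations; the test's exact flag set is not shown:

    # mark the node unschedulable, then schedulable again
    kubectl cordon 127.0.0.1
    kubectl uncordon 127.0.0.1
    # drain evicts or deletes pods; combining a node name with --selector is rejected
    kubectl drain 127.0.0.1 --ignore-daemonsets
    kubectl drain 127.0.0.1 --selector=test=label   # error: cannot specify both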
+++ Running case: test-cmd.run_impersonation_tests +++ working dir: /home/prow/go/src/k8s.io/kubernetes +++ command: run_impersonation_tests +++ [0223 09:53:29] Testing impersonation Successful message:error: requesting groups or user-extra for test-admin without impersonating a user has:without impersonating a user Warning: certificates.k8s.io/v1beta1 CertificateSigningRequest is deprecated in v1.19+, unavailable in v1.22+; use certificates.k8s.io/v1 CertificateSigningRequest certificatesigningrequest.certificates.k8s.io/foo created [32mauthorization.sh:68: Successful get csr/foo {{.spec.username}}: user1 (B[m[32mauthorization.sh:69: Successful get csr/foo {{range .spec.groups}}{{.}}{{end}}: system:authenticated (B[mWarning: certificates.k8s.io/v1beta1 CertificateSigningRequest is deprecated in v1.19+, unavailable in v1.22+; use certificates.k8s.io/v1 CertificateSigningRequest certificatesigningrequest.certificates.k8s.io "foo" deleted Warning: certificates.k8s.io/v1beta1 CertificateSigningRequest is deprecated in v1.19+, unavailable in v1.22+; use certificates.k8s.io/v1 CertificateSigningRequest certificatesigningrequest.certificates.k8s.io/foo created E0223 09:53:30.455414 59929 reflector.go:138] k8s.io/client-go/metadata/metadatainformer/informer.go:90: Failed to watch *v1.PartialObjectMetadata: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource [32mauthorization.sh:74: Successful get csr/foo {{len .spec.groups}}: 4 (B[m[32mauthorization.sh:75: Successful get csr/foo {{range .spec.groups}}{{.}} {{end}}: group2 group1 ,,,chameleon system:authenticated (B[mWarning: certificates.k8s.io/v1beta1 CertificateSigningRequest is deprecated in v1.19+, unavailable in v1.22+; use certificates.k8s.io/v1 CertificateSigningRequest certificatesigningrequest.certificates.k8s.io "foo" deleted +++ exit code: 0 Recording: run_wait_tests ... skipping 28 lines ... Running command: run_kubectl_debug_pod_tests +++ Running case: test-cmd.run_kubectl_debug_pod_tests +++ working dir: /home/prow/go/src/k8s.io/kubernetes +++ command: run_kubectl_debug_pod_tests +++ [0223 09:53:33] Creating namespace namespace-1614074013-24554 E0223 09:53:33.527819 59929 reflector.go:138] k8s.io/client-go/metadata/metadatainformer/informer.go:90: Failed to watch *v1.PartialObjectMetadata: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource namespace/namespace-1614074013-24554 created Context "test" modified. +++ [0223 09:53:33] Testing kubectl debug (pod tests) pod/target created [32mdebug.sh:32: Successful get pod {{range.items}}{{.metadata.name}}:{{end}}: target: (B[m[32mdebug.sh:36: Successful get pod {{range.items}}{{.metadata.name}}:{{end}}: target:target-copy: ... skipping 74 lines ... 
I0223 09:53:37.525630 56228 clientconn.go:897] blockingPicker: the picked transport is not ready, loop back to repick
W0223 09:53:37.525905 56228 clientconn.go:1223] grpc: addrConn.createTransport failed to connect to {http://127.0.0.1:2379 <nil> 0 <nil>}. Err :connection error: desc = "transport: Error while dialing dial tcp 127.0.0.1:2379: connect: connection refused". Reconnecting...
... skipping repeated "blockingPicker" and "addrConn.createTransport failed to connect" messages (connection to 127.0.0.1:2379 refused) ...
Err :connection error: desc = "transport: Error while dialing dial tcp 127.0.0.1:2379: connect: connection refused". Reconnecting... W0223 09:53:37.529276 56228 clientconn.go:1223] grpc: addrConn.createTransport failed to connect to {http://127.0.0.1:2379 <nil> 0 <nil>}. Err :connection error: desc = "transport: Error while dialing dial tcp 127.0.0.1:2379: connect: connection refused". Reconnecting... W0223 09:53:37.529331 56228 clientconn.go:1223] grpc: addrConn.createTransport failed to connect to {http://127.0.0.1:2379 <nil> 0 <nil>}. Err :connection error: desc = "transport: Error while dialing dial tcp 127.0.0.1:2379: connect: connection refused". Reconnecting... W0223 09:53:37.529414 56228 clientconn.go:1223] grpc: addrConn.createTransport failed to connect to {http://127.0.0.1:2379 <nil> 0 <nil>}. Err :connection error: desc = "transport: Error while dialing dial tcp 127.0.0.1:2379: connect: connection refused". Reconnecting... W0223 09:53:37.529472 56228 clientconn.go:1223] grpc: addrConn.createTransport failed to connect to {http://127.0.0.1:2379 <nil> 0 <nil>}. Err :connection error: desc = "transport: Error while dialing dial tcp 127.0.0.1:2379: connect: connection refused". Reconnecting... W0223 09:53:37.529537 56228 clientconn.go:1223] grpc: addrConn.createTransport failed to connect to {http://127.0.0.1:2379 <nil> 0 <nil>}. Err :connection error: desc = "transport: Error while dialing dial tcp 127.0.0.1:2379: connect: connection refused". Reconnecting... W0223 09:53:37.529582 56228 clientconn.go:1223] grpc: addrConn.createTransport failed to connect to {http://127.0.0.1:2379 <nil> 0 <nil>}. Err :connection error: desc = "transport: Error while dialing dial tcp 127.0.0.1:2379: connect: connection refused". Reconnecting... W0223 09:53:37.529540 56228 clientconn.go:1223] grpc: addrConn.createTransport failed to connect to {http://127.0.0.1:2379 <nil> 0 <nil>}. Err :connection error: desc = "transport: Error while dialing dial tcp 127.0.0.1:2379: connect: connection refused". Reconnecting... W0223 09:53:37.529614 56228 clientconn.go:1223] grpc: addrConn.createTransport failed to connect to {http://127.0.0.1:2379 <nil> 0 <nil>}. Err :connection error: desc = "transport: Error while dialing dial tcp 127.0.0.1:2379: connect: connection refused". Reconnecting... W0223 09:53:37.529631 56228 clientconn.go:1223] grpc: addrConn.createTransport failed to connect to {http://127.0.0.1:2379 <nil> 0 <nil>}. Err :connection error: desc = "transport: Error while dialing dial tcp 127.0.0.1:2379: connect: connection refused". Reconnecting... W0223 09:53:37.529741 56228 clientconn.go:1223] grpc: addrConn.createTransport failed to connect to {http://127.0.0.1:2379 <nil> 0 <nil>}. Err :connection error: desc = "transport: Error while dialing dial tcp 127.0.0.1:2379: connect: connection refused". Reconnecting... W0223 09:53:37.529769 56228 clientconn.go:1223] grpc: addrConn.createTransport failed to connect to {http://127.0.0.1:2379 <nil> 0 <nil>}. Err :connection error: desc = "transport: Error while dialing dial tcp 127.0.0.1:2379: connect: connection refused". Reconnecting... W0223 09:53:37.530108 56228 clientconn.go:1223] grpc: addrConn.createTransport failed to connect to {http://127.0.0.1:2379 <nil> 0 <nil>}. Err :connection error: desc = "transport: Error while dialing dial tcp 127.0.0.1:2379: connect: connection refused". Reconnecting... W0223 09:53:37.530123 56228 clientconn.go:1223] grpc: addrConn.createTransport failed to connect to {http://127.0.0.1:2379 <nil> 0 <nil>}. 
Err :connection error: desc = "transport: Error while dialing dial tcp 127.0.0.1:2379: connect: connection refused". Reconnecting... W0223 09:53:37.530170 56228 clientconn.go:1223] grpc: addrConn.createTransport failed to connect to {http://127.0.0.1:2379 <nil> 0 <nil>}. Err :connection error: desc = "transport: Error while dialing dial tcp 127.0.0.1:2379: connect: connection refused". Reconnecting... W0223 09:53:37.530205 56228 clientconn.go:1223] grpc: addrConn.createTransport failed to connect to {http://127.0.0.1:2379 <nil> 0 <nil>}. Err :connection error: desc = "transport: Error while dialing dial tcp 127.0.0.1:2379: connect: connection refused". Reconnecting... W0223 09:53:37.530209 56228 clientconn.go:1223] grpc: addrConn.createTransport failed to connect to {http://127.0.0.1:2379 <nil> 0 <nil>}. Err :connection error: desc = "transport: Error while dialing dial tcp 127.0.0.1:2379: connect: connection refused". Reconnecting... W0223 09:53:37.530216 56228 clientconn.go:1223] grpc: addrConn.createTransport failed to connect to {http://127.0.0.1:2379 <nil> 0 <nil>}. Err :connection error: desc = "transport: Error while dialing dial tcp 127.0.0.1:2379: connect: connection refused". Reconnecting... W0223 09:53:37.530238 56228 clientconn.go:1223] grpc: addrConn.createTransport failed to connect to {http://127.0.0.1:2379 <nil> 0 <nil>}. Err :connection error: desc = "transport: Error while dialing dial tcp 127.0.0.1:2379: connect: connection refused". Reconnecting... W0223 09:53:37.530275 56228 clientconn.go:1223] grpc: addrConn.createTransport failed to connect to {http://127.0.0.1:2379 <nil> 0 <nil>}. Err :connection error: desc = "transport: Error while dialing dial tcp 127.0.0.1:2379: connect: connection refused". Reconnecting... W0223 09:53:37.530281 56228 clientconn.go:1223] grpc: addrConn.createTransport failed to connect to {http://127.0.0.1:2379 <nil> 0 <nil>}. Err :connection error: desc = "transport: Error while dialing dial tcp 127.0.0.1:2379: connect: connection refused". Reconnecting... W0223 09:53:37.530318 56228 clientconn.go:1223] grpc: addrConn.createTransport failed to connect to {http://127.0.0.1:2379 <nil> 0 <nil>}. Err :connection error: desc = "transport: Error while dialing dial tcp 127.0.0.1:2379: connect: connection refused". Reconnecting... W0223 09:53:37.530323 56228 clientconn.go:1223] grpc: addrConn.createTransport failed to connect to {http://127.0.0.1:2379 <nil> 0 <nil>}. Err :connection error: desc = "transport: Error while dialing dial tcp 127.0.0.1:2379: connect: connection refused". Reconnecting... W0223 09:53:37.530322 56228 clientconn.go:1223] grpc: addrConn.createTransport failed to connect to {http://127.0.0.1:2379 <nil> 0 <nil>}. Err :connection error: desc = "transport: Error while dialing dial tcp 127.0.0.1:2379: connect: connection refused". Reconnecting... W0223 09:53:37.530345 56228 clientconn.go:1223] grpc: addrConn.createTransport failed to connect to {http://127.0.0.1:2379 <nil> 0 <nil>}. Err :connection error: desc = "transport: Error while dialing dial tcp 127.0.0.1:2379: connect: connection refused". Reconnecting... W0223 09:53:37.530359 56228 clientconn.go:1223] grpc: addrConn.createTransport failed to connect to {http://127.0.0.1:2379 <nil> 0 <nil>}. Err :connection error: desc = "transport: Error while dialing dial tcp 127.0.0.1:2379: connect: connection refused". Reconnecting... W0223 09:53:37.530284 56228 clientconn.go:1223] grpc: addrConn.createTransport failed to connect to {http://127.0.0.1:2379 <nil> 0 <nil>}. 
Err :connection error: desc = "transport: Error while dialing dial tcp 127.0.0.1:2379: connect: connection refused". Reconnecting... W0223 09:53:37.530383 56228 clientconn.go:1223] grpc: addrConn.createTransport failed to connect to {http://127.0.0.1:2379 <nil> 0 <nil>}. Err :connection error: desc = "transport: Error while dialing dial tcp 127.0.0.1:2379: connect: connection refused". Reconnecting... I0223 09:53:37.527424 56228 clientconn.go:897] blockingPicker: the picked transport is not ready, loop back to repick junit report dir: /logs/artifacts +++ [0223 09:53:37] Clean up complete + make test-integration W0223 09:53:38.526350 56228 clientconn.go:1223] grpc: addrConn.createTransport failed to connect to {http://127.0.0.1:2379 <nil> 0 <nil>}. Err :connection error: desc = "transport: Error while dialing dial tcp 127.0.0.1:2379: connect: connection refused". Reconnecting... W0223 09:53:38.526494 56228 clientconn.go:1223] grpc: addrConn.createTransport failed to connect to {http://127.0.0.1:2379 <nil> 0 <nil>}. Err :connection error: desc = "transport: Error while dialing dial tcp 127.0.0.1:2379: connect: connection refused". Reconnecting... W0223 09:53:38.526494 56228 clientconn.go:1223] grpc: addrConn.createTransport failed to connect to {http://127.0.0.1:2379 <nil> 0 <nil>}. Err :connection error: desc = "transport: Error while dialing dial tcp 127.0.0.1:2379: connect: connection refused". Reconnecting... W0223 09:53:38.526541 56228 clientconn.go:1223] grpc: addrConn.createTransport failed to connect to {http://127.0.0.1:2379 <nil> 0 <nil>}. Err :connection error: desc = "transport: Error while dialing dial tcp 127.0.0.1:2379: connect: connection refused". Reconnecting... W0223 09:53:38.526602 56228 clientconn.go:1223] grpc: addrConn.createTransport failed to connect to {http://127.0.0.1:2379 <nil> 0 <nil>}. Err :connection error: desc = "transport: Error while dialing dial tcp 127.0.0.1:2379: connect: connection refused". Reconnecting... W0223 09:53:38.527104 56228 clientconn.go:1223] grpc: addrConn.createTransport failed to connect to {http://127.0.0.1:2379 <nil> 0 <nil>}. Err :connection error: desc = "transport: Error while dialing dial tcp 127.0.0.1:2379: connect: connection refused". Reconnecting... W0223 09:53:38.527178 56228 clientconn.go:1223] grpc: addrConn.createTransport failed to connect to {http://127.0.0.1:2379 <nil> 0 <nil>}. Err :connection error: desc = "transport: Error while dialing dial tcp 127.0.0.1:2379: connect: connection refused". Reconnecting... W0223 09:53:38.527384 56228 clientconn.go:1223] grpc: addrConn.createTransport failed to connect to {http://127.0.0.1:2379 <nil> 0 <nil>}. Err :connection error: desc = "transport: Error while dialing dial tcp 127.0.0.1:2379: connect: connection refused". Reconnecting... W0223 09:53:38.527867 56228 clientconn.go:1223] grpc: addrConn.createTransport failed to connect to {http://127.0.0.1:2379 <nil> 0 <nil>}. Err :connection error: desc = "transport: Error while dialing dial tcp 127.0.0.1:2379: connect: connection refused". Reconnecting... W0223 09:53:38.527926 56228 clientconn.go:1223] grpc: addrConn.createTransport failed to connect to {http://127.0.0.1:2379 <nil> 0 <nil>}. Err :connection error: desc = "transport: Error while dialing dial tcp 127.0.0.1:2379: connect: connection refused". Reconnecting... W0223 09:53:38.527963 56228 clientconn.go:1223] grpc: addrConn.createTransport failed to connect to {http://127.0.0.1:2379 <nil> 0 <nil>}. 
W0223 09:53:38.526350 56228 clientconn.go:1223] grpc: addrConn.createTransport failed to connect to {http://127.0.0.1:2379 <nil> 0 <nil>}. Err :connection error: desc = "transport: Error while dialing dial tcp 127.0.0.1:2379: connect: connection refused". Reconnecting...
... skipping repeated gRPC reconnect warnings from 09:53:38 through 09:53:42 while etcd was still unavailable ...
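The block of warnings above is the test apiserver's etcd client retrying against 127.0.0.1:2379 while no etcd process is listening there between the "Clean up complete" step and the restart below: the I-lines come from the gRPC balancer re-picking a sub-connection, the W-lines from the transport dialer hitting "connection refused". As a rough, hypothetical illustration only (the endpoint is taken from the log; the import path assumes an etcd 3.4-style module layout, and the exact retry and logging behavior depends on the vendored client and gRPC versions), a minimal client like the following would produce the same kind of reconnect noise while the endpoint is down:

package main

import (
	"context"
	"fmt"
	"time"

	clientv3 "go.etcd.io/etcd/clientv3" // assumption: etcd 3.4.x module path
)

func main() {
	// Point a plain etcd v3 client at the endpoint from the log. While nothing
	// listens on 127.0.0.1:2379, the underlying gRPC ClientConn keeps dialing
	// and (at this log verbosity) prints "Reconnecting..." style warnings.
	cli, err := clientv3.New(clientv3.Config{
		Endpoints:   []string{"http://127.0.0.1:2379"},
		DialTimeout: 2 * time.Second,
	})
	if err != nil {
		fmt.Println("client construction failed:", err)
		return
	}
	defer cli.Close()

	// A request against the dead endpoint fails once its context expires;
	// until then the client keeps re-picking and re-dialing in the background.
	ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second)
	defer cancel()
	if _, err := cli.Get(ctx, "health-probe-key"); err != nil {
		fmt.Println("etcd not reachable yet:", err)
	}
}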
+++ [0223 09:53:42] Checking etcd is on PATH
/home/prow/go/src/k8s.io/kubernetes/third_party/etcd/etcd
+++ [0223 09:53:42] Starting etcd instance
etcd --advertise-client-urls http://127.0.0.1:2379 --data-dir /tmp/tmp.ZNIkAGFA4m --listen-client-urls http://127.0.0.1:2379 --log-level=debug > "/logs/artifacts/etcd.998f39db-75ba-11eb-a510-0e7563c59f3a.root.log.DEBUG.20210223-095342.95469" 2>/dev/null
Waiting for etcd to come up.
+++ [0223 09:53:42] On try 2, etcd: : {"health":"true"}
... skipping 4 lines ...
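The "Waiting for etcd to come up." / "On try 2, etcd: : {"health":"true"}" lines above come from the harness polling etcd's /health endpoint after restarting it (presumably the shell loop in the repo's etcd helper script). A rough Go rendering of that kind of readiness check, not the actual script, with the URL, attempt count, and delay chosen only for illustration:

package main

import (
	"fmt"
	"io"
	"net/http"
	"strings"
	"time"
)

// waitForEtcd polls the etcd /health endpoint until it reports
// {"health":"true"} or the attempts are exhausted.
func waitForEtcd(url string, attempts int, delay time.Duration) error {
	for try := 1; try <= attempts; try++ {
		resp, err := http.Get(url)
		if err == nil {
			body, _ := io.ReadAll(resp.Body)
			resp.Body.Close()
			if strings.Contains(string(body), `"health":"true"`) {
				fmt.Printf("On try %d, etcd: %s\n", try, strings.TrimSpace(string(body)))
				return nil
			}
		}
		time.Sleep(delay)
	}
	return fmt.Errorf("etcd did not become healthy after %d attempts", attempts)
}

func main() {
	if err := waitForEtcd("http://127.0.0.1:2379/health", 30, time.Second); err != nil {
		fmt.Println(err)
	}
}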