PR | cofyc: Prioritizing nodes based on volume capacity |
Result | FAILURE |
Tests | 0 failed / 135 succeeded |
Started | |
Elapsed | 15m19s |
Revision | e681fffa7bba2670ee30041d3207c654bf6fae7b |
Refs | 96347 |
k8s.io/kubernetes/cmd/kubeadm/test/cmd TestCmdCompletion
k8s.io/kubernetes/cmd/kubeadm/test/cmd TestCmdCompletion/shell_not_expected
k8s.io/kubernetes/cmd/kubeadm/test/cmd TestCmdCompletion/unsupported_shell_type
k8s.io/kubernetes/cmd/kubeadm/test/cmd TestCmdInitAPIPort
k8s.io/kubernetes/cmd/kubeadm/test/cmd TestCmdInitAPIPort/accept_a_valid_port_number
k8s.io/kubernetes/cmd/kubeadm/test/cmd TestCmdInitAPIPort/fail_on_negative_port_number
k8s.io/kubernetes/cmd/kubeadm/test/cmd TestCmdInitAPIPort/fail_on_non-string_port
k8s.io/kubernetes/cmd/kubeadm/test/cmd TestCmdInitAPIPort/fail_on_too_large_port_number
k8s.io/kubernetes/cmd/kubeadm/test/cmd TestCmdInitCertPhaseCSR
k8s.io/kubernetes/cmd/kubeadm/test/cmd TestCmdInitCertPhaseCSR/fails_on_CSR
k8s.io/kubernetes/cmd/kubeadm/test/cmd TestCmdInitCertPhaseCSR/fails_on_all
k8s.io/kubernetes/cmd/kubeadm/test/cmd TestCmdInitCertPhaseCSR/generate_CSR
k8s.io/kubernetes/cmd/kubeadm/test/cmd TestCmdInitConfig
k8s.io/kubernetes/cmd/kubeadm/test/cmd TestCmdInitConfig/can't_load_old_component_config
k8s.io/kubernetes/cmd/kubeadm/test/cmd TestCmdInitConfig/can't_load_old_v1alpha1_config
k8s.io/kubernetes/cmd/kubeadm/test/cmd TestCmdInitConfig/can't_load_old_v1alpha2_config
k8s.io/kubernetes/cmd/kubeadm/test/cmd TestCmdInitConfig/can't_load_old_v1alpha3_config
k8s.io/kubernetes/cmd/kubeadm/test/cmd TestCmdInitConfig/can_load_current_component_config
k8s.io/kubernetes/cmd/kubeadm/test/cmd TestCmdInitConfig/can_load_v1beta1_config
k8s.io/kubernetes/cmd/kubeadm/test/cmd TestCmdInitConfig/can_load_v1beta2_config
k8s.io/kubernetes/cmd/kubeadm/test/cmd TestCmdInitConfig/don't_allow_mixed_arguments_v1beta1
k8s.io/kubernetes/cmd/kubeadm/test/cmd TestCmdInitConfig/don't_allow_mixed_arguments_v1beta2
k8s.io/kubernetes/cmd/kubeadm/test/cmd TestCmdInitConfig/fail_on_non_existing_path
k8s.io/kubernetes/cmd/kubeadm/test/cmd TestCmdInitFeatureGates
k8s.io/kubernetes/cmd/kubeadm/test/cmd TestCmdInitFeatureGates/feature_gate_IPv6DualStack=true
k8s.io/kubernetes/cmd/kubeadm/test/cmd TestCmdInitFeatureGates/feature_gate_PublicKeysECDSA=true
k8s.io/kubernetes/cmd/kubeadm/test/cmd TestCmdInitFeatureGates/no_feature_gates_passed
k8s.io/kubernetes/cmd/kubeadm/test/cmd TestCmdInitKubernetesVersion
k8s.io/kubernetes/cmd/kubeadm/test/cmd TestCmdInitKubernetesVersion/invalid_semantic_version_string_is_detected
k8s.io/kubernetes/cmd/kubeadm/test/cmd TestCmdInitKubernetesVersion/valid_version_is_accepted
k8s.io/kubernetes/cmd/kubeadm/test/cmd TestCmdInitToken
k8s.io/kubernetes/cmd/kubeadm/test/cmd TestCmdInitToken/invalid_token_non-lowercase
k8s.io/kubernetes/cmd/kubeadm/test/cmd TestCmdInitToken/invalid_token_size
k8s.io/kubernetes/cmd/kubeadm/test/cmd TestCmdInitToken/valid_token_is_accepted
k8s.io/kubernetes/cmd/kubeadm/test/cmd TestCmdJoinArgsMixed
k8s.io/kubernetes/cmd/kubeadm/test/cmd TestCmdJoinArgsMixed/discovery-token_and_config
k8s.io/kubernetes/cmd/kubeadm/test/cmd TestCmdJoinBadArgs
k8s.io/kubernetes/cmd/kubeadm/test/cmd TestCmdJoinBadArgs/discovery-token_and_discovery-file_can't_both_be_set
k8s.io/kubernetes/cmd/kubeadm/test/cmd TestCmdJoinBadArgs/discovery-token_or_discovery-file_must_be_set
k8s.io/kubernetes/cmd/kubeadm/test/cmd TestCmdJoinConfig
k8s.io/kubernetes/cmd/kubeadm/test/cmd TestCmdJoinConfig/config
k8s.io/kubernetes/cmd/kubeadm/test/cmd TestCmdJoinConfig/config_path
k8s.io/kubernetes/cmd/kubeadm/test/cmd TestCmdJoinDiscoveryFile
k8s.io/kubernetes/cmd/kubeadm/test/cmd TestCmdJoinDiscoveryFile/invalid_discovery_file
k8s.io/kubernetes/cmd/kubeadm/test/cmd TestCmdJoinDiscoveryFile/valid_discovery_file
k8s.io/kubernetes/cmd/kubeadm/test/cmd TestCmdJoinDiscoveryToken
k8s.io/kubernetes/cmd/kubeadm/test/cmd TestCmdJoinDiscoveryToken/valid_discovery_token
k8s.io/kubernetes/cmd/kubeadm/test/cmd TestCmdJoinDiscoveryToken/valid_discovery_token_url
k8s.io/kubernetes/cmd/kubeadm/test/cmd TestCmdJoinNodeName
k8s.io/kubernetes/cmd/kubeadm/test/cmd TestCmdJoinNodeName/valid_node_name
k8s.io/kubernetes/cmd/kubeadm/test/cmd TestCmdJoinTLSBootstrapToken
k8s.io/kubernetes/cmd/kubeadm/test/cmd TestCmdJoinTLSBootstrapToken/valid_bootstrap_token
k8s.io/kubernetes/cmd/kubeadm/test/cmd TestCmdJoinTLSBootstrapToken/valid_bootstrap_token_url
k8s.io/kubernetes/cmd/kubeadm/test/cmd TestCmdJoinToken
k8s.io/kubernetes/cmd/kubeadm/test/cmd TestCmdJoinToken/valid_token
k8s.io/kubernetes/cmd/kubeadm/test/cmd TestCmdJoinToken/valid_token_url
k8s.io/kubernetes/cmd/kubeadm/test/cmd TestCmdTokenDelete
k8s.io/kubernetes/cmd/kubeadm/test/cmd TestCmdTokenDelete/invalid_token
k8s.io/kubernetes/cmd/kubeadm/test/cmd TestCmdTokenDelete/no_token_provided
k8s.io/kubernetes/cmd/kubeadm/test/cmd TestCmdTokenGenerate
k8s.io/kubernetes/cmd/kubeadm/test/cmd TestCmdTokenGenerateTypoError
k8s.io/kubernetes/cmd/kubeadm/test/cmd TestCmdVersion
k8s.io/kubernetes/cmd/kubeadm/test/cmd TestCmdVersion/default_output
k8s.io/kubernetes/cmd/kubeadm/test/cmd TestCmdVersion/invalid_output_option
k8s.io/kubernetes/cmd/kubeadm/test/cmd TestCmdVersion/short_output
k8s.io/kubernetes/cmd/kubeadm/test/cmd TestCmdVersionOutputJsonOrYaml
k8s.io/kubernetes/cmd/kubeadm/test/cmd TestCmdVersionOutputJsonOrYaml/json_output
k8s.io/kubernetes/cmd/kubeadm/test/cmd TestCmdVersionOutputJsonOrYaml/yaml_output
test-cmd run_RESTMapper_evaluation_tests
test-cmd run_assert_categories_tests
test-cmd run_assert_short_name_tests
test-cmd run_authorization_tests
test-cmd run_certificates_tests
test-cmd run_client_config_tests
test-cmd run_cluster_management_tests
test-cmd run_clusterroles_tests
test-cmd run_configmap_tests
test-cmd run_convert_tests
test-cmd run_crd_tests
test-cmd run_create_job_tests
test-cmd run_create_secret_tests
test-cmd run_daemonset_history_tests
test-cmd run_daemonset_tests
test-cmd run_deployment_tests
test-cmd run_exec_credentials_tests
test-cmd run_impersonation_tests
test-cmd run_job_tests
test-cmd run_kubectl_all_namespace_tests
test-cmd run_kubectl_apply_deployments_tests
test-cmd run_kubectl_apply_tests
test-cmd run_kubectl_config_set_cluster_tests
test-cmd run_kubectl_config_set_credentials_tests
test-cmd run_kubectl_config_set_tests
test-cmd run_kubectl_create_error_tests
test-cmd run_kubectl_create_filter_tests
test-cmd run_kubectl_create_kustomization_directory_tests
test-cmd run_kubectl_debug_node_tests
test-cmd run_kubectl_debug_pod_tests
test-cmd run_kubectl_delete_allnamespaces_tests
test-cmd run_kubectl_diff_same_names
test-cmd run_kubectl_diff_tests
test-cmd run_kubectl_exec_pod_tests
test-cmd run_kubectl_exec_resource_name_tests
test-cmd run_kubectl_explain_tests
test-cmd run_kubectl_get_tests
test-cmd run_kubectl_local_proxy_tests
test-cmd run_kubectl_request_timeout_tests
test-cmd run_kubectl_run_tests
test-cmd run_kubectl_server_side_apply_tests
test-cmd run_kubectl_sort_by_tests
test-cmd run_kubectl_version_tests
test-cmd run_lists_tests
test-cmd run_multi_resources_tests
test-cmd run_namespace_tests
test-cmd run_nodes_tests
test-cmd run_persistent_volume_claims_tests
test-cmd run_persistent_volumes_tests
test-cmd run_plugins_tests
test-cmd run_pod_templates_tests
test-cmd run_pod_tests
test-cmd run_rc_tests
test-cmd run_resource_aliasing_tests
test-cmd run_retrieve_multiple_tests
test-cmd run_role_tests
test-cmd run_rs_tests
test-cmd run_save_config_tests
test-cmd run_secrets_test
test-cmd run_service_accounts_tests
test-cmd run_service_tests
test-cmd run_stateful_set_tests
test-cmd run_statefulset_history_tests
test-cmd run_storage_class_tests
test-cmd run_swagger_tests
test-cmd run_template_output_tests
test-cmd run_wait_tests
... skipping 70 lines ...
Recording: record_command_canary
Running command: record_command_canary
+++ Running case: test-cmd.record_command_canary
+++ working dir: /home/prow/go/src/k8s.io/kubernetes
+++ command: record_command_canary
/home/prow/go/src/k8s.io/kubernetes/test/cmd/legacy-script.sh: line 156: bogus-expected-to-fail: command not found
!!! [0223 10:11:41] Call tree:
!!! [0223 10:11:41]  1: /home/prow/go/src/k8s.io/kubernetes/test/cmd/../../third_party/forked/shell2junit/sh2ju.sh:47 record_command_canary(...)
!!! [0223 10:11:41]  2: /home/prow/go/src/k8s.io/kubernetes/test/cmd/../../third_party/forked/shell2junit/sh2ju.sh:112 eVal(...)
!!! [0223 10:11:41]  3: /home/prow/go/src/k8s.io/kubernetes/test/cmd/legacy-script.sh:132 juLog(...)
!!! [0223 10:11:41]  4: /home/prow/go/src/k8s.io/kubernetes/test/cmd/legacy-script.sh:160 record_command(...)
!!! [0223 10:11:41]  5: hack/make-rules/test-cmd.sh:35 source(...)
+++ exit code: 1
+++ error: 1
+++ [0223 10:11:41] Running kubeadm tests
+++ [0223 10:11:46] Building go targets for linux/amd64: cmd/kubeadm
+++ [0223 10:12:26] Running tests without code coverage
{"Time":"2021-02-23T10:13:48.000886345Z","Action":"output","Package":"k8s.io/kubernetes/cmd/kubeadm/test/cmd","Output":"ok \tk8s.io/kubernetes/cmd/kubeadm/test/cmd\t47.575s\n"}
✓ cmd/kubeadm/test/cmd (47.579s)
... skipping 372 lines ...
I0223 10:16:05.240671 56345 client.go:360] parsed scheme: "passthrough"
I0223 10:16:05.240722 56345 passthrough.go:48] ccResolverWrapper: sending update to cc: {[{http://127.0.0.1:2379 <nil> 0 <nil>}] <nil> <nil>}
I0223 10:16:05.240731 56345 clientconn.go:948] ClientConn switching balancer to "pick_first"
+++ [0223 10:16:08] Generate kubeconfig for controller-manager
+++ [0223 10:16:08] Starting controller-manager
I0223 10:16:09.470280 60038 serving.go:347] Generated self-signed cert in-memory
W0223 10:16:09.841140 60038 authentication.go:410] failed to read in-cluster kubeconfig for delegated authentication: open /var/run/secrets/kubernetes.io/serviceaccount/token: no such file or directory
W0223 10:16:09.841195 60038 authentication.go:307] No authentication-kubeconfig provided in order to lookup client-ca-file in configmap/extension-apiserver-authentication in kube-system, so client certificate authentication won't work.
W0223 10:16:09.841202 60038 authentication.go:331] No authentication-kubeconfig provided in order to lookup requestheader-client-ca-file in configmap/extension-apiserver-authentication in kube-system, so request-header client certificate authentication won't work.
W0223 10:16:09.841252 60038 authorization.go:216] failed to read in-cluster kubeconfig for delegated authorization: open /var/run/secrets/kubernetes.io/serviceaccount/token: no such file or directory
W0223 10:16:09.841268 60038 authorization.go:184] No authorization-kubeconfig provided, so SubjectAccessReview of authorization tokens won't work.
I0223 10:16:09.841298 60038 controllermanager.go:174] Version: v1.21.0-alpha.3.456+c7e85d33636431
I0223 10:16:09.843810 60038 secure_serving.go:197] Serving securely on [::]:10257
I0223 10:16:09.843936 60038 tlsconfig.go:240] Starting DynamicServingCertificateController
I0223 10:16:09.845149 60038 deprecated_insecure_serving.go:53] Serving insecurely on [::]:10252
I0223 10:16:09.845583 60038 leaderelection.go:243] attempting to acquire leader lease kube-system/kube-controller-manager...
... skipping 48 lines ...
W0223 10:16:10.324081 60038 mutation_detector.go:53] Mutation detector is enabled, this will result in memory leakage.
I0223 10:16:10.324110 60038 controllermanager.go:540] Started "horizontalpodautoscaling" W0223 10:16:10.324167 60038 controllermanager.go:532] Skipping "nodeipam" I0223 10:16:10.324263 60038 horizontal.go:169] Starting HPA controller I0223 10:16:10.324298 60038 shared_informer.go:240] Waiting for caches to sync for HPA I0223 10:16:10.324401 60038 node_lifecycle_controller.go:76] Sending events to api server E0223 10:16:10.324439 60038 core.go:231] failed to start cloud node lifecycle controller: no cloud provider provided W0223 10:16:10.324449 60038 controllermanager.go:532] Skipping "cloud-node-lifecycle" I0223 10:16:10.324675 60038 controllermanager.go:540] Started "podgc" I0223 10:16:10.324743 60038 gc_controller.go:89] Starting GC controller I0223 10:16:10.324764 60038 shared_informer.go:240] Waiting for caches to sync for GC W0223 10:16:10.324901 60038 mutation_detector.go:53] Mutation detector is enabled, this will result in memory leakage. W0223 10:16:10.324929 60038 mutation_detector.go:53] Mutation detector is enabled, this will result in memory leakage. ... skipping 98 lines ... I0223 10:16:10.797191 60038 pvc_protection_controller.go:110] "Starting PVC protection controller" I0223 10:16:10.797217 60038 shared_informer.go:240] Waiting for caches to sync for PVC protection I0223 10:16:10.797532 60038 controllermanager.go:540] Started "endpointslice" W0223 10:16:10.797555 60038 controllermanager.go:519] "tokencleaner" is disabled I0223 10:16:10.797696 60038 endpointslice_controller.go:237] Starting endpoint slice controller I0223 10:16:10.797707 60038 shared_informer.go:240] Waiting for caches to sync for endpoint_slice E0223 10:16:10.797910 60038 core.go:91] Failed to start service controller: WARNING: no cloud provider provided, services of type LoadBalancer will fail W0223 10:16:10.797923 60038 controllermanager.go:532] Skipping "service" I0223 10:16:10.820757 60038 shared_informer.go:247] Caches are synced for ClusterRoleAggregator E0223 10:16:10.837076 60038 clusterroleaggregation_controller.go:181] admin failed with : Operation cannot be fulfilled on clusterroles.rbac.authorization.k8s.io "admin": the object has been modified; please apply your changes to the latest version and try again E0223 10:16:10.839232 60038 clusterroleaggregation_controller.go:181] view failed with : Operation cannot be fulfilled on clusterroles.rbac.authorization.k8s.io "view": the object has been modified; please apply your changes to the latest version and try again I0223 10:16:10.839862 60038 shared_informer.go:247] Caches are synced for crt configmap E0223 10:16:10.845528 60038 clusterroleaggregation_controller.go:181] admin failed with : Operation cannot be fulfilled on clusterroles.rbac.authorization.k8s.io "admin": the object has been modified; please apply your changes to the latest version and try again node/127.0.0.1 created W0223 10:16:10.859529 60038 actual_state_of_world.go:534] Failed to update statusUpdateNeeded field in actual state of world: Failed to set statusUpdateNeeded to needed true, because nodeName="127.0.0.1" does not exist +++ [0223 10:16:10] Checking kubectl version I0223 10:16:10.896066 60038 shared_informer.go:247] Caches are synced for TTL I0223 10:16:10.923391 60038 shared_informer.go:247] Caches are synced for certificate-csrapproving I0223 10:16:10.923428 60038 shared_informer.go:247] Caches are synced for endpoint_slice_mirroring I0223 10:16:10.923442 60038 shared_informer.go:247] Caches are synced for expand I0223 10:16:10.923482 60038 shared_informer.go:247] 
Caches are synced for PV protection ... skipping 34 lines ... W0223 10:16:11.251799 60038 mutation_detector.go:53] Mutation detector is enabled, this will result in memory leakage. W0223 10:16:11.251966 60038 mutation_detector.go:53] Mutation detector is enabled, this will result in memory leakage. W0223 10:16:11.252014 60038 mutation_detector.go:53] Mutation detector is enabled, this will result in memory leakage. W0223 10:16:11.252321 60038 mutation_detector.go:53] Mutation detector is enabled, this will result in memory leakage. W0223 10:16:11.252688 60038 mutation_detector.go:53] Mutation detector is enabled, this will result in memory leakage. I0223 10:16:11.253182 60038 shared_informer.go:240] Waiting for caches to sync for garbage collector The Service "kubernetes" is invalid: spec.clusterIPs: Invalid value: []string{"10.0.0.1"}: failed to allocated ip:10.0.0.1 with error:provided IP is already allocated NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE kubernetes ClusterIP 10.0.0.1 <none> 443/TCP 34s Recording: run_kubectl_version_tests Running command: run_kubectl_version_tests +++ Running case: test-cmd.run_kubectl_version_tests ... skipping 103 lines ... +++ working dir: /home/prow/go/src/k8s.io/kubernetes +++ command: run_RESTMapper_evaluation_tests +++ [0223 10:16:15] Creating namespace namespace-1614075375-20479 namespace/namespace-1614075375-20479 created Context "test" modified. +++ [0223 10:16:16] Testing RESTMapper +++ [0223 10:16:16] "kubectl get unknownresourcetype" returns error as expected: error: the server doesn't have a resource type "unknownresourcetype" +++ exit code: 0 NAME SHORTNAMES APIVERSION NAMESPACED KIND bindings v1 true Binding componentstatuses cs v1 false ComponentStatus configmaps cm v1 true ConfigMap endpoints ep v1 true Endpoints ... skipping 62 lines ... namespace/namespace-1614075380-25402 created Context "test" modified. +++ [0223 10:16:20] Testing clusterroles [32mrbac.sh:29: Successful get clusterroles/cluster-admin {{.metadata.name}}: cluster-admin (B[m[32mrbac.sh:30: Successful get clusterrolebindings/cluster-admin {{.metadata.name}}: cluster-admin (B[mSuccessful message:Error from server (NotFound): clusterroles.rbac.authorization.k8s.io "pod-admin" not found has:clusterroles.rbac.authorization.k8s.io "pod-admin" not found clusterrole.rbac.authorization.k8s.io/pod-admin created (dry run) clusterrole.rbac.authorization.k8s.io/pod-admin created (server dry run) Successful message:Error from server (NotFound): clusterroles.rbac.authorization.k8s.io "pod-admin" not found has:clusterroles.rbac.authorization.k8s.io "pod-admin" not found clusterrole.rbac.authorization.k8s.io/pod-admin created [32mrbac.sh:42: Successful get clusterrole/pod-admin {{range.rules}}{{range.verbs}}{{.}}:{{end}}{{end}}: *: (B[mSuccessful message:warning: deleting cluster-scoped resources, not scoped to the provided namespace clusterrole.rbac.authorization.k8s.io "pod-admin" deleted ... skipping 18 lines ... 
(B[mclusterrole.rbac.authorization.k8s.io/url-reader created [32mrbac.sh:61: Successful get clusterrole/url-reader {{range.rules}}{{range.verbs}}{{.}}:{{end}}{{end}}: get: (B[m[32mrbac.sh:62: Successful get clusterrole/url-reader {{range.rules}}{{range.nonResourceURLs}}{{.}}:{{end}}{{end}}: /logs/*:/healthz/*: (B[mclusterrole.rbac.authorization.k8s.io/aggregation-reader created [32mrbac.sh:64: Successful get clusterrole/aggregation-reader {{.metadata.name}}: aggregation-reader (B[mSuccessful message:Error from server (NotFound): clusterrolebindings.rbac.authorization.k8s.io "super-admin" not found has:clusterrolebindings.rbac.authorization.k8s.io "super-admin" not found clusterrolebinding.rbac.authorization.k8s.io/super-admin created (dry run) clusterrolebinding.rbac.authorization.k8s.io/super-admin created (server dry run) Successful message:Error from server (NotFound): clusterrolebindings.rbac.authorization.k8s.io "super-admin" not found has:clusterrolebindings.rbac.authorization.k8s.io "super-admin" not found clusterrolebinding.rbac.authorization.k8s.io/super-admin created [32mrbac.sh:77: Successful get clusterrolebinding/super-admin {{range.subjects}}{{.name}}:{{end}}: super-admin: (B[mclusterrolebinding.rbac.authorization.k8s.io/super-admin subjects updated (dry run) clusterrolebinding.rbac.authorization.k8s.io/super-admin subjects updated (server dry run) [32mrbac.sh:80: Successful get clusterrolebinding/super-admin {{range.subjects}}{{.name}}:{{end}}: super-admin: ... skipping 63 lines ... [32mrbac.sh:102: Successful get clusterrolebinding/super-admin {{range.subjects}}{{.name}}:{{end}}: super-admin:foo:test-all-user: (B[m[32mrbac.sh:103: Successful get clusterrolebinding/super-group {{range.subjects}}{{.name}}:{{end}}: the-group:foo:test-all-user: (B[m[32mrbac.sh:104: Successful get clusterrolebinding/super-sa {{range.subjects}}{{.name}}:{{end}}: sa-name:foo:test-all-user: (B[mrolebinding.rbac.authorization.k8s.io/admin created (dry run) rolebinding.rbac.authorization.k8s.io/admin created (server dry run) Successful message:Error from server (NotFound): rolebindings.rbac.authorization.k8s.io "admin" not found has: not found rolebinding.rbac.authorization.k8s.io/admin created [32mrbac.sh:113: Successful get rolebinding/admin {{.roleRef.kind}}: ClusterRole (B[m[32mrbac.sh:114: Successful get rolebinding/admin {{range.subjects}}{{.name}}:{{end}}: default-admin: (B[mrolebinding.rbac.authorization.k8s.io/admin subjects updated [32mrbac.sh:116: Successful get rolebinding/admin {{range.subjects}}{{.name}}:{{end}}: default-admin:foo: ... skipping 29 lines ... message:Warning: rbac.authorization.k8s.io/v1beta1 Role is deprecated in v1.17+, unavailable in v1.22+; use rbac.authorization.k8s.io/v1 Role No resources found in namespace-1614075387-16907 namespace. has:Role is deprecated Successful message:Warning: rbac.authorization.k8s.io/v1beta1 Role is deprecated in v1.17+, unavailable in v1.22+; use rbac.authorization.k8s.io/v1 Role No resources found in namespace-1614075387-16907 namespace. Error: 1 warning received has:Role is deprecated Successful message:Warning: rbac.authorization.k8s.io/v1beta1 Role is deprecated in v1.17+, unavailable in v1.22+; use rbac.authorization.k8s.io/v1 Role No resources found in namespace-1614075387-16907 namespace. 
Error: 1 warning received has:Error: 1 warning received role.rbac.authorization.k8s.io/pod-admin created (dry run) role.rbac.authorization.k8s.io/pod-admin created (server dry run) Successful message:Error from server (NotFound): roles.rbac.authorization.k8s.io "pod-admin" not found has: not found role.rbac.authorization.k8s.io/pod-admin created [32mrbac.sh:163: Successful get role/pod-admin {{range.rules}}{{range.verbs}}{{.}}:{{end}}{{end}}: *: (B[m[32mrbac.sh:164: Successful get role/pod-admin {{range.rules}}{{range.resources}}{{.}}:{{end}}{{end}}: pods: (B[m[32mrbac.sh:165: Successful get role/pod-admin {{range.rules}}{{range.apiGroups}}{{.}}:{{end}}{{end}}: : (B[mSuccessful ... skipping 412 lines ... has:valid-pod Successful message:NAME READY STATUS RESTARTS AGE valid-pod 0/1 Pending 0 1s has:valid-pod [32mcore.sh:190: Successful get pods {{range.items}}{{.metadata.name}}:{{end}}: valid-pod: (B[merror: resource(s) were provided, but no name, label selector, or --all flag specified [32mcore.sh:194: Successful get pods {{range.items}}{{.metadata.name}}:{{end}}: valid-pod: (B[m[32mcore.sh:198: Successful get pods {{range.items}}{{.metadata.name}}:{{end}}: valid-pod: (B[merror: setting 'all' parameter but found a non empty selector. [32mcore.sh:202: Successful get pods {{range.items}}{{.metadata.name}}:{{end}}: valid-pod: (B[m[32mcore.sh:206: Successful get pods {{range.items}}{{.metadata.name}}:{{end}}: valid-pod: (B[mwarning: Immediate deletion does not wait for confirmation that the running resource has been terminated. The resource may continue to run on the cluster indefinitely. pod "valid-pod" force deleted [32mcore.sh:210: Successful get pods -l'name in (valid-pod)' {{range.items}}{{.metadata.name}}:{{end}}: (B[m[32mcore.sh:215: Successful get namespaces {{range.items}}{{ if eq .metadata.name \"test-kubectl-describe-pod\" }}found{{end}}{{end}}:: : ... skipping 19 lines ... (B[mpoddisruptionbudget.policy/test-pdb-2 created [32mcore.sh:259: Successful get pdb/test-pdb-2 --namespace=test-kubectl-describe-pod {{.spec.minAvailable}}: 50% (B[mpoddisruptionbudget.policy/test-pdb-3 created [32mcore.sh:265: Successful get pdb/test-pdb-3 --namespace=test-kubectl-describe-pod {{.spec.maxUnavailable}}: 2 (B[mpoddisruptionbudget.policy/test-pdb-4 created [32mcore.sh:269: Successful get pdb/test-pdb-4 --namespace=test-kubectl-describe-pod {{.spec.maxUnavailable}}: 50% (B[merror: min-available and max-unavailable cannot be both specified [32mcore.sh:275: Successful get pods --namespace=test-kubectl-describe-pod {{range.items}}{{.metadata.name}}:{{end}}: (B[mpod/env-test-pod created matched TEST_CMD_1 matched <set to the key 'key-1' in secret 'test-secret'> matched TEST_CMD_2 matched <set to the key 'key-2' of config map 'test-configmap'> ... skipping 190 lines ... 
has:kubectl-create service "modified" deleted replicationcontroller "modified" deleted [32mcore.sh:471: Successful get service {{range.items}}{{.metadata.name}}:{{end}}: (B[m[32mcore.sh:472: Successful get rc {{range.items}}{{.metadata.name}}:{{end}}: (B[mservice/modified created E0223 10:16:54.099715 60038 garbagecollector.go:350] error syncing item &garbagecollector.node{identity:garbagecollector.objectReference{OwnerReference:v1.OwnerReference{APIVersion:"discovery.k8s.io/v1beta1", Kind:"EndpointSlice", Name:"modified-6h6hh", UID:"1f4f484c-9b5f-45c3-ad64-ea171900a9dd", Controller:(*bool)(nil), BlockOwnerDeletion:(*bool)(nil)}, Namespace:"namespace-1614075408-2319"}, dependentsLock:sync.RWMutex{w:sync.Mutex{state:0, sema:0x0}, writerSem:0x0, readerSem:0x0, readerCount:1, readerWait:0}, dependents:map[*garbagecollector.node]struct {}{}, deletingDependents:false, deletingDependentsLock:sync.RWMutex{w:sync.Mutex{state:0, sema:0x0}, writerSem:0x0, readerSem:0x0, readerCount:0, readerWait:0}, beingDeleted:false, beingDeletedLock:sync.RWMutex{w:sync.Mutex{state:0, sema:0x0}, writerSem:0x0, readerSem:0x0, readerCount:0, readerWait:0}, virtual:false, virtualLock:sync.RWMutex{w:sync.Mutex{state:0, sema:0x0}, writerSem:0x0, readerSem:0x0, readerCount:0, readerWait:0}, owners:[]v1.OwnerReference{v1.OwnerReference{APIVersion:"v1", Kind:"Service", Name:"modified", UID:"4c2c80c7-b3a8-465e-90a6-d3d06eb87e1e", Controller:(*bool)(0xc001e7d550), BlockOwnerDeletion:(*bool)(0xc001e7d551)}}}: endpointslices.discovery.k8s.io "modified-6h6hh" not found replicationcontroller/modified created I0223 10:16:54.107390 60038 event.go:291] "Event occurred" object="namespace-1614075408-2319/modified" kind="ReplicationController" apiVersion="v1" type="Normal" reason="SuccessfulCreate" message="Created pod: modified-q5hg5" [32mcore.sh:476: Successful get service {{range.items}}{{.metadata.name}}:{{end}}: modified: (B[m[32mcore.sh:477: Successful get rc {{range.items}}{{.metadata.name}}:{{end}}: modified: (B[mservice "modified" deleted replicationcontroller "modified" deleted ... skipping 22 lines ... 
[32mcore.sh:534: Successful get pods {{range.items}}{{(index .spec.containers 0).image}}:{{end}}: k8s.gcr.io/pause:3.4.1: (B[mSuccessful message:kubectl-create kubectl-patch has:kubectl-patch pod/valid-pod patched [32mcore.sh:554: Successful get pods {{range.items}}{{(index .spec.containers 0).image}}:{{end}}: nginx: (B[m+++ [0223 10:16:57] "kubectl patch with resourceVersion 588" returns error as expected: Error from server (Conflict): Operation cannot be fulfilled on pods "valid-pod": the object has been modified; please apply your changes to the latest version and try again pod "valid-pod" deleted pod/valid-pod replaced [32mcore.sh:578: Successful get pod valid-pod {{(index .spec.containers 0).name}}: replaced-k8s-serve-hostname (B[mSuccessful message:kubectl-replace has:kubectl-replace Successful message:error: --grace-period must have --force specified has:\-\-grace-period must have \-\-force specified Successful message:error: --timeout must have --force specified has:\-\-timeout must have \-\-force specified node/node-v1-test created W0223 10:16:57.998585 60038 actual_state_of_world.go:534] Failed to update statusUpdateNeeded field in actual state of world: Failed to set statusUpdateNeeded to needed true, because nodeName="node-v1-test" does not exist [32mcore.sh:606: Successful get node node-v1-test {{range.items}}{{if .metadata.annotations.a}}found{{end}}{{end}}:: : (B[mnode/node-v1-test replaced (server dry run) node/node-v1-test replaced (dry run) [32mcore.sh:631: Successful get node node-v1-test {{range.items}}{{if .metadata.annotations.a}}found{{end}}{{end}}:: : (B[mnode/node-v1-test replaced [32mcore.sh:647: Successful get node node-v1-test {{.metadata.annotations.a}}: b ... skipping 29 lines ... spec: containers: - image: k8s.gcr.io/pause:3.4.1 name: kubernetes-pause has:localonlyvalue [32mcore.sh:683: Successful get pod valid-pod {{.metadata.labels.name}}: valid-pod (B[merror: 'name' already has a value (valid-pod), and --overwrite is false [32mcore.sh:687: Successful get pod valid-pod {{.metadata.labels.name}}: valid-pod (B[m[32mcore.sh:691: Successful get pod valid-pod {{.metadata.labels.name}}: valid-pod (B[mpod/valid-pod labeled [32mcore.sh:695: Successful get pod valid-pod {{.metadata.labels.name}}: valid-pod-super-sayan (B[m[32mcore.sh:699: Successful get pods {{range.items}}{{.metadata.name}}:{{end}}: valid-pod: (B[mwarning: Immediate deletion does not wait for confirmation that the running resource has been terminated. The resource may continue to run on the cluster indefinitely. ... skipping 83 lines ... +++ Running case: test-cmd.run_kubectl_create_error_tests +++ working dir: /home/prow/go/src/k8s.io/kubernetes +++ command: run_kubectl_create_error_tests +++ [0223 10:17:08] Creating namespace namespace-1614075428-5344 namespace/namespace-1614075428-5344 created Context "test" modified. +++ [0223 10:17:08] Testing kubectl create with error Error: must specify one of -f and -k Create a resource from a file or from stdin. JSON and YAML formats are accepted. Examples: ... skipping 44 lines ... Usage: kubectl create -f FILENAME [options] Use "kubectl <command> --help" for more information about a given command. Use "kubectl options" for a list of global command-line options (applies to all commands). 
+++ [0223 10:17:08] "kubectl create with empty string list returns error as expected: error: error validating "hack/testdata/invalid-rc-with-empty-args.yaml": error validating data: ValidationError(ReplicationController.spec.template.spec.containers[0].args): unknown object type "nil" in ReplicationController.spec.template.spec.containers[0].args[0]; if you choose to ignore these errors, turn validation off with --validate=false +++ exit code: 0 Recording: run_kubectl_apply_tests Running command: run_kubectl_apply_tests +++ Running case: test-cmd.run_kubectl_apply_tests +++ working dir: /home/prow/go/src/k8s.io/kubernetes ... skipping 31 lines ... Waiting for Get pods {{range.items}}{{.metadata.name}}:{{end}} : expected: , got: test-deployment-retainkeys-8695b756f8-7thdw: Waiting for Get pods {{range.items}}{{.metadata.name}}:{{end}} : expected: , got: test-deployment-retainkeys-8695b756f8-7thdw: [32mapply.sh:88: Successful get pods {{range.items}}{{.metadata.name}}:{{end}}: (B[mpod/selector-test-pod created [32mapply.sh:92: Successful get pods selector-test-pod {{.metadata.labels.name}}: selector-test-pod (B[mSuccessful message:Error from server (NotFound): pods "selector-test-pod-dont-apply" not found has:pods "selector-test-pod-dont-apply" not found pod "selector-test-pod" deleted [32mapply.sh:101: Successful get pods {{range.items}}{{.metadata.name}}:{{end}}: (B[mW0223 10:17:13.674465 68488 helpers.go:567] --dry-run=true is deprecated (boolean value) and can be replaced with --dry-run=client. pod/test-pod created (dry run) pod/test-pod created (dry run) ... skipping 34 lines ... (B[mpod/b created [32mapply.sh:196: Successful get pods a {{.metadata.name}}: a (B[m[32mapply.sh:197: Successful get pods b -n nsb {{.metadata.name}}: b (B[mpod "a" deleted pod "b" deleted Successful message:error: all resources selected for prune without explicitly passing --all. To prune all resources, pass the --all flag. If you did not mean to prune all resources, specify a label selector has:all resources selected for prune without explicitly passing --all pod/a created pod/b created service/prune-svc created Warning: extensions/v1beta1 Ingress is deprecated in v1.14+, unavailable in v1.22+; use networking.k8s.io/v1 Ingress I0223 10:17:22.617905 60038 horizontal.go:359] Horizontal Pod Autoscaler frontend has been deleted in namespace-1614075425-7906 ... skipping 44 lines ... (B[mpod/b unchanged pod/a pruned Warning: extensions/v1beta1 Ingress is deprecated in v1.14+, unavailable in v1.22+; use networking.k8s.io/v1 Ingress [32mapply.sh:254: Successful get pods -n nsb {{range.items}}{{.metadata.name}}:{{end}}: b: (B[mnamespace "nsb" deleted Successful message:error: the namespace from the provided object "nsb" does not match the namespace "foo". You must pass '--namespace=nsb' to perform this operation. has:the namespace from the provided object "nsb" does not match the namespace "foo". [32mapply.sh:265: Successful get services {{range.items}}{{.metadata.name}}:{{end}}: (B[mservice/a created [32mapply.sh:269: Successful get services a {{.metadata.name}}: a (B[mSuccessful message:The Service "a" is invalid: spec.clusterIPs[0]: Invalid value: []string{"10.0.0.12"}: may not change once set ... skipping 25 lines ... 
(B[m[32mapply.sh:291: Successful get deployment test-the-deployment {{.metadata.name}}: test-the-deployment (B[m[32mapply.sh:292: Successful get service test-the-service {{.metadata.name}}: test-the-service (B[mconfigmap "test-the-map" deleted service "test-the-service" deleted deployment.apps "test-the-deployment" deleted Successful message:Error from server (NotFound): namespaces "multi-resource-ns" not found has:namespaces "multi-resource-ns" not found [32mapply.sh:300: Successful get pods {{range.items}}{{.metadata.name}}:{{end}}: (B[mSuccessful message:namespace/multi-resource-ns created Error from server (NotFound): error when creating "hack/testdata/multi-resource-1.yaml": namespaces "multi-resource-ns" not found has:namespaces "multi-resource-ns" not found Successful message:Error from server (NotFound): pods "test-pod" not found has:pods "test-pod" not found pod/test-pod created namespace/multi-resource-ns unchanged [32mapply.sh:308: Successful get pods test-pod -n multi-resource-ns {{.metadata.name}}: test-pod (B[mpod "test-pod" deleted namespace "multi-resource-ns" deleted I0223 10:17:51.243723 60038 namespace_controller.go:185] Namespace has been deleted nsb [32mapply.sh:314: Successful get configmaps --field-selector=metadata.name=foo {{range.items}}{{.metadata.name}}:{{end}}: (B[mSuccessful message:configmap/foo created error: unable to recognize "hack/testdata/multi-resource-2.yaml": no matches for kind "Bogus" in version "example.com/v1" has:no matches for kind "Bogus" in version "example.com/v1" [32mapply.sh:320: Successful get configmaps foo {{.metadata.name}}: foo (B[mconfigmap "foo" deleted [32mapply.sh:326: Successful get pods {{range.items}}{{.metadata.name}}:{{end}}: (B[mSuccessful message:pod/pod-a created ... skipping 6 lines ... pod "pod-c" deleted [32mapply.sh:334: Successful get pods {{range.items}}{{.metadata.name}}:{{end}}: (B[m[32mapply.sh:338: Successful get crds {{range.items}}{{.metadata.name}}:{{end}}: (B[mSuccessful message:Warning: apiextensions.k8s.io/v1beta1 CustomResourceDefinition is deprecated in v1.16+, unavailable in v1.22+; use apiextensions.k8s.io/v1 CustomResourceDefinition customresourcedefinition.apiextensions.k8s.io/widgets.example.com created error: unable to recognize "hack/testdata/multi-resource-4.yaml": no matches for kind "Widget" in version "example.com/v1" has:no matches for kind "Widget" in version "example.com/v1" I0223 10:17:56.539868 56345 client.go:360] parsed scheme: "endpoint" I0223 10:17:56.539921 56345 endpoint.go:68] ccResolverWrapper: sending new addresses to cc: [{http://127.0.0.1:2379 <nil> 0 <nil>}] Successful message:Error from server (NotFound): widgets.example.com "foo" not found has:widgets.example.com "foo" not found [32mapply.sh:344: Successful get crds widgets.example.com {{.metadata.name}}: widgets.example.com (B[mI0223 10:17:58.831975 56345 controller.go:611] quota admission added evaluator for: widgets.example.com widget.example.com/foo created Warning: apiextensions.k8s.io/v1beta1 CustomResourceDefinition is deprecated in v1.16+, unavailable in v1.22+; use apiextensions.k8s.io/v1 CustomResourceDefinition customresourcedefinition.apiextensions.k8s.io/widgets.example.com unchanged ... skipping 34 lines ... 
message:867 has:867 pod "test-pod" deleted [32mapply.sh:403: Successful get pods {{range.items}}{{.metadata.name}}:{{end}}: (B[m+++ [0223 10:18:01] Testing upgrade kubectl client-side apply to server-side apply pod/test-pod created error: Apply failed with 1 conflict: conflict with "kubectl-client-side-apply" using v1: .metadata.labels.name Please review the fields above--they currently have other managers. Here are the ways you can resolve this warning: * If you intend to manage all of these fields, please re-run the apply command with the `--force-conflicts` flag. * If you do not intend to manage all of the fields, please edit your manifest to remove references to the fields that should keep their ... skipping 79 lines ... (B[mpod "nginx-extensions" deleted Successful message:pod/test1 created has:pod/test1 created pod "test1" deleted Successful message:error: Invalid image name "InvalidImageName": invalid reference format has:error: Invalid image name "InvalidImageName": invalid reference format +++ exit code: 0 Recording: run_kubectl_create_filter_tests Running command: run_kubectl_create_filter_tests +++ Running case: test-cmd.run_kubectl_create_filter_tests +++ working dir: /home/prow/go/src/k8s.io/kubernetes ... skipping 3 lines ... Context "test" modified. +++ [0223 10:18:05] Testing kubectl create filter [32mcreate.sh:50: Successful get pods {{range.items}}{{.metadata.name}}:{{end}}: (B[mpod/selector-test-pod created [32mcreate.sh:54: Successful get pods selector-test-pod {{.metadata.labels.name}}: selector-test-pod (B[mSuccessful message:Error from server (NotFound): pods "selector-test-pod-dont-apply" not found has:pods "selector-test-pod-dont-apply" not found pod "selector-test-pod" deleted +++ exit code: 0 Recording: run_kubectl_apply_deployments_tests Running command: run_kubectl_apply_deployments_tests ... skipping 32 lines ... 
I0223 10:18:07.657821 60038 event.go:291] "Event occurred" object="namespace-1614075485-12690/nginx-9bb9c4878" kind="ReplicaSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulCreate" message="Created pod: nginx-9bb9c4878-zmnls" [32mapps.sh:152: Successful get deployment nginx {{.metadata.name}}: nginx (B[mI0223 10:18:10.457630 56345 client.go:360] parsed scheme: "passthrough" I0223 10:18:10.457710 56345 passthrough.go:48] ccResolverWrapper: sending update to cc: {[{http://127.0.0.1:2379 <nil> 0 <nil>}] <nil> <nil>} I0223 10:18:10.457720 56345 clientconn.go:948] ClientConn switching balancer to "pick_first" Successful message:Error from server (Conflict): error when applying patch: {"metadata":{"annotations":{"kubectl.kubernetes.io/last-applied-configuration":"{\"apiVersion\":\"apps/v1\",\"kind\":\"Deployment\",\"metadata\":{\"annotations\":{},\"labels\":{\"name\":\"nginx\"},\"name\":\"nginx\",\"namespace\":\"namespace-1614075485-12690\",\"resourceVersion\":\"99\"},\"spec\":{\"replicas\":3,\"selector\":{\"matchLabels\":{\"name\":\"nginx2\"}},\"template\":{\"metadata\":{\"labels\":{\"name\":\"nginx2\"}},\"spec\":{\"containers\":[{\"image\":\"k8s.gcr.io/nginx:test-cmd\",\"name\":\"nginx\",\"ports\":[{\"containerPort\":80}]}]}}}}\n"},"resourceVersion":"99"},"spec":{"selector":{"matchLabels":{"name":"nginx2"}},"template":{"metadata":{"labels":{"name":"nginx2"}}}}} to: Resource: "apps/v1, Resource=deployments", GroupVersionKind: "apps/v1, Kind=Deployment" Name: "nginx", Namespace: "namespace-1614075485-12690" for: "hack/testdata/deployment-label-change2.yaml": Operation cannot be fulfilled on deployments.apps "nginx": the object has been modified; please apply your changes to the latest version and try again has:Error from server (Conflict) deployment.apps/nginx configured I0223 10:18:16.236673 60038 event.go:291] "Event occurred" object="namespace-1614075485-12690/nginx" kind="Deployment" apiVersion="apps/v1" type="Normal" reason="ScalingReplicaSet" message="Scaled up replica set nginx-6dd6cfdb57 to 3" I0223 10:18:16.242321 60038 event.go:291] "Event occurred" object="namespace-1614075485-12690/nginx-6dd6cfdb57" kind="ReplicaSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulCreate" message="Created pod: nginx-6dd6cfdb57-snjph" I0223 10:18:16.245934 60038 event.go:291] "Event occurred" object="namespace-1614075485-12690/nginx-6dd6cfdb57" kind="ReplicaSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulCreate" message="Created pod: nginx-6dd6cfdb57-8j4np" I0223 10:18:16.249629 60038 event.go:291] "Event occurred" object="namespace-1614075485-12690/nginx-6dd6cfdb57" kind="ReplicaSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulCreate" message="Created pod: nginx-6dd6cfdb57-92wn9" Successful ... skipping 300 lines ... +++ [0223 10:18:24] Creating namespace namespace-1614075504-29428 namespace/namespace-1614075504-29428 created Context "test" modified. +++ [0223 10:18:25] Testing kubectl get [32mget.sh:29: Successful get pods {{range.items}}{{.metadata.name}}:{{end}}: (B[mSuccessful message:Error from server (NotFound): pods "abc" not found has:pods "abc" not found [32mget.sh:37: Successful get pods {{range.items}}{{.metadata.name}}:{{end}}: (B[mSuccessful message:Error from server (NotFound): pods "abc" not found has:pods "abc" not found [32mget.sh:45: Successful get pods {{range.items}}{{.metadata.name}}:{{end}}: (B[mSuccessful message:{ "apiVersion": "v1", "items": [], ... skipping 23 lines ... 
has not:No resources found Successful message:NAME has not:No resources found [32mget.sh:73: Successful get pods {{range.items}}{{.metadata.name}}:{{end}}: (B[mSuccessful message:error: the server doesn't have a resource type "foobar" has not:No resources found Successful message:No resources found in namespace-1614075504-29428 namespace. has:No resources found Successful message: has not:No resources found Successful message:No resources found in namespace-1614075504-29428 namespace. has:No resources found [32mget.sh:93: Successful get pods {{range.items}}{{.metadata.name}}:{{end}}: (B[mSuccessful message:Error from server (NotFound): pods "abc" not found has:pods "abc" not found Successful message:Error from server (NotFound): pods "abc" not found has not:List Successful message:I0223 10:18:26.766234 71997 loader.go:372] Config loaded from file: /tmp/tmp.1pTtCmR0KM/.kube/config I0223 10:18:26.773036 71997 round_trippers.go:454] GET https://127.0.0.1:6443/version?timeout=32s 200 OK in 6 milliseconds I0223 10:18:26.798000 71997 round_trippers.go:454] GET https://127.0.0.1:6443/api/v1/namespaces/default/pods 200 OK in 2 milliseconds I0223 10:18:26.799911 71997 round_trippers.go:454] GET https://127.0.0.1:6443/api/v1/namespaces/default/replicationcontrollers 200 OK in 1 milliseconds ... skipping 591 lines ... } [32mget.sh:158: Successful get pods {{range.items}}{{.metadata.name}}:{{end}}: valid-pod: (B[m<no value>Successful message:valid-pod: has:valid-pod: Successful message:error: error executing jsonpath "{.missing}": Error executing template: missing is not found. Printing more information for debugging the template: template was: {.missing} object given to jsonpath engine was: map[string]interface {}{"apiVersion":"v1", "kind":"Pod", "metadata":map[string]interface {}{"creationTimestamp":"2021-02-23T10:18:34Z", "labels":map[string]interface {}{"name":"valid-pod"}, "managedFields":[]interface {}{map[string]interface {}{"apiVersion":"v1", "fieldsType":"FieldsV1", "fieldsV1":map[string]interface {}{"f:metadata":map[string]interface {}{"f:labels":map[string]interface {}{".":map[string]interface {}{}, "f:name":map[string]interface {}{}}}, "f:spec":map[string]interface {}{"f:containers":map[string]interface {}{"k:{\"name\":\"kubernetes-serve-hostname\"}":map[string]interface {}{".":map[string]interface {}{}, "f:image":map[string]interface {}{}, "f:imagePullPolicy":map[string]interface {}{}, "f:name":map[string]interface {}{}, "f:resources":map[string]interface {}{".":map[string]interface {}{}, "f:limits":map[string]interface {}{".":map[string]interface {}{}, "f:cpu":map[string]interface {}{}, "f:memory":map[string]interface {}{}}, "f:requests":map[string]interface {}{".":map[string]interface {}{}, "f:cpu":map[string]interface {}{}, "f:memory":map[string]interface {}{}}}, "f:terminationMessagePath":map[string]interface {}{}, "f:terminationMessagePolicy":map[string]interface {}{}}}, "f:dnsPolicy":map[string]interface {}{}, "f:enableServiceLinks":map[string]interface {}{}, "f:restartPolicy":map[string]interface {}{}, "f:schedulerName":map[string]interface {}{}, "f:securityContext":map[string]interface {}{}, "f:terminationGracePeriodSeconds":map[string]interface {}{}}}, "manager":"kubectl-create", "operation":"Update", "time":"2021-02-23T10:18:34Z"}}, "name":"valid-pod", "namespace":"namespace-1614075514-29622", "resourceVersion":"1033", "uid":"571d04df-08c3-4d66-9fbe-46683e7a09f6"}, "spec":map[string]interface {}{"containers":[]interface {}{map[string]interface 
{}{"image":"k8s.gcr.io/serve_hostname", "imagePullPolicy":"Always", "name":"kubernetes-serve-hostname", "resources":map[string]interface {}{"limits":map[string]interface {}{"cpu":"1", "memory":"512Mi"}, "requests":map[string]interface {}{"cpu":"1", "memory":"512Mi"}}, "terminationMessagePath":"/dev/termination-log", "terminationMessagePolicy":"File"}}, "dnsPolicy":"ClusterFirst", "enableServiceLinks":true, "preemptionPolicy":"PreemptLowerPriority", "priority":0, "restartPolicy":"Always", "schedulerName":"default-scheduler", "securityContext":map[string]interface {}{}, "terminationGracePeriodSeconds":30}, "status":map[string]interface {}{"phase":"Pending", "qosClass":"Guaranteed"}} has:missing is not found error: error executing template "{{.missing}}": template: output:1:2: executing "output" at <.missing>: map has no entry for key "missing" Successful message:Error executing template: template: output:1:2: executing "output" at <.missing>: map has no entry for key "missing". Printing more information for debugging the template: template was: {{.missing}} raw data was: {"apiVersion":"v1","kind":"Pod","metadata":{"creationTimestamp":"2021-02-23T10:18:34Z","labels":{"name":"valid-pod"},"managedFields":[{"apiVersion":"v1","fieldsType":"FieldsV1","fieldsV1":{"f:metadata":{"f:labels":{".":{},"f:name":{}}},"f:spec":{"f:containers":{"k:{\"name\":\"kubernetes-serve-hostname\"}":{".":{},"f:image":{},"f:imagePullPolicy":{},"f:name":{},"f:resources":{".":{},"f:limits":{".":{},"f:cpu":{},"f:memory":{}},"f:requests":{".":{},"f:cpu":{},"f:memory":{}}},"f:terminationMessagePath":{},"f:terminationMessagePolicy":{}}},"f:dnsPolicy":{},"f:enableServiceLinks":{},"f:restartPolicy":{},"f:schedulerName":{},"f:securityContext":{},"f:terminationGracePeriodSeconds":{}}},"manager":"kubectl-create","operation":"Update","time":"2021-02-23T10:18:34Z"}],"name":"valid-pod","namespace":"namespace-1614075514-29622","resourceVersion":"1033","uid":"571d04df-08c3-4d66-9fbe-46683e7a09f6"},"spec":{"containers":[{"image":"k8s.gcr.io/serve_hostname","imagePullPolicy":"Always","name":"kubernetes-serve-hostname","resources":{"limits":{"cpu":"1","memory":"512Mi"},"requests":{"cpu":"1","memory":"512Mi"}},"terminationMessagePath":"/dev/termination-log","terminationMessagePolicy":"File"}],"dnsPolicy":"ClusterFirst","enableServiceLinks":true,"preemptionPolicy":"PreemptLowerPriority","priority":0,"restartPolicy":"Always","schedulerName":"default-scheduler","securityContext":{},"terminationGracePeriodSeconds":30},"status":{"phase":"Pending","qosClass":"Guaranteed"}} object given to template engine was: map[apiVersion:v1 kind:Pod metadata:map[creationTimestamp:2021-02-23T10:18:34Z labels:map[name:valid-pod] managedFields:[map[apiVersion:v1 fieldsType:FieldsV1 fieldsV1:map[f:metadata:map[f:labels:map[.:map[] f:name:map[]]] f:spec:map[f:containers:map[k:{"name":"kubernetes-serve-hostname"}:map[.:map[] f:image:map[] f:imagePullPolicy:map[] f:name:map[] f:resources:map[.:map[] f:limits:map[.:map[] f:cpu:map[] f:memory:map[]] f:requests:map[.:map[] f:cpu:map[] f:memory:map[]]] f:terminationMessagePath:map[] f:terminationMessagePolicy:map[]]] f:dnsPolicy:map[] f:enableServiceLinks:map[] f:restartPolicy:map[] f:schedulerName:map[] f:securityContext:map[] f:terminationGracePeriodSeconds:map[]]] manager:kubectl-create operation:Update time:2021-02-23T10:18:34Z]] name:valid-pod namespace:namespace-1614075514-29622 resourceVersion:1033 uid:571d04df-08c3-4d66-9fbe-46683e7a09f6] spec:map[containers:[map[image:k8s.gcr.io/serve_hostname 
imagePullPolicy:Always name:kubernetes-serve-hostname resources:map[limits:map[cpu:1 memory:512Mi] requests:map[cpu:1 memory:512Mi]] terminationMessagePath:/dev/termination-log terminationMessagePolicy:File]] dnsPolicy:ClusterFirst enableServiceLinks:true preemptionPolicy:PreemptLowerPriority priority:0 restartPolicy:Always schedulerName:default-scheduler securityContext:map[] terminationGracePeriodSeconds:30] status:map[phase:Pending qosClass:Guaranteed]] ... skipping 84 lines ... terminationGracePeriodSeconds: 30 status: phase: Pending qosClass: Guaranteed has:name: valid-pod Successful message:Error from server (NotFound): pods "invalid-pod" not found has:"invalid-pod" not found pod "valid-pod" deleted [32mget.sh:196: Successful get pods {{range.items}}{{.metadata.name}}:{{end}}: (B[mpod/redis-master created pod/valid-pod created Successful ... skipping 36 lines ... +++ [0223 10:18:40] Creating namespace namespace-1614075520-5085 namespace/namespace-1614075520-5085 created Context "test" modified. +++ [0223 10:18:40] Testing kubectl exec POD COMMAND Successful message:kubectl exec [POD] [COMMAND] is DEPRECATED and will be removed in a future version. Use kubectl exec [POD] -- [COMMAND] instead. Error from server (NotFound): pods "abc" not found has:pods "abc" not found pod/test-pod created Successful message:kubectl exec [POD] [COMMAND] is DEPRECATED and will be removed in a future version. Use kubectl exec [POD] -- [COMMAND] instead. Error from server (BadRequest): pod test-pod does not have a host assigned has not:pods "test-pod" not found Successful message:kubectl exec [POD] [COMMAND] is DEPRECATED and will be removed in a future version. Use kubectl exec [POD] -- [COMMAND] instead. Error from server (BadRequest): pod test-pod does not have a host assigned has not:pod or type/name must be specified pod "test-pod" deleted +++ exit code: 0 Recording: run_kubectl_exec_resource_name_tests Running command: run_kubectl_exec_resource_name_tests ... skipping 3 lines ... +++ [0223 10:18:40] Creating namespace namespace-1614075520-14225 namespace/namespace-1614075520-14225 created Context "test" modified. +++ [0223 10:18:40] Testing kubectl exec TYPE/NAME COMMAND Successful message:kubectl exec [POD] [COMMAND] is DEPRECATED and will be removed in a future version. Use kubectl exec [POD] -- [COMMAND] instead. error: the server doesn't have a resource type "foo" has:error: Successful message:kubectl exec [POD] [COMMAND] is DEPRECATED and will be removed in a future version. Use kubectl exec [POD] -- [COMMAND] instead. 
Error from server (NotFound): deployments.apps "bar" not found has:"bar" not found pod/test-pod created I0223 10:18:41.582293 56345 client.go:360] parsed scheme: "passthrough" I0223 10:18:41.582402 56345 passthrough.go:48] ccResolverWrapper: sending update to cc: {[{http://127.0.0.1:2379 <nil> 0 <nil>}] <nil> <nil>} I0223 10:18:41.582414 56345 clientconn.go:948] ClientConn switching balancer to "pick_first" replicaset.apps/frontend created I0223 10:18:41.592066 60038 event.go:291] "Event occurred" object="namespace-1614075520-14225/frontend" kind="ReplicaSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulCreate" message="Created pod: frontend-qtxxd" I0223 10:18:41.595188 60038 event.go:291] "Event occurred" object="namespace-1614075520-14225/frontend" kind="ReplicaSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulCreate" message="Created pod: frontend-r4df5" I0223 10:18:41.596499 60038 event.go:291] "Event occurred" object="namespace-1614075520-14225/frontend" kind="ReplicaSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulCreate" message="Created pod: frontend-lsxzq" configmap/test-set-env-config created Successful message:kubectl exec [POD] [COMMAND] is DEPRECATED and will be removed in a future version. Use kubectl exec [POD] -- [COMMAND] instead. error: cannot attach to *v1.ConfigMap: selector for *v1.ConfigMap not implemented has:not implemented Successful message:kubectl exec [POD] [COMMAND] is DEPRECATED and will be removed in a future version. Use kubectl exec [POD] -- [COMMAND] instead. Error from server (BadRequest): pod test-pod does not have a host assigned has not:not found Successful message:kubectl exec [POD] [COMMAND] is DEPRECATED and will be removed in a future version. Use kubectl exec [POD] -- [COMMAND] instead. Error from server (BadRequest): pod test-pod does not have a host assigned has not:pod, type/name or --filename must be specified Successful message:kubectl exec [POD] [COMMAND] is DEPRECATED and will be removed in a future version. Use kubectl exec [POD] -- [COMMAND] instead. Error from server (BadRequest): pod frontend-lsxzq does not have a host assigned has not:not found Successful message:kubectl exec [POD] [COMMAND] is DEPRECATED and will be removed in a future version. Use kubectl exec [POD] -- [COMMAND] instead. 
Error from server (BadRequest): pod frontend-lsxzq does not have a host assigned has not:pod, type/name or --filename must be specified pod "test-pod" deleted replicaset.apps "frontend" deleted configmap "test-set-env-config" deleted +++ exit code: 0 Recording: run_create_secret_tests Running command: run_create_secret_tests +++ Running case: test-cmd.run_create_secret_tests +++ working dir: /home/prow/go/src/k8s.io/kubernetes +++ command: run_create_secret_tests Successful message:Error from server (NotFound): secrets "mysecret" not found has:secrets "mysecret" not found Successful message:user-specified has:user-specified Successful message:Error from server (NotFound): secrets "mysecret" not found has:secrets "mysecret" not found Successful {"kind":"ConfigMap","apiVersion":"v1","metadata":{"name":"tester-update-cm","namespace":"default","uid":"dc2b394e-9ff8-4483-adc9-dc4ddd2d48aa","resourceVersion":"1112","creationTimestamp":"2021-02-23T10:18:42Z"}} Successful message:{"kind":"ConfigMap","apiVersion":"v1","metadata":{"name":"tester-update-cm","namespace":"default","uid":"dc2b394e-9ff8-4483-adc9-dc4ddd2d48aa","resourceVersion":"1113","creationTimestamp":"2021-02-23T10:18:42Z"},"data":{"key1":"config1"}} has:uid Successful message:{"kind":"ConfigMap","apiVersion":"v1","metadata":{"name":"tester-update-cm","namespace":"default","uid":"dc2b394e-9ff8-4483-adc9-dc4ddd2d48aa","resourceVersion":"1113","creationTimestamp":"2021-02-23T10:18:42Z"},"data":{"key1":"config1"}} has:config1 {"kind":"Status","apiVersion":"v1","metadata":{},"status":"Success","details":{"name":"tester-update-cm","kind":"configmaps","uid":"dc2b394e-9ff8-4483-adc9-dc4ddd2d48aa"}} Successful message:Error from server (NotFound): configmaps "tester-update-cm" not found has:configmaps "tester-update-cm" not found +++ exit code: 0 Recording: run_kubectl_create_kustomization_directory_tests Running command: run_kubectl_create_kustomization_directory_tests +++ Running case: test-cmd.run_kubectl_create_kustomization_directory_tests ... skipping 73 lines ... securityContext: {} terminationGracePeriodSeconds: 30 status: {} has:apps/v1beta1 deployment.apps "nginx" deleted Successful message:error: unable to decode "hack/testdata/recursive/pod/pod/busybox-broken.yaml": Object 'Kind' is missing in '{"apiVersion":"v1","ind":"Pod","metadata":{"labels":{"app":"busybox2"},"name":"busybox2"},"spec":{"containers":[{"command":["sleep","3600"],"image":"busybox","imagePullPolicy":"IfNotPresent","name":"busybox"}],"restartPolicy":"Always"}}' has:Object 'Kind' is missing Successful message:nginx: has:nginx: +++ exit code: 0 Recording: run_kubectl_delete_allnamespaces_tests ... skipping 104 lines ... has:Timeout Successful message:NAME READY STATUS RESTARTS AGE valid-pod 0/1 Pending 0 2s has:valid-pod Successful message:error: Invalid timeout value. Timeout must be a single integer in seconds, or an integer followed by a corresponding time unit (e.g. 1s | 2m | 3h) has:Invalid timeout value pod "valid-pod" deleted +++ exit code: 0 Recording: run_crd_tests Running command: run_crd_tests ... skipping 158 lines ... 
foo.company.com/test patched [32mcrd.sh:236: Successful get foos/test {{.patched}}: value1 (B[mfoo.company.com/test patched [32mcrd.sh:238: Successful get foos/test {{.patched}}: value2 (B[mfoo.company.com/test patched [32mcrd.sh:240: Successful get foos/test {{.patched}}: <no value> (B[m+++ [0223 10:18:54] "kubectl patch --local" returns error as expected for CustomResource: error: strategic merge patch is not supported for company.com/v1, Kind=Foo locally, try --type merge { "apiVersion": "company.com/v1", "kind": "Foo", "metadata": { "annotations": { "kubernetes.io/change-cause": "kubectl patch foos/test --server=https://127.0.0.1:6443 --insecure-skip-tls-verify=true --match-server-version=true --patch={\"patched\":null} --type=merge --record=true" ... skipping 291 lines ... [32mcrd.sh:455: Successful get bars {{len .items}}: 1 (B[mnamespace "non-native-resources" deleted I0223 10:19:25.083900 56345 client.go:360] parsed scheme: "passthrough" I0223 10:19:25.083968 56345 passthrough.go:48] ccResolverWrapper: sending update to cc: {[{http://127.0.0.1:2379 <nil> 0 <nil>}] <nil> <nil>} I0223 10:19:25.083994 56345 clientconn.go:948] ClientConn switching balancer to "pick_first" [32mcrd.sh:458: Successful get bars {{len .items}}: 0 (B[mError from server (NotFound): namespaces "non-native-resources" not found customresourcedefinition.apiextensions.k8s.io "foos.company.com" deleted customresourcedefinition.apiextensions.k8s.io "bars.company.com" deleted customresourcedefinition.apiextensions.k8s.io "resources.mygroup.example.com" deleted customresourcedefinition.apiextensions.k8s.io "validfoos.company.com" deleted +++ exit code: 0 +++ [0223 10:19:27] Testing recursive resources ... skipping 2 lines ... Context "test" modified. [32mgeneric-resources.sh:202: Successful get pods {{range.items}}{{.metadata.name}}:{{end}}: (B[m[32mgeneric-resources.sh:206: Successful get pods {{range.items}}{{.metadata.name}}:{{end}}: busybox0:busybox1: (B[mSuccessful message:pod/busybox0 created pod/busybox1 created error: error validating "hack/testdata/recursive/pod/pod/busybox-broken.yaml": error validating data: kind not set; if you choose to ignore these errors, turn validation off with --validate=false has:error validating data: kind not set [32mgeneric-resources.sh:211: Successful get pods {{range.items}}{{.metadata.name}}:{{end}}: busybox0:busybox1: (B[mW0223 10:19:27.920655 56345 cacher.go:148] Terminating all watchers from cacher *unstructured.Unstructured E0223 10:19:27.922284 60038 reflector.go:138] k8s.io/client-go/metadata/metadatainformer/informer.go:90: Failed to watch *v1.PartialObjectMetadata: the server could not find the requested resource W0223 10:19:28.015891 56345 cacher.go:148] Terminating all watchers from cacher *unstructured.Unstructured E0223 10:19:28.017523 60038 reflector.go:138] k8s.io/client-go/metadata/metadatainformer/informer.go:90: Failed to watch *v1.PartialObjectMetadata: the server could not find the requested resource [32mgeneric-resources.sh:220: Successful get pods {{range.items}}{{(index .spec.containers 0).image}}:{{end}}: busybox:busybox: (B[mSuccessful message:error: unable to decode "hack/testdata/recursive/pod/pod/busybox-broken.yaml": Object 'Kind' is missing in '{"apiVersion":"v1","ind":"Pod","metadata":{"labels":{"app":"busybox2"},"name":"busybox2"},"spec":{"containers":[{"command":["sleep","3600"],"image":"busybox","imagePullPolicy":"IfNotPresent","name":"busybox"}],"restartPolicy":"Always"}}' has:Object 'Kind' is missing W0223 10:19:28.122956 56345 cacher.go:148] 
Terminating all watchers from cacher *unstructured.Unstructured E0223 10:19:28.124679 60038 reflector.go:138] k8s.io/client-go/metadata/metadatainformer/informer.go:90: Failed to watch *v1.PartialObjectMetadata: the server could not find the requested resource [32mgeneric-resources.sh:227: Successful get pods {{range.items}}{{.metadata.name}}:{{end}}: busybox0:busybox1: (B[mW0223 10:19:28.220177 56345 cacher.go:148] Terminating all watchers from cacher *unstructured.Unstructured E0223 10:19:28.221601 60038 reflector.go:138] k8s.io/client-go/metadata/metadatainformer/informer.go:90: Failed to watch *v1.PartialObjectMetadata: the server could not find the requested resource [32mgeneric-resources.sh:231: Successful get pods {{range.items}}{{.metadata.labels.status}}:{{end}}: replaced:replaced: (B[mSuccessful message:pod/busybox0 replaced pod/busybox1 replaced error: error validating "hack/testdata/recursive/pod-modify/pod/busybox-broken.yaml": error validating data: kind not set; if you choose to ignore these errors, turn validation off with --validate=false has:error validating data: kind not set [32mgeneric-resources.sh:236: Successful get pods {{range.items}}{{.metadata.name}}:{{end}}: busybox0:busybox1: (B[mSuccessful message:Name: busybox0 Namespace: namespace-1614075567-16457 Priority: 0 Node: <none> ... skipping 155 lines ... Node-Selectors: <none> Tolerations: <none> Events: <none> unable to decode "hack/testdata/recursive/pod/pod/busybox-broken.yaml": Object 'Kind' is missing in '{"apiVersion":"v1","ind":"Pod","metadata":{"labels":{"app":"busybox2"},"name":"busybox2"},"spec":{"containers":[{"command":["sleep","3600"],"image":"busybox","imagePullPolicy":"IfNotPresent","name":"busybox"}],"restartPolicy":"Always"}}' has:Object 'Kind' is missing [32mgeneric-resources.sh:246: Successful get pods {{range.items}}{{.metadata.name}}:{{end}}: busybox0:busybox1: (B[mE0223 10:19:29.030970 60038 reflector.go:138] k8s.io/client-go/metadata/metadatainformer/informer.go:90: Failed to watch *v1.PartialObjectMetadata: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource E0223 10:19:29.046402 60038 reflector.go:138] k8s.io/client-go/metadata/metadatainformer/informer.go:90: Failed to watch *v1.PartialObjectMetadata: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource [32mgeneric-resources.sh:250: Successful get pods {{range.items}}{{.metadata.annotations.annotatekey}}:{{end}}: annotatevalue:annotatevalue: (B[mSuccessful message:pod/busybox0 annotated pod/busybox1 annotated error: unable to decode "hack/testdata/recursive/pod/pod/busybox-broken.yaml": Object 'Kind' is missing in '{"apiVersion":"v1","ind":"Pod","metadata":{"labels":{"app":"busybox2"},"name":"busybox2"},"spec":{"containers":[{"command":["sleep","3600"],"image":"busybox","imagePullPolicy":"IfNotPresent","name":"busybox"}],"restartPolicy":"Always"}}' has:Object 'Kind' is missing [32mgeneric-resources.sh:255: Successful get pods {{range.items}}{{.metadata.name}}:{{end}}: busybox0:busybox1: (B[mE0223 10:19:29.437598 60038 reflector.go:138] k8s.io/client-go/metadata/metadatainformer/informer.go:90: Failed to watch *v1.PartialObjectMetadata: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource [32mgeneric-resources.sh:259: Successful get pods {{range.items}}{{.metadata.labels.status}}:{{end}}: replaced:replaced: (B[mSuccessful message:Warning: resource pods/busybox0 is missing the kubectl.kubernetes.io/last-applied-configuration 
annotation which is required by kubectl apply. kubectl apply should only be used on resources created declaratively by either kubectl create --save-config or kubectl apply. The missing annotation will be patched automatically. pod/busybox0 configured Warning: resource pods/busybox1 is missing the kubectl.kubernetes.io/last-applied-configuration annotation which is required by kubectl apply. kubectl apply should only be used on resources created declaratively by either kubectl create --save-config or kubectl apply. The missing annotation will be patched automatically. pod/busybox1 configured error: error validating "hack/testdata/recursive/pod-modify/pod/busybox-broken.yaml": error validating data: kind not set; if you choose to ignore these errors, turn validation off with --validate=false has:error validating data: kind not set [32mgeneric-resources.sh:264: Successful get pods {{range.items}}{{.metadata.name}}:{{end}}: busybox0:busybox1: (B[mSuccessful message:busybox0:busybox1:error: unable to decode "hack/testdata/recursive/pod/pod/busybox-broken.yaml": Object 'Kind' is missing in '{"apiVersion":"v1","ind":"Pod","metadata":{"labels":{"app":"busybox2"},"name":"busybox2"},"spec":{"containers":[{"command":["sleep","3600"],"image":"busybox","imagePullPolicy":"IfNotPresent","name":"busybox"}],"restartPolicy":"Always"}}' has:busybox0:busybox1: Successful message:busybox0:busybox1:error: unable to decode "hack/testdata/recursive/pod/pod/busybox-broken.yaml": Object 'Kind' is missing in '{"apiVersion":"v1","ind":"Pod","metadata":{"labels":{"app":"busybox2"},"name":"busybox2"},"spec":{"containers":[{"command":["sleep","3600"],"image":"busybox","imagePullPolicy":"IfNotPresent","name":"busybox"}],"restartPolicy":"Always"}}' has:Object 'Kind' is missing E0223 10:19:29.686006 60038 reflector.go:138] k8s.io/client-go/metadata/metadatainformer/informer.go:90: Failed to watch *v1.PartialObjectMetadata: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource [32mgeneric-resources.sh:273: Successful get pods {{range.items}}{{.metadata.name}}:{{end}}: busybox0:busybox1: (B[mpod/busybox0 labeled pod/busybox1 labeled error: unable to decode "hack/testdata/recursive/pod/pod/busybox-broken.yaml": Object 'Kind' is missing in '{"apiVersion":"v1","ind":"Pod","metadata":{"labels":{"app":"busybox2"},"name":"busybox2"},"spec":{"containers":[{"command":["sleep","3600"],"image":"busybox","imagePullPolicy":"IfNotPresent","name":"busybox"}],"restartPolicy":"Always"}}' [32mgeneric-resources.sh:278: Successful get pods {{range.items}}{{.metadata.labels.mylabel}}:{{end}}: myvalue:myvalue: (B[mSuccessful message:pod/busybox0 labeled pod/busybox1 labeled error: unable to decode "hack/testdata/recursive/pod/pod/busybox-broken.yaml": Object 'Kind' is missing in '{"apiVersion":"v1","ind":"Pod","metadata":{"labels":{"app":"busybox2"},"name":"busybox2"},"spec":{"containers":[{"command":["sleep","3600"],"image":"busybox","imagePullPolicy":"IfNotPresent","name":"busybox"}],"restartPolicy":"Always"}}' has:Object 'Kind' is missing [32mgeneric-resources.sh:283: Successful get pods {{range.items}}{{.metadata.name}}:{{end}}: busybox0:busybox1: (B[mpod/busybox0 patched pod/busybox1 patched error: unable to decode "hack/testdata/recursive/pod/pod/busybox-broken.yaml": Object 'Kind' is missing in 
'{"apiVersion":"v1","ind":"Pod","metadata":{"labels":{"app":"busybox2"},"name":"busybox2"},"spec":{"containers":[{"command":["sleep","3600"],"image":"busybox","imagePullPolicy":"IfNotPresent","name":"busybox"}],"restartPolicy":"Always"}}' [32mgeneric-resources.sh:288: Successful get pods {{range.items}}{{(index .spec.containers 0).image}}:{{end}}: prom/busybox:prom/busybox: (B[mSuccessful message:pod/busybox0 patched pod/busybox1 patched error: unable to decode "hack/testdata/recursive/pod/pod/busybox-broken.yaml": Object 'Kind' is missing in '{"apiVersion":"v1","ind":"Pod","metadata":{"labels":{"app":"busybox2"},"name":"busybox2"},"spec":{"containers":[{"command":["sleep","3600"],"image":"busybox","imagePullPolicy":"IfNotPresent","name":"busybox"}],"restartPolicy":"Always"}}' has:Object 'Kind' is missing [32mgeneric-resources.sh:293: Successful get pods {{range.items}}{{.metadata.name}}:{{end}}: busybox0:busybox1: (B[m[32mgeneric-resources.sh:297: Successful get pods {{range.items}}{{.metadata.name}}:{{end}}: (B[mSuccessful message:warning: Immediate deletion does not wait for confirmation that the running resource has been terminated. The resource may continue to run on the cluster indefinitely. pod "busybox0" force deleted pod "busybox1" force deleted error: unable to decode "hack/testdata/recursive/pod/pod/busybox-broken.yaml": Object 'Kind' is missing in '{"apiVersion":"v1","ind":"Pod","metadata":{"labels":{"app":"busybox2"},"name":"busybox2"},"spec":{"containers":[{"command":["sleep","3600"],"image":"busybox","imagePullPolicy":"IfNotPresent","name":"busybox"}],"restartPolicy":"Always"}}' has:Object 'Kind' is missing [32mgeneric-resources.sh:302: Successful get rc {{range.items}}{{.metadata.name}}:{{end}}: (B[mE0223 10:19:30.699050 60038 reflector.go:138] k8s.io/client-go/metadata/metadatainformer/informer.go:90: Failed to watch *v1.PartialObjectMetadata: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource replicationcontroller/busybox0 created I0223 10:19:30.771592 60038 event.go:291] "Event occurred" object="namespace-1614075567-16457/busybox0" kind="ReplicationController" apiVersion="v1" type="Normal" reason="SuccessfulCreate" message="Created pod: busybox0-6dg64" replicationcontroller/busybox1 created error: error validating "hack/testdata/recursive/rc/rc/busybox-broken.yaml": error validating data: kind not set; if you choose to ignore these errors, turn validation off with --validate=false I0223 10:19:30.777315 60038 event.go:291] "Event occurred" object="namespace-1614075567-16457/busybox1" kind="ReplicationController" apiVersion="v1" type="Normal" reason="SuccessfulCreate" message="Created pod: busybox1-k8dls" [32mgeneric-resources.sh:306: Successful get rc {{range.items}}{{.metadata.name}}:{{end}}: busybox0:busybox1: (B[m[32mgeneric-resources.sh:311: Successful get rc {{range.items}}{{.metadata.name}}:{{end}}: busybox0:busybox1: (B[m[32mgeneric-resources.sh:312: Successful get rc busybox0 {{.spec.replicas}}: 1 (B[mE0223 10:19:31.077873 60038 reflector.go:138] k8s.io/client-go/metadata/metadatainformer/informer.go:90: Failed to watch *v1.PartialObjectMetadata: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource [32mgeneric-resources.sh:313: Successful get rc busybox1 {{.spec.replicas}}: 1 (B[m[32mgeneric-resources.sh:318: Successful get hpa busybox0 {{.spec.minReplicas}} {{.spec.maxReplicas}} {{.spec.targetCPUUtilizationPercentage}}: 1 2 80 (B[m[32mgeneric-resources.sh:319: Successful get hpa 
busybox1 {{.spec.minReplicas}} {{.spec.maxReplicas}} {{.spec.targetCPUUtilizationPercentage}}: 1 2 80 (B[mSuccessful message:horizontalpodautoscaler.autoscaling/busybox0 autoscaled horizontalpodautoscaler.autoscaling/busybox1 autoscaled error: unable to decode "hack/testdata/recursive/rc/rc/busybox-broken.yaml": Object 'Kind' is missing in '{"apiVersion":"v1","ind":"ReplicationController","metadata":{"labels":{"app":"busybox2"},"name":"busybox2"},"spec":{"replicas":1,"selector":{"app":"busybox2"},"template":{"metadata":{"labels":{"app":"busybox2"},"name":"busybox2"},"spec":{"containers":[{"command":["sleep","3600"],"image":"busybox","imagePullPolicy":"IfNotPresent","name":"busybox"}],"restartPolicy":"Always"}}}}' has:Object 'Kind' is missing horizontalpodautoscaler.autoscaling "busybox0" deleted E0223 10:19:31.505875 60038 reflector.go:138] k8s.io/client-go/metadata/metadatainformer/informer.go:90: Failed to watch *v1.PartialObjectMetadata: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource horizontalpodautoscaler.autoscaling "busybox1" deleted [32mgeneric-resources.sh:327: Successful get rc {{range.items}}{{.metadata.name}}:{{end}}: busybox0:busybox1: (B[mI0223 10:19:31.654565 60038 namespace_controller.go:185] Namespace has been deleted non-native-resources [32mgeneric-resources.sh:328: Successful get rc busybox0 {{.spec.replicas}}: 1 (B[m[32mgeneric-resources.sh:329: Successful get rc busybox1 {{.spec.replicas}}: 1 (B[m[32mgeneric-resources.sh:333: Successful get service busybox0 {{(index .spec.ports 0).name}} {{(index .spec.ports 0).port}}: <no value> 80 (B[mE0223 10:19:31.981203 60038 reflector.go:138] k8s.io/client-go/metadata/metadatainformer/informer.go:90: Failed to watch *v1.PartialObjectMetadata: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource [32mgeneric-resources.sh:334: Successful get service busybox1 {{(index .spec.ports 0).name}} {{(index .spec.ports 0).port}}: <no value> 80 (B[mSuccessful message:service/busybox0 exposed service/busybox1 exposed error: unable to decode "hack/testdata/recursive/rc/rc/busybox-broken.yaml": Object 'Kind' is missing in '{"apiVersion":"v1","ind":"ReplicationController","metadata":{"labels":{"app":"busybox2"},"name":"busybox2"},"spec":{"replicas":1,"selector":{"app":"busybox2"},"template":{"metadata":{"labels":{"app":"busybox2"},"name":"busybox2"},"spec":{"containers":[{"command":["sleep","3600"],"image":"busybox","imagePullPolicy":"IfNotPresent","name":"busybox"}],"restartPolicy":"Always"}}}}' has:Object 'Kind' is missing [32mgeneric-resources.sh:340: Successful get rc {{range.items}}{{.metadata.name}}:{{end}}: busybox0:busybox1: (B[m[32mgeneric-resources.sh:341: Successful get rc busybox0 {{.spec.replicas}}: 1 (B[m[32mgeneric-resources.sh:342: Successful get rc busybox1 {{.spec.replicas}}: 1 (B[mI0223 10:19:32.407851 60038 event.go:291] "Event occurred" object="namespace-1614075567-16457/busybox0" kind="ReplicationController" apiVersion="v1" type="Normal" reason="SuccessfulCreate" message="Created pod: busybox0-4q4wm" I0223 10:19:32.420722 60038 event.go:291] "Event occurred" object="namespace-1614075567-16457/busybox1" kind="ReplicationController" apiVersion="v1" type="Normal" reason="SuccessfulCreate" message="Created pod: busybox1-crfhq" [32mgeneric-resources.sh:346: Successful get rc busybox0 {{.spec.replicas}}: 2 (B[m[32mgeneric-resources.sh:347: Successful get rc busybox1 {{.spec.replicas}}: 2 (B[mSuccessful message:replicationcontroller/busybox0 scaled 
replicationcontroller/busybox1 scaled error: unable to decode "hack/testdata/recursive/rc/rc/busybox-broken.yaml": Object 'Kind' is missing in '{"apiVersion":"v1","ind":"ReplicationController","metadata":{"labels":{"app":"busybox2"},"name":"busybox2"},"spec":{"replicas":1,"selector":{"app":"busybox2"},"template":{"metadata":{"labels":{"app":"busybox2"},"name":"busybox2"},"spec":{"containers":[{"command":["sleep","3600"],"image":"busybox","imagePullPolicy":"IfNotPresent","name":"busybox"}],"restartPolicy":"Always"}}}}' has:Object 'Kind' is missing [32mgeneric-resources.sh:352: Successful get rc {{range.items}}{{.metadata.name}}:{{end}}: busybox0:busybox1: (B[m[32mgeneric-resources.sh:356: Successful get rc {{range.items}}{{.metadata.name}}:{{end}}: (B[mSuccessful message:warning: Immediate deletion does not wait for confirmation that the running resource has been terminated. The resource may continue to run on the cluster indefinitely. replicationcontroller "busybox0" force deleted replicationcontroller "busybox1" force deleted error: unable to decode "hack/testdata/recursive/rc/rc/busybox-broken.yaml": Object 'Kind' is missing in '{"apiVersion":"v1","ind":"ReplicationController","metadata":{"labels":{"app":"busybox2"},"name":"busybox2"},"spec":{"replicas":1,"selector":{"app":"busybox2"},"template":{"metadata":{"labels":{"app":"busybox2"},"name":"busybox2"},"spec":{"containers":[{"command":["sleep","3600"],"image":"busybox","imagePullPolicy":"IfNotPresent","name":"busybox"}],"restartPolicy":"Always"}}}}' has:Object 'Kind' is missing [32mgeneric-resources.sh:361: Successful get deployment {{range.items}}{{.metadata.name}}:{{end}}: (B[mdeployment.apps/nginx1-deployment created I0223 10:19:33.135378 60038 event.go:291] "Event occurred" object="namespace-1614075567-16457/nginx1-deployment" kind="Deployment" apiVersion="apps/v1" type="Normal" reason="ScalingReplicaSet" message="Scaled up replica set nginx1-deployment-758b5949b6 to 2" deployment.apps/nginx0-deployment created error: error validating "hack/testdata/recursive/deployment/deployment/nginx-broken.yaml": error validating data: kind not set; if you choose to ignore these errors, turn validation off with --validate=false I0223 10:19:33.140037 60038 event.go:291] "Event occurred" object="namespace-1614075567-16457/nginx1-deployment-758b5949b6" kind="ReplicaSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulCreate" message="Created pod: nginx1-deployment-758b5949b6-q549b" I0223 10:19:33.142193 60038 event.go:291] "Event occurred" object="namespace-1614075567-16457/nginx0-deployment" kind="Deployment" apiVersion="apps/v1" type="Normal" reason="ScalingReplicaSet" message="Scaled up replica set nginx0-deployment-75db9cdfd9 to 2" I0223 10:19:33.143061 60038 event.go:291] "Event occurred" object="namespace-1614075567-16457/nginx1-deployment-758b5949b6" kind="ReplicaSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulCreate" message="Created pod: nginx1-deployment-758b5949b6-f6rwj" I0223 10:19:33.149789 60038 event.go:291] "Event occurred" object="namespace-1614075567-16457/nginx0-deployment-75db9cdfd9" kind="ReplicaSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulCreate" message="Created pod: nginx0-deployment-75db9cdfd9-fwg9l" I0223 10:19:33.154903 60038 event.go:291] "Event occurred" object="namespace-1614075567-16457/nginx0-deployment-75db9cdfd9" kind="ReplicaSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulCreate" message="Created pod: nginx0-deployment-75db9cdfd9-wfmdj" [32mgeneric-resources.sh:365: 
Successful get deployment {{range.items}}{{.metadata.name}}:{{end}}: nginx0-deployment:nginx1-deployment: (B[m[32mgeneric-resources.sh:366: Successful get deployment {{range.items}}{{(index .spec.template.spec.containers 0).image}}:{{end}}: k8s.gcr.io/nginx:1.7.9:k8s.gcr.io/nginx:1.7.9: (B[m[32mgeneric-resources.sh:370: Successful get deployment {{range.items}}{{(index .spec.template.spec.containers 0).image}}:{{end}}: k8s.gcr.io/nginx:1.7.9:k8s.gcr.io/nginx:1.7.9: (B[mSuccessful message:deployment.apps/nginx1-deployment skipped rollback (current template already matches revision 1) deployment.apps/nginx0-deployment skipped rollback (current template already matches revision 1) error: unable to decode "hack/testdata/recursive/deployment/deployment/nginx-broken.yaml": Object 'Kind' is missing in '{"apiVersion":"apps/v1","ind":"Deployment","metadata":{"labels":{"app":"nginx2-deployment"},"name":"nginx2-deployment"},"spec":{"replicas":2,"selector":{"matchLabels":{"app":"nginx2"}},"template":{"metadata":{"labels":{"app":"nginx2"}},"spec":{"containers":[{"image":"k8s.gcr.io/nginx:1.7.9","name":"nginx","ports":[{"containerPort":80}]}]}}}}' has:Object 'Kind' is missing deployment.apps/nginx1-deployment paused deployment.apps/nginx0-deployment paused [32mgeneric-resources.sh:378: Successful get deployment {{range.items}}{{.spec.paused}}:{{end}}: true:true: (B[mSuccessful message:unable to decode "hack/testdata/recursive/deployment/deployment/nginx-broken.yaml": Object 'Kind' is missing in '{"apiVersion":"apps/v1","ind":"Deployment","metadata":{"labels":{"app":"nginx2-deployment"},"name":"nginx2-deployment"},"spec":{"replicas":2,"selector":{"matchLabels":{"app":"nginx2"}},"template":{"metadata":{"labels":{"app":"nginx2"}},"spec":{"containers":[{"image":"k8s.gcr.io/nginx:1.7.9","name":"nginx","ports":[{"containerPort":80}]}]}}}}' ... skipping 10 lines ... 
1 <none> deployment.apps/nginx0-deployment REVISION CHANGE-CAUSE 1 <none> error: unable to decode "hack/testdata/recursive/deployment/deployment/nginx-broken.yaml": Object 'Kind' is missing in '{"apiVersion":"apps/v1","ind":"Deployment","metadata":{"labels":{"app":"nginx2-deployment"},"name":"nginx2-deployment"},"spec":{"replicas":2,"selector":{"matchLabels":{"app":"nginx2"}},"template":{"metadata":{"labels":{"app":"nginx2"}},"spec":{"containers":[{"image":"k8s.gcr.io/nginx:1.7.9","name":"nginx","ports":[{"containerPort":80}]}]}}}}' has:nginx0-deployment Successful message:deployment.apps/nginx1-deployment REVISION CHANGE-CAUSE 1 <none> deployment.apps/nginx0-deployment REVISION CHANGE-CAUSE 1 <none> error: unable to decode "hack/testdata/recursive/deployment/deployment/nginx-broken.yaml": Object 'Kind' is missing in '{"apiVersion":"apps/v1","ind":"Deployment","metadata":{"labels":{"app":"nginx2-deployment"},"name":"nginx2-deployment"},"spec":{"replicas":2,"selector":{"matchLabels":{"app":"nginx2"}},"template":{"metadata":{"labels":{"app":"nginx2"}},"spec":{"containers":[{"image":"k8s.gcr.io/nginx:1.7.9","name":"nginx","ports":[{"containerPort":80}]}]}}}}' has:nginx1-deployment Successful message:deployment.apps/nginx1-deployment REVISION CHANGE-CAUSE 1 <none> deployment.apps/nginx0-deployment REVISION CHANGE-CAUSE 1 <none> error: unable to decode "hack/testdata/recursive/deployment/deployment/nginx-broken.yaml": Object 'Kind' is missing in '{"apiVersion":"apps/v1","ind":"Deployment","metadata":{"labels":{"app":"nginx2-deployment"},"name":"nginx2-deployment"},"spec":{"replicas":2,"selector":{"matchLabels":{"app":"nginx2"}},"template":{"metadata":{"labels":{"app":"nginx2"}},"spec":{"containers":[{"image":"k8s.gcr.io/nginx:1.7.9","name":"nginx","ports":[{"containerPort":80}]}]}}}}' has:Object 'Kind' is missing warning: Immediate deletion does not wait for confirmation that the running resource has been terminated. The resource may continue to run on the cluster indefinitely. 
deployment.apps "nginx1-deployment" force deleted deployment.apps "nginx0-deployment" force deleted error: unable to decode "hack/testdata/recursive/deployment/deployment/nginx-broken.yaml": Object 'Kind' is missing in '{"apiVersion":"apps/v1","ind":"Deployment","metadata":{"labels":{"app":"nginx2-deployment"},"name":"nginx2-deployment"},"spec":{"replicas":2,"selector":{"matchLabels":{"app":"nginx2"}},"template":{"metadata":{"labels":{"app":"nginx2"}},"spec":{"containers":[{"image":"k8s.gcr.io/nginx:1.7.9","name":"nginx","ports":[{"containerPort":80}]}]}}}}' [32mgeneric-resources.sh:400: Successful get rc {{range.items}}{{.metadata.name}}:{{end}}: (B[mreplicationcontroller/busybox0 created I0223 10:19:35.378156 60038 event.go:291] "Event occurred" object="namespace-1614075567-16457/busybox0" kind="ReplicationController" apiVersion="v1" type="Normal" reason="SuccessfulCreate" message="Created pod: busybox0-6bjkt" replicationcontroller/busybox1 created error: error validating "hack/testdata/recursive/rc/rc/busybox-broken.yaml": error validating data: kind not set; if you choose to ignore these errors, turn validation off with --validate=false I0223 10:19:35.384069 60038 event.go:291] "Event occurred" object="namespace-1614075567-16457/busybox1" kind="ReplicationController" apiVersion="v1" type="Normal" reason="SuccessfulCreate" message="Created pod: busybox1-bxm56" [32mgeneric-resources.sh:404: Successful get rc {{range.items}}{{.metadata.name}}:{{end}}: busybox0:busybox1: (B[mSuccessful message:no rollbacker has been implemented for "ReplicationController" no rollbacker has been implemented for "ReplicationController" unable to decode "hack/testdata/recursive/rc/rc/busybox-broken.yaml": Object 'Kind' is missing in '{"apiVersion":"v1","ind":"ReplicationController","metadata":{"labels":{"app":"busybox2"},"name":"busybox2"},"spec":{"replicas":1,"selector":{"app":"busybox2"},"template":{"metadata":{"labels":{"app":"busybox2"},"name":"busybox2"},"spec":{"containers":[{"command":["sleep","3600"],"image":"busybox","imagePullPolicy":"IfNotPresent","name":"busybox"}],"restartPolicy":"Always"}}}}' ... skipping 2 lines ... 
message:no rollbacker has been implemented for "ReplicationController" no rollbacker has been implemented for "ReplicationController" unable to decode "hack/testdata/recursive/rc/rc/busybox-broken.yaml": Object 'Kind' is missing in '{"apiVersion":"v1","ind":"ReplicationController","metadata":{"labels":{"app":"busybox2"},"name":"busybox2"},"spec":{"replicas":1,"selector":{"app":"busybox2"},"template":{"metadata":{"labels":{"app":"busybox2"},"name":"busybox2"},"spec":{"containers":[{"command":["sleep","3600"],"image":"busybox","imagePullPolicy":"IfNotPresent","name":"busybox"}],"restartPolicy":"Always"}}}}' has:Object 'Kind' is missing Successful message:unable to decode "hack/testdata/recursive/rc/rc/busybox-broken.yaml": Object 'Kind' is missing in '{"apiVersion":"v1","ind":"ReplicationController","metadata":{"labels":{"app":"busybox2"},"name":"busybox2"},"spec":{"replicas":1,"selector":{"app":"busybox2"},"template":{"metadata":{"labels":{"app":"busybox2"},"name":"busybox2"},"spec":{"containers":[{"command":["sleep","3600"],"image":"busybox","imagePullPolicy":"IfNotPresent","name":"busybox"}],"restartPolicy":"Always"}}}}' error: replicationcontrollers "busybox0" pausing is not supported error: replicationcontrollers "busybox1" pausing is not supported has:Object 'Kind' is missing Successful message:unable to decode "hack/testdata/recursive/rc/rc/busybox-broken.yaml": Object 'Kind' is missing in '{"apiVersion":"v1","ind":"ReplicationController","metadata":{"labels":{"app":"busybox2"},"name":"busybox2"},"spec":{"replicas":1,"selector":{"app":"busybox2"},"template":{"metadata":{"labels":{"app":"busybox2"},"name":"busybox2"},"spec":{"containers":[{"command":["sleep","3600"],"image":"busybox","imagePullPolicy":"IfNotPresent","name":"busybox"}],"restartPolicy":"Always"}}}}' error: replicationcontrollers "busybox0" pausing is not supported error: replicationcontrollers "busybox1" pausing is not supported has:replicationcontrollers "busybox0" pausing is not supported Successful message:unable to decode "hack/testdata/recursive/rc/rc/busybox-broken.yaml": Object 'Kind' is missing in '{"apiVersion":"v1","ind":"ReplicationController","metadata":{"labels":{"app":"busybox2"},"name":"busybox2"},"spec":{"replicas":1,"selector":{"app":"busybox2"},"template":{"metadata":{"labels":{"app":"busybox2"},"name":"busybox2"},"spec":{"containers":[{"command":["sleep","3600"],"image":"busybox","imagePullPolicy":"IfNotPresent","name":"busybox"}],"restartPolicy":"Always"}}}}' error: replicationcontrollers "busybox0" pausing is not supported error: replicationcontrollers "busybox1" pausing is not supported has:replicationcontrollers "busybox1" pausing is not supported Successful message:unable to decode "hack/testdata/recursive/rc/rc/busybox-broken.yaml": Object 'Kind' is missing in '{"apiVersion":"v1","ind":"ReplicationController","metadata":{"labels":{"app":"busybox2"},"name":"busybox2"},"spec":{"replicas":1,"selector":{"app":"busybox2"},"template":{"metadata":{"labels":{"app":"busybox2"},"name":"busybox2"},"spec":{"containers":[{"command":["sleep","3600"],"image":"busybox","imagePullPolicy":"IfNotPresent","name":"busybox"}],"restartPolicy":"Always"}}}}' error: replicationcontrollers "busybox0" resuming is not supported error: replicationcontrollers "busybox1" resuming is not supported has:Object 'Kind' is missing Successful message:unable to decode "hack/testdata/recursive/rc/rc/busybox-broken.yaml": Object 'Kind' is missing in 
'{"apiVersion":"v1","ind":"ReplicationController","metadata":{"labels":{"app":"busybox2"},"name":"busybox2"},"spec":{"replicas":1,"selector":{"app":"busybox2"},"template":{"metadata":{"labels":{"app":"busybox2"},"name":"busybox2"},"spec":{"containers":[{"command":["sleep","3600"],"image":"busybox","imagePullPolicy":"IfNotPresent","name":"busybox"}],"restartPolicy":"Always"}}}}' error: replicationcontrollers "busybox0" resuming is not supported error: replicationcontrollers "busybox1" resuming is not supported has:replicationcontrollers "busybox0" resuming is not supported Successful message:unable to decode "hack/testdata/recursive/rc/rc/busybox-broken.yaml": Object 'Kind' is missing in '{"apiVersion":"v1","ind":"ReplicationController","metadata":{"labels":{"app":"busybox2"},"name":"busybox2"},"spec":{"replicas":1,"selector":{"app":"busybox2"},"template":{"metadata":{"labels":{"app":"busybox2"},"name":"busybox2"},"spec":{"containers":[{"command":["sleep","3600"],"image":"busybox","imagePullPolicy":"IfNotPresent","name":"busybox"}],"restartPolicy":"Always"}}}}' error: replicationcontrollers "busybox0" resuming is not supported error: replicationcontrollers "busybox1" resuming is not supported has:replicationcontrollers "busybox1" resuming is not supported warning: Immediate deletion does not wait for confirmation that the running resource has been terminated. The resource may continue to run on the cluster indefinitely. replicationcontroller "busybox0" force deleted replicationcontroller "busybox1" force deleted error: unable to decode "hack/testdata/recursive/rc/rc/busybox-broken.yaml": Object 'Kind' is missing in '{"apiVersion":"v1","ind":"ReplicationController","metadata":{"labels":{"app":"busybox2"},"name":"busybox2"},"spec":{"replicas":1,"selector":{"app":"busybox2"},"template":{"metadata":{"labels":{"app":"busybox2"},"name":"busybox2"},"spec":{"containers":[{"command":["sleep","3600"],"image":"busybox","imagePullPolicy":"IfNotPresent","name":"busybox"}],"restartPolicy":"Always"}}}}' E0223 10:19:36.364609 60038 reflector.go:138] k8s.io/client-go/metadata/metadatainformer/informer.go:90: Failed to watch *v1.PartialObjectMetadata: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource E0223 10:19:36.500398 60038 reflector.go:138] k8s.io/client-go/metadata/metadatainformer/informer.go:90: Failed to watch *v1.PartialObjectMetadata: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource E0223 10:19:36.592848 60038 reflector.go:138] k8s.io/client-go/metadata/metadatainformer/informer.go:90: Failed to watch *v1.PartialObjectMetadata: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource Recording: run_namespace_tests Running command: run_namespace_tests +++ Running case: test-cmd.run_namespace_tests +++ working dir: /home/prow/go/src/k8s.io/kubernetes +++ command: run_namespace_tests +++ [0223 10:19:36] Testing kubectl(v1:namespaces) Successful message:Error from server (NotFound): namespaces "my-namespace" not found has: not found namespace/my-namespace created (dry run) E0223 10:19:37.111477 60038 reflector.go:138] k8s.io/client-go/metadata/metadatainformer/informer.go:90: Failed to watch *v1.PartialObjectMetadata: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource namespace/my-namespace created (server dry run) Successful message:Error from server (NotFound): namespaces "my-namespace" not found has: not found namespace/my-namespace created 
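The namespace checks just above run kubectl's two dry-run modes before the real create. A minimal stand-alone sketch of that sequence, not the exact harness invocation (the namespace name comes from the log; the suite's --server/--match-server-version flags are omitted):

  # Client-side dry run: the object is only rendered locally, nothing is persisted.
  kubectl create namespace my-namespace --dry-run=client -o yaml
  # Server-side dry run: admitted and validated by the API server, but not persisted.
  kubectl create namespace my-namespace --dry-run=server
  # Real create, then the same go-template field check the suite asserts on.
  kubectl create namespace my-namespace
  kubectl get namespaces/my-namespace -o go-template --template='{{.metadata.name}}'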
[32mcore.sh:1459: Successful get namespaces/my-namespace {{.metadata.name}}: my-namespace (B[mnamespace "my-namespace" deleted namespace/my-namespace condition met Successful message:Error from server (NotFound): namespaces "my-namespace" not found has: not found namespace/my-namespace created [32mcore.sh:1468: Successful get namespaces/my-namespace {{.metadata.name}}: my-namespace (B[mSuccessful message:warning: deleting cluster-scoped resources, not scoped to the provided namespace namespace "kube-node-lease" deleted ... skipping 31 lines ... namespace "namespace-1614075525-27279" deleted namespace "namespace-1614075525-5573" deleted namespace "namespace-1614075526-1047" deleted namespace "namespace-1614075528-20126" deleted namespace "namespace-1614075529-5241" deleted namespace "namespace-1614075567-16457" deleted Error from server (Forbidden): namespaces "default" is forbidden: this namespace may not be deleted Error from server (Forbidden): namespaces "kube-public" is forbidden: this namespace may not be deleted Error from server (Forbidden): namespaces "kube-system" is forbidden: this namespace may not be deleted has:warning: deleting cluster-scoped resources Successful message:warning: deleting cluster-scoped resources, not scoped to the provided namespace namespace "kube-node-lease" deleted namespace "my-namespace" deleted namespace "namespace-1614075372-10817" deleted ... skipping 29 lines ... namespace "namespace-1614075525-27279" deleted namespace "namespace-1614075525-5573" deleted namespace "namespace-1614075526-1047" deleted namespace "namespace-1614075528-20126" deleted namespace "namespace-1614075529-5241" deleted namespace "namespace-1614075567-16457" deleted Error from server (Forbidden): namespaces "default" is forbidden: this namespace may not be deleted Error from server (Forbidden): namespaces "kube-public" is forbidden: this namespace may not be deleted Error from server (Forbidden): namespaces "kube-system" is forbidden: this namespace may not be deleted has:namespace "my-namespace" deleted namespace/quotas created [32mcore.sh:1475: Successful get namespaces/quotas {{.metadata.name}}: quotas (B[m[32mcore.sh:1476: Successful get quota --namespace=quotas {{range.items}}{{ if eq .metadata.name \"test-quota\" }}found{{end}}{{end}}:: : (B[mE0223 10:19:43.285701 60038 reflector.go:138] k8s.io/client-go/metadata/metadatainformer/informer.go:90: Failed to watch *v1.PartialObjectMetadata: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource resourcequota/test-quota created (dry run) resourcequota/test-quota created (server dry run) [32mcore.sh:1480: Successful get quota --namespace=quotas {{range.items}}{{ if eq .metadata.name \"test-quota\" }}found{{end}}{{end}}:: : (B[mresourcequota/test-quota created [32mcore.sh:1483: Successful get quota --namespace=quotas {{range.items}}{{ if eq .metadata.name \"test-quota\" }}found{{end}}{{end}}:: found: (B[mI0223 10:19:43.810070 60038 resource_quota_controller.go:307] Resource quota has been deleted quotas/test-quota resourcequota "test-quota" deleted namespace "quotas" deleted E0223 10:19:44.595069 60038 reflector.go:138] k8s.io/client-go/metadata/metadatainformer/informer.go:90: Failed to watch *v1.PartialObjectMetadata: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource E0223 10:19:44.637249 60038 reflector.go:138] k8s.io/client-go/metadata/metadatainformer/informer.go:90: Failed to watch *v1.PartialObjectMetadata: failed to list 
*v1.PartialObjectMetadata: the server could not find the requested resource I0223 10:19:45.567651 60038 shared_informer.go:240] Waiting for caches to sync for garbage collector I0223 10:19:45.567711 60038 shared_informer.go:247] Caches are synced for garbage collector I0223 10:19:46.006298 60038 shared_informer.go:240] Waiting for caches to sync for resource quota I0223 10:19:46.006478 60038 shared_informer.go:247] Caches are synced for resource quota I0223 10:19:46.197119 60038 horizontal.go:359] Horizontal Pod Autoscaler busybox0 has been deleted in namespace-1614075567-16457 I0223 10:19:46.200854 60038 horizontal.go:359] Horizontal Pod Autoscaler busybox1 has been deleted in namespace-1614075567-16457 E0223 10:19:47.790876 60038 reflector.go:138] k8s.io/client-go/metadata/metadatainformer/informer.go:90: Failed to watch *v1.PartialObjectMetadata: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource [32mcore.sh:1495: Successful get namespaces {{range.items}}{{ if eq .metadata.name \"other\" }}found{{end}}{{end}}:: : (B[mnamespace/other created [32mcore.sh:1499: Successful get namespaces/other {{.metadata.name}}: other (B[m[32mcore.sh:1503: Successful get pods --namespace=other {{range.items}}{{.metadata.name}}:{{end}}: (B[mpod/valid-pod created [32mcore.sh:1507: Successful get pods --namespace=other {{range.items}}{{.metadata.name}}:{{end}}: valid-pod: (B[m[32mcore.sh:1509: Successful get pods -n other {{range.items}}{{.metadata.name}}:{{end}}: valid-pod: (B[mSuccessful message:error: a resource cannot be retrieved by name across all namespaces has:a resource cannot be retrieved by name across all namespaces [32mcore.sh:1516: Successful get pods --namespace=other {{range.items}}{{.metadata.name}}:{{end}}: valid-pod: (B[mwarning: Immediate deletion does not wait for confirmation that the running resource has been terminated. The resource may continue to run on the cluster indefinitely. pod "valid-pod" force deleted [32mcore.sh:1520: Successful get pods --namespace=other {{range.items}}{{.metadata.name}}:{{end}}: (B[mnamespace "other" deleted ... skipping 116 lines ... (B[m[32mcore.sh:910: Successful get secret/secret-string-data --namespace=test-secrets {{.data}}: map[k1:djE= k2:djI=] (B[m[32mcore.sh:911: Successful get secret/secret-string-data --namespace=test-secrets {{.stringData}}: <no value> (B[msecret "secret-string-data" deleted [32mcore.sh:920: Successful get secrets --namespace=test-secrets {{range.items}}{{.metadata.name}}:{{end}}: (B[msecret "test-secret" deleted namespace "test-secrets" deleted E0223 10:19:59.390351 60038 reflector.go:138] k8s.io/client-go/metadata/metadatainformer/informer.go:90: Failed to watch *v1.PartialObjectMetadata: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource I0223 10:20:00.289429 60038 namespace_controller.go:185] Namespace has been deleted other +++ exit code: 0 Recording: run_configmap_tests Running command: run_configmap_tests +++ Running case: test-cmd.run_configmap_tests ... skipping 18 lines ... 
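Earlier in this block the suite asserts that a get by name cannot span namespaces ("a resource cannot be retrieved by name across all namespaces"). A hand-runnable restatement of that behaviour, using the pod and namespace names from the log:

  # Rejected: a resource name combined with --all-namespaces is ambiguous.
  kubectl get pods valid-pod --all-namespaces
  # Allowed: a name lookup scoped to one namespace...
  kubectl get pods valid-pod --namespace=other
  # ...and a list (no name) may span every namespace.
  kubectl get pods --all-namespaces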
configmap/test-binary-configmap created
core.sh:51: Successful get configmap/test-configmap --namespace=test-configmaps {{.metadata.name}}: test-configmap
core.sh:52: Successful get configmap/test-binary-configmap --namespace=test-configmaps {{.metadata.name}}: test-binary-configmap
configmap "test-configmap" deleted
configmap "test-binary-configmap" deleted
namespace "test-configmaps" deleted
E0223 10:20:06.637989 60038 reflector.go:138] k8s.io/client-go/metadata/metadatainformer/informer.go:90: Failed to watch *v1.PartialObjectMetadata: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
I0223 10:20:07.780286 56345 client.go:360] parsed scheme: "passthrough"
I0223 10:20:07.780340 56345 passthrough.go:48] ccResolverWrapper: sending update to cc: {[{http://127.0.0.1:2379 <nil> 0 <nil>}] <nil> <nil>}
I0223 10:20:07.780349 56345 clientconn.go:948] ClientConn switching balancer to "pick_first"
E0223 10:20:07.981739 60038 reflector.go:138] k8s.io/client-go/metadata/metadatainformer/informer.go:90: Failed to watch *v1.PartialObjectMetadata: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
E0223 10:20:08.538114 60038 reflector.go:138] k8s.io/client-go/metadata/metadatainformer/informer.go:90: Failed to watch *v1.PartialObjectMetadata: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
I0223 10:20:08.970233 60038 namespace_controller.go:185] Namespace has been deleted test-secrets
+++ exit code: 0
Recording: run_client_config_tests
Running command: run_client_config_tests
+++ Running case: test-cmd.run_client_config_tests
+++ working dir: /home/prow/go/src/k8s.io/kubernetes
+++ command: run_client_config_tests
+++ [0223 10:20:11] Creating namespace namespace-1614075611-31801
namespace/namespace-1614075611-31801 created
Context "test" modified.
+++ [0223 10:20:11] Testing client config
Successful
message:error: stat missing: no such file or directory
has:missing: no such file or directory
Successful
message:error: stat missing: no such file or directory
has:missing: no such file or directory
Successful
message:error: stat missing: no such file or directory
has:missing: no such file or directory
Successful
message:Error in configuration: context was not found for specified context: missing-context
has:context was not found for specified context: missing-context
Successful
message:error: no server found for cluster "missing-cluster"
has:no server found for cluster "missing-cluster"
Successful
message:error: auth info "missing-user" does not exist
has:auth info "missing-user" does not exist
Successful
message:error: error loading config file "/tmp/newconfig.yaml": no kind "Config" is registered for version "v-1" in scheme "k8s.io/client-go/tools/clientcmd/api/latest/latest.go:50"
has:error loading config file
Successful
message:error: stat missing-config: no such file or directory
has:no such file or directory
+++ exit code: 0
Recording: run_service_accounts_tests
Running command: run_service_accounts_tests
+++ Running case: test-cmd.run_service_accounts_tests
... skipping 43 lines ...
Labels:                        <none>
Annotations:                   <none>
Schedule:                      59 23 31 2 *
Concurrency Policy:            Allow
Suspend:                       False
Successful Job History Limit:  3
Failed Job History Limit:      1
Starting Deadline Seconds:     <unset>
Selector:                      <unset>
Parallelism:                   <unset>
Completions:                   <unset>
Pod Template:
  Labels:  <none>
... skipping 38 lines ...

Labels: controller-uid=03318d45-dc95-41f3-8b3b-9ba9da967661 job-name=test-job Annotations: cronjob.kubernetes.io/instantiate: manual Parallelism: 1 Completions: 1 Start Time: Tue, 23 Feb 2021 10:20:20 +0000 Pods Statuses: 1 Running / 0 Succeeded / 0 Failed Pod Template: Labels: controller-uid=03318d45-dc95-41f3-8b3b-9ba9da967661 job-name=test-job Containers: pi: Image: k8s.gcr.io/perl ... skipping 416 lines ... type: ClusterIP status: loadBalancer: {} Successful message:kubectl-create kubectl-set has:kubectl-set error: you must specify resources by --filename when --local is set. Example resource specifications include: '-f rsrc.yaml' '--filename=rsrc.json' [32mcore.sh:1020: Successful get services redis-master {{range.spec.selector}}{{.}}:{{end}}: redis:master:backend: (B[mI0223 10:20:30.646057 60038 namespace_controller.go:185] Namespace has been deleted test-jobs service/redis-master selector updated Successful message:Error from server (Conflict): Operation cannot be fulfilled on services "redis-master": the object has been modified; please apply your changes to the latest version and try again has:Conflict [32mcore.sh:1033: Successful get services {{range.items}}{{.metadata.name}}:{{end}}: kubernetes:redis-master: (B[mservice "redis-master" deleted [32mcore.sh:1040: Successful get services {{range.items}}{{.metadata.name}}:{{end}}: kubernetes: (B[m[32mcore.sh:1044: Successful get services {{range.items}}{{.metadata.name}}:{{end}}: kubernetes: (B[mservice/redis-master created ... skipping 122 lines ... (dry run) daemonset.apps/bind rolled back (server dry run) [32mapps.sh:87: Successful get daemonset {{range.items}}{{(index .spec.template.spec.containers 0).image}}:{{end}}: k8s.gcr.io/pause:latest: (B[m[32mapps.sh:88: Successful get daemonset {{range.items}}{{(index .spec.template.spec.containers 1).image}}:{{end}}: k8s.gcr.io/nginx:test-cmd: (B[m[32mapps.sh:89: Successful get daemonset {{range.items}}{{(len .spec.template.spec.containers)}}{{end}}: 2 (B[mdaemonset.apps/bind rolled back E0223 10:20:39.982396 60038 daemon_controller.go:320] namespace-1614075637-10655/bind failed with : error storing status for daemon set &v1.DaemonSet{TypeMeta:v1.TypeMeta{Kind:"", APIVersion:""}, ObjectMeta:v1.ObjectMeta{Name:"bind", GenerateName:"", Namespace:"namespace-1614075637-10655", SelfLink:"", UID:"39ee7da9-cef7-430d-aef5-83080b172ce6", ResourceVersion:"1994", Generation:3, CreationTimestamp:v1.Time{Time:time.Time{wall:0x0, ext:63749672438, loc:(*time.Location)(0x6f815e0)}}, DeletionTimestamp:(*v1.Time)(nil), DeletionGracePeriodSeconds:(*int64)(nil), Labels:map[string]string{"service":"bind"}, Annotations:map[string]string{"deprecated.daemonset.template.generation":"3", "kubectl.kubernetes.io/last-applied-configuration":"{\"apiVersion\":\"apps/v1\",\"kind\":\"DaemonSet\",\"metadata\":{\"annotations\":{\"kubernetes.io/change-cause\":\"kubectl apply --filename=hack/testdata/rollingupdate-daemonset-rv2.yaml --record=true --server=https://127.0.0.1:6443 --insecure-skip-tls-verify=true 
--match-server-version=true\"},\"labels\":{\"service\":\"bind\"},\"name\":\"bind\",\"namespace\":\"namespace-1614075637-10655\"},\"spec\":{\"selector\":{\"matchLabels\":{\"service\":\"bind\"}},\"template\":{\"metadata\":{\"labels\":{\"service\":\"bind\"}},\"spec\":{\"affinity\":{\"podAntiAffinity\":{\"requiredDuringSchedulingIgnoredDuringExecution\":[{\"labelSelector\":{\"matchExpressions\":[{\"key\":\"service\",\"operator\":\"In\",\"values\":[\"bind\"]}]},\"namespaces\":[],\"topologyKey\":\"kubernetes.io/hostname\"}]}},\"containers\":[{\"image\":\"k8s.gcr.io/pause:latest\",\"name\":\"kubernetes-pause\"},{\"image\":\"k8s.gcr.io/nginx:test-cmd\",\"name\":\"app\"}]}},\"updateStrategy\":{\"rollingUpdate\":{\"maxUnavailable\":\"10%\"},\"type\":\"RollingUpdate\"}}}\n", "kubernetes.io/change-cause":"kubectl apply --filename=hack/testdata/rollingupdate-daemonset-rv2.yaml --record=true --server=https://127.0.0.1:6443 --insecure-skip-tls-verify=true --match-server-version=true"}, OwnerReferences:[]v1.OwnerReference(nil), Finalizers:[]string(nil), ClusterName:"", ManagedFields:[]v1.ManagedFieldsEntry{v1.ManagedFieldsEntry{Manager:"kube-controller-manager", Operation:"Update", APIVersion:"apps/v1", Time:(*v1.Time)(0xc003c3b320), FieldsType:"FieldsV1", FieldsV1:(*v1.FieldsV1)(0xc003c3b3a0)}, v1.ManagedFieldsEntry{Manager:"kubectl-client-side-apply", Operation:"Update", APIVersion:"apps/v1", Time:(*v1.Time)(0xc003c3b3c0), FieldsType:"FieldsV1", FieldsV1:(*v1.FieldsV1)(0xc003c3b400)}, v1.ManagedFieldsEntry{Manager:"kubectl", Operation:"Update", APIVersion:"apps/v1", Time:(*v1.Time)(0xc003c3b420), FieldsType:"FieldsV1", FieldsV1:(*v1.FieldsV1)(0xc003c3b440)}}}, Spec:v1.DaemonSetSpec{Selector:(*v1.LabelSelector)(0xc003c3b460), Template:v1.PodTemplateSpec{ObjectMeta:v1.ObjectMeta{Name:"", GenerateName:"", Namespace:"", SelfLink:"", UID:"", ResourceVersion:"", Generation:0, CreationTimestamp:v1.Time{Time:time.Time{wall:0x0, ext:0, loc:(*time.Location)(nil)}}, DeletionTimestamp:(*v1.Time)(nil), DeletionGracePeriodSeconds:(*int64)(nil), Labels:map[string]string{"service":"bind"}, Annotations:map[string]string(nil), OwnerReferences:[]v1.OwnerReference(nil), Finalizers:[]string(nil), ClusterName:"", ManagedFields:[]v1.ManagedFieldsEntry(nil)}, Spec:v1.PodSpec{Volumes:[]v1.Volume(nil), InitContainers:[]v1.Container(nil), Containers:[]v1.Container{v1.Container{Name:"kubernetes-pause", Image:"k8s.gcr.io/pause:2.0", Command:[]string(nil), Args:[]string(nil), WorkingDir:"", Ports:[]v1.ContainerPort(nil), EnvFrom:[]v1.EnvFromSource(nil), Env:[]v1.EnvVar(nil), Resources:v1.ResourceRequirements{Limits:v1.ResourceList(nil), Requests:v1.ResourceList(nil)}, VolumeMounts:[]v1.VolumeMount(nil), VolumeDevices:[]v1.VolumeDevice(nil), LivenessProbe:(*v1.Probe)(nil), ReadinessProbe:(*v1.Probe)(nil), StartupProbe:(*v1.Probe)(nil), Lifecycle:(*v1.Lifecycle)(nil), TerminationMessagePath:"/dev/termination-log", TerminationMessagePolicy:"File", ImagePullPolicy:"IfNotPresent", SecurityContext:(*v1.SecurityContext)(nil), Stdin:false, StdinOnce:false, TTY:false}}, EphemeralContainers:[]v1.EphemeralContainer(nil), RestartPolicy:"Always", TerminationGracePeriodSeconds:(*int64)(0xc0027224f8), ActiveDeadlineSeconds:(*int64)(nil), DNSPolicy:"ClusterFirst", NodeSelector:map[string]string(nil), ServiceAccountName:"", DeprecatedServiceAccount:"", AutomountServiceAccountToken:(*bool)(nil), NodeName:"", HostNetwork:false, HostPID:false, HostIPC:false, ShareProcessNamespace:(*bool)(nil), SecurityContext:(*v1.PodSecurityContext)(0xc0000c9f10), 
ImagePullSecrets:[]v1.LocalObjectReference(nil), Hostname:"", Subdomain:"", Affinity:(*v1.Affinity)(0xc003c3b480), SchedulerName:"default-scheduler", Tolerations:[]v1.Toleration(nil), HostAliases:[]v1.HostAlias(nil), PriorityClassName:"", Priority:(*int32)(nil), DNSConfig:(*v1.PodDNSConfig)(nil), ReadinessGates:[]v1.PodReadinessGate(nil), RuntimeClassName:(*string)(nil), EnableServiceLinks:(*bool)(nil), PreemptionPolicy:(*v1.PreemptionPolicy)(nil), Overhead:v1.ResourceList(nil), TopologySpreadConstraints:[]v1.TopologySpreadConstraint(nil), SetHostnameAsFQDN:(*bool)(nil)}}, UpdateStrategy:v1.DaemonSetUpdateStrategy{Type:"RollingUpdate", RollingUpdate:(*v1.RollingUpdateDaemonSet)(0xc002563a90)}, MinReadySeconds:0, RevisionHistoryLimit:(*int32)(0xc00272254c)}, Status:v1.DaemonSetStatus{CurrentNumberScheduled:0, NumberMisscheduled:0, DesiredNumberScheduled:0, NumberReady:0, ObservedGeneration:2, UpdatedNumberScheduled:0, NumberAvailable:0, NumberUnavailable:0, CollisionCount:(*int32)(nil), Conditions:[]v1.DaemonSetCondition(nil)}}: Operation cannot be fulfilled on daemonsets.apps "bind": the object has been modified; please apply your changes to the latest version and try again [32mapps.sh:92: Successful get daemonset {{range.items}}{{(index .spec.template.spec.containers 0).image}}:{{end}}: k8s.gcr.io/pause:2.0: (B[m[32mapps.sh:93: Successful get daemonset {{range.items}}{{(len .spec.template.spec.containers)}}{{end}}: 1 (B[mSuccessful message:error: unable to find specified revision 1000000 in history has:unable to find specified revision [32mapps.sh:97: Successful get daemonset {{range.items}}{{(index .spec.template.spec.containers 0).image}}:{{end}}: k8s.gcr.io/pause:2.0: (B[m[32mapps.sh:98: Successful get daemonset {{range.items}}{{(len .spec.template.spec.containers)}}{{end}}: 1 (B[mdaemonset.apps/bind rolled back E0223 10:20:40.606565 60038 daemon_controller.go:320] namespace-1614075637-10655/bind failed with : error storing status for daemon set &v1.DaemonSet{TypeMeta:v1.TypeMeta{Kind:"", APIVersion:""}, ObjectMeta:v1.ObjectMeta{Name:"bind", GenerateName:"", Namespace:"namespace-1614075637-10655", SelfLink:"", UID:"39ee7da9-cef7-430d-aef5-83080b172ce6", ResourceVersion:"1997", Generation:4, CreationTimestamp:v1.Time{Time:time.Time{wall:0x0, ext:63749672438, loc:(*time.Location)(0x6f815e0)}}, DeletionTimestamp:(*v1.Time)(nil), DeletionGracePeriodSeconds:(*int64)(nil), Labels:map[string]string{"service":"bind"}, Annotations:map[string]string{"deprecated.daemonset.template.generation":"4", "kubectl.kubernetes.io/last-applied-configuration":"{\"apiVersion\":\"apps/v1\",\"kind\":\"DaemonSet\",\"metadata\":{\"annotations\":{\"kubernetes.io/change-cause\":\"kubectl apply --filename=hack/testdata/rollingupdate-daemonset-rv2.yaml --record=true --server=https://127.0.0.1:6443 --insecure-skip-tls-verify=true 
--match-server-version=true\"},\"labels\":{\"service\":\"bind\"},\"name\":\"bind\",\"namespace\":\"namespace-1614075637-10655\"},\"spec\":{\"selector\":{\"matchLabels\":{\"service\":\"bind\"}},\"template\":{\"metadata\":{\"labels\":{\"service\":\"bind\"}},\"spec\":{\"affinity\":{\"podAntiAffinity\":{\"requiredDuringSchedulingIgnoredDuringExecution\":[{\"labelSelector\":{\"matchExpressions\":[{\"key\":\"service\",\"operator\":\"In\",\"values\":[\"bind\"]}]},\"namespaces\":[],\"topologyKey\":\"kubernetes.io/hostname\"}]}},\"containers\":[{\"image\":\"k8s.gcr.io/pause:latest\",\"name\":\"kubernetes-pause\"},{\"image\":\"k8s.gcr.io/nginx:test-cmd\",\"name\":\"app\"}]}},\"updateStrategy\":{\"rollingUpdate\":{\"maxUnavailable\":\"10%\"},\"type\":\"RollingUpdate\"}}}\n", "kubernetes.io/change-cause":"kubectl apply --filename=hack/testdata/rollingupdate-daemonset-rv2.yaml --record=true --server=https://127.0.0.1:6443 --insecure-skip-tls-verify=true --match-server-version=true"}, OwnerReferences:[]v1.OwnerReference(nil), Finalizers:[]string(nil), ClusterName:"", ManagedFields:[]v1.ManagedFieldsEntry{v1.ManagedFieldsEntry{Manager:"kube-controller-manager", Operation:"Update", APIVersion:"apps/v1", Time:(*v1.Time)(0xc001399780), FieldsType:"FieldsV1", FieldsV1:(*v1.FieldsV1)(0xc0013997c0)}, v1.ManagedFieldsEntry{Manager:"kubectl-client-side-apply", Operation:"Update", APIVersion:"apps/v1", Time:(*v1.Time)(0xc0013997e0), FieldsType:"FieldsV1", FieldsV1:(*v1.FieldsV1)(0xc001399820)}, v1.ManagedFieldsEntry{Manager:"kubectl", Operation:"Update", APIVersion:"apps/v1", Time:(*v1.Time)(0xc001399880), FieldsType:"FieldsV1", FieldsV1:(*v1.FieldsV1)(0xc001399960)}}}, Spec:v1.DaemonSetSpec{Selector:(*v1.LabelSelector)(0xc001399a00), Template:v1.PodTemplateSpec{ObjectMeta:v1.ObjectMeta{Name:"", GenerateName:"", Namespace:"", SelfLink:"", UID:"", ResourceVersion:"", Generation:0, CreationTimestamp:v1.Time{Time:time.Time{wall:0x0, ext:0, loc:(*time.Location)(nil)}}, DeletionTimestamp:(*v1.Time)(nil), DeletionGracePeriodSeconds:(*int64)(nil), Labels:map[string]string{"service":"bind"}, Annotations:map[string]string(nil), OwnerReferences:[]v1.OwnerReference(nil), Finalizers:[]string(nil), ClusterName:"", ManagedFields:[]v1.ManagedFieldsEntry(nil)}, Spec:v1.PodSpec{Volumes:[]v1.Volume(nil), InitContainers:[]v1.Container(nil), Containers:[]v1.Container{v1.Container{Name:"kubernetes-pause", Image:"k8s.gcr.io/pause:latest", Command:[]string(nil), Args:[]string(nil), WorkingDir:"", Ports:[]v1.ContainerPort(nil), EnvFrom:[]v1.EnvFromSource(nil), Env:[]v1.EnvVar(nil), Resources:v1.ResourceRequirements{Limits:v1.ResourceList(nil), Requests:v1.ResourceList(nil)}, VolumeMounts:[]v1.VolumeMount(nil), VolumeDevices:[]v1.VolumeDevice(nil), LivenessProbe:(*v1.Probe)(nil), ReadinessProbe:(*v1.Probe)(nil), StartupProbe:(*v1.Probe)(nil), Lifecycle:(*v1.Lifecycle)(nil), TerminationMessagePath:"/dev/termination-log", TerminationMessagePolicy:"File", ImagePullPolicy:"IfNotPresent", SecurityContext:(*v1.SecurityContext)(nil), Stdin:false, StdinOnce:false, TTY:false}, v1.Container{Name:"app", Image:"k8s.gcr.io/nginx:test-cmd", Command:[]string(nil), Args:[]string(nil), WorkingDir:"", Ports:[]v1.ContainerPort(nil), EnvFrom:[]v1.EnvFromSource(nil), Env:[]v1.EnvVar(nil), Resources:v1.ResourceRequirements{Limits:v1.ResourceList(nil), Requests:v1.ResourceList(nil)}, VolumeMounts:[]v1.VolumeMount(nil), VolumeDevices:[]v1.VolumeDevice(nil), LivenessProbe:(*v1.Probe)(nil), ReadinessProbe:(*v1.Probe)(nil), StartupProbe:(*v1.Probe)(nil), 
Lifecycle:(*v1.Lifecycle)(nil), TerminationMessagePath:"/dev/termination-log", TerminationMessagePolicy:"File", ImagePullPolicy:"IfNotPresent", SecurityContext:(*v1.SecurityContext)(nil), Stdin:false, StdinOnce:false, TTY:false}}, EphemeralContainers:[]v1.EphemeralContainer(nil), RestartPolicy:"Always", TerminationGracePeriodSeconds:(*int64)(0xc0036a5698), ActiveDeadlineSeconds:(*int64)(nil), DNSPolicy:"ClusterFirst", NodeSelector:map[string]string(nil), ServiceAccountName:"", DeprecatedServiceAccount:"", AutomountServiceAccountToken:(*bool)(nil), NodeName:"", HostNetwork:false, HostPID:false, HostIPC:false, ShareProcessNamespace:(*bool)(nil), SecurityContext:(*v1.PodSecurityContext)(0xc000c4b420), ImagePullSecrets:[]v1.LocalObjectReference(nil), Hostname:"", Subdomain:"", Affinity:(*v1.Affinity)(0xc001399aa0), SchedulerName:"default-scheduler", Tolerations:[]v1.Toleration(nil), HostAliases:[]v1.HostAlias(nil), PriorityClassName:"", Priority:(*int32)(nil), DNSConfig:(*v1.PodDNSConfig)(nil), ReadinessGates:[]v1.PodReadinessGate(nil), RuntimeClassName:(*string)(nil), EnableServiceLinks:(*bool)(nil), PreemptionPolicy:(*v1.PreemptionPolicy)(nil), Overhead:v1.ResourceList(nil), TopologySpreadConstraints:[]v1.TopologySpreadConstraint(nil), SetHostnameAsFQDN:(*bool)(nil)}}, UpdateStrategy:v1.DaemonSetUpdateStrategy{Type:"RollingUpdate", RollingUpdate:(*v1.RollingUpdateDaemonSet)(0xc002d4b950)}, MinReadySeconds:0, RevisionHistoryLimit:(*int32)(0xc0036a56ec)}, Status:v1.DaemonSetStatus{CurrentNumberScheduled:0, NumberMisscheduled:0, DesiredNumberScheduled:0, NumberReady:0, ObservedGeneration:3, UpdatedNumberScheduled:0, NumberAvailable:0, NumberUnavailable:0, CollisionCount:(*int32)(nil), Conditions:[]v1.DaemonSetCondition(nil)}}: Operation cannot be fulfilled on daemonsets.apps "bind": the object has been modified; please apply your changes to the latest version and try again [32mapps.sh:101: Successful get daemonset {{range.items}}{{(index .spec.template.spec.containers 0).image}}:{{end}}: k8s.gcr.io/pause:latest: (B[m[32mapps.sh:102: Successful get daemonset {{range.items}}{{(index .spec.template.spec.containers 1).image}}:{{end}}: k8s.gcr.io/nginx:test-cmd: (B[m[32mapps.sh:103: Successful get daemonset {{range.items}}{{(len .spec.template.spec.containers)}}{{end}}: 2 (B[mdaemonset.apps "bind" deleted +++ exit code: 0 Recording: run_rc_tests ... skipping 32 lines ... Namespace: namespace-1614075641-31884 Selector: app=guestbook,tier=frontend Labels: app=guestbook tier=frontend Annotations: <none> Replicas: 3 current / 3 desired Pods Status: 0 Running / 3 Waiting / 0 Succeeded / 0 Failed Pod Template: Labels: app=guestbook tier=frontend Containers: php-redis: Image: gcr.io/google_samples/gb-frontend:v4 ... skipping 17 lines ... Namespace: namespace-1614075641-31884 Selector: app=guestbook,tier=frontend Labels: app=guestbook tier=frontend Annotations: <none> Replicas: 3 current / 3 desired Pods Status: 0 Running / 3 Waiting / 0 Succeeded / 0 Failed Pod Template: Labels: app=guestbook tier=frontend Containers: php-redis: Image: gcr.io/google_samples/gb-frontend:v4 ... skipping 18 lines ... Namespace: namespace-1614075641-31884 Selector: app=guestbook,tier=frontend Labels: app=guestbook tier=frontend Annotations: <none> Replicas: 3 current / 3 desired Pods Status: 0 Running / 3 Waiting / 0 Succeeded / 0 Failed Pod Template: Labels: app=guestbook tier=frontend Containers: php-redis: Image: gcr.io/google_samples/gb-frontend:v4 ... skipping 12 lines ... 
Namespace: namespace-1614075641-31884 Selector: app=guestbook,tier=frontend Labels: app=guestbook tier=frontend Annotations: <none> Replicas: 3 current / 3 desired Pods Status: 0 Running / 3 Waiting / 0 Succeeded / 0 Failed Pod Template: Labels: app=guestbook tier=frontend Containers: php-redis: Image: gcr.io/google_samples/gb-frontend:v4 ... skipping 27 lines ... Namespace: namespace-1614075641-31884 Selector: app=guestbook,tier=frontend Labels: app=guestbook tier=frontend Annotations: <none> Replicas: 3 current / 3 desired Pods Status: 0 Running / 3 Waiting / 0 Succeeded / 0 Failed Pod Template: Labels: app=guestbook tier=frontend Containers: php-redis: Image: gcr.io/google_samples/gb-frontend:v4 ... skipping 17 lines ... Namespace: namespace-1614075641-31884 Selector: app=guestbook,tier=frontend Labels: app=guestbook tier=frontend Annotations: <none> Replicas: 3 current / 3 desired Pods Status: 0 Running / 3 Waiting / 0 Succeeded / 0 Failed Pod Template: Labels: app=guestbook tier=frontend Containers: php-redis: Image: gcr.io/google_samples/gb-frontend:v4 ... skipping 17 lines ... Namespace: namespace-1614075641-31884 Selector: app=guestbook,tier=frontend Labels: app=guestbook tier=frontend Annotations: <none> Replicas: 3 current / 3 desired Pods Status: 0 Running / 3 Waiting / 0 Succeeded / 0 Failed Pod Template: Labels: app=guestbook tier=frontend Containers: php-redis: Image: gcr.io/google_samples/gb-frontend:v4 ... skipping 11 lines ... Namespace: namespace-1614075641-31884 Selector: app=guestbook,tier=frontend Labels: app=guestbook tier=frontend Annotations: <none> Replicas: 3 current / 3 desired Pods Status: 0 Running / 3 Waiting / 0 Succeeded / 0 Failed Pod Template: Labels: app=guestbook tier=frontend Containers: php-redis: Image: gcr.io/google_samples/gb-frontend:v4 ... skipping 15 lines ... 
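The repeated blocks above are successive describe invocations against the frontend replication controller created by run_rc_tests; the test exercises several variants of the same command, roughly:

  kubectl describe rc frontend                        # a single controller by name
  kubectl describe rc -l app=guestbook                # every controller matching a label selector
  kubectl describe rc frontend --show-events=false    # suppress the trailing Events table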
(B[m[32mcore.sh:1224: Successful get rc frontend {{.spec.replicas}}: 3 (B[mreplicationcontroller/frontend scaled E0223 10:20:43.129361 60038 replica_set.go:201] ReplicaSet has no controller: &ReplicaSet{ObjectMeta:{frontend namespace-1614075641-31884 e814ba93-c05a-4879-81ef-d378ba8a8b45 2033 2 2021-02-23 10:20:41 +0000 UTC <nil> <nil> map[app:guestbook tier:frontend] map[] [] [] [{kube-controller-manager Update v1 2021-02-23 10:20:41 +0000 UTC FieldsV1 {"f:status":{"f:fullyLabeledReplicas":{},"f:observedGeneration":{},"f:replicas":{}}}} {kubectl-create Update v1 2021-02-23 10:20:41 +0000 UTC FieldsV1 {"f:metadata":{"f:labels":{".":{},"f:app":{},"f:tier":{}}},"f:spec":{"f:replicas":{},"f:selector":{".":{},"f:app":{},"f:tier":{}},"f:template":{".":{},"f:metadata":{".":{},"f:creationTimestamp":{},"f:labels":{".":{},"f:app":{},"f:tier":{}}},"f:spec":{".":{},"f:containers":{".":{},"k:{\"name\":\"php-redis\"}":{".":{},"f:env":{".":{},"k:{\"name\":\"GET_HOSTS_FROM\"}":{".":{},"f:name":{},"f:value":{}}},"f:image":{},"f:imagePullPolicy":{},"f:name":{},"f:ports":{".":{},"k:{\"containerPort\":80,\"protocol\":\"TCP\"}":{".":{},"f:containerPort":{},"f:protocol":{}}},"f:resources":{".":{},"f:requests":{".":{},"f:cpu":{},"f:memory":{}}},"f:terminationMessagePath":{},"f:terminationMessagePolicy":{}}},"f:dnsPolicy":{},"f:restartPolicy":{},"f:schedulerName":{},"f:securityContext":{},"f:terminationGracePeriodSeconds":{}}}}}}]},Spec:ReplicaSetSpec{Replicas:*2,Selector:&v1.LabelSelector{MatchLabels:map[string]string{app: guestbook,tier: frontend,},MatchExpressions:[]LabelSelectorRequirement{},},Template:{{ 0 0001-01-01 00:00:00 +0000 UTC <nil> <nil> map[app:guestbook tier:frontend] map[] [] [] []} {[] [] [{php-redis gcr.io/google_samples/gb-frontend:v4 [] [] [{ 0 80 TCP }] [] [{GET_HOSTS_FROM dns nil}] {map[] map[cpu:{{100 -3} {<nil>} 100m DecimalSI} memory:{{104857600 0} {<nil>} 100Mi BinarySI}]} [] [] nil nil nil nil /dev/termination-log File IfNotPresent nil false false false}] [] Always 0xc00183e4f8 <nil> ClusterFirst map[] <nil> false false false <nil> PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[]Sysctl{},WindowsOptions:nil,FSGroupChangePolicy:nil,SeccompProfile:nil,} [] nil default-scheduler [] [] <nil> nil [] <nil> <nil> <nil> map[] [] <nil>}},MinReadySeconds:0,},Status:ReplicaSetStatus{Replicas:3,FullyLabeledReplicas:3,ObservedGeneration:1,ReadyReplicas:0,AvailableReplicas:0,Conditions:[]ReplicaSetCondition{},},} I0223 10:20:43.137170 60038 event.go:291] "Event occurred" object="namespace-1614075641-31884/frontend" kind="ReplicationController" apiVersion="v1" type="Normal" reason="SuccessfulDelete" message="Deleted pod: frontend-zhfjx" [32mcore.sh:1228: Successful get rc frontend {{.spec.replicas}}: 2 (B[m[32mcore.sh:1232: Successful get rc frontend {{.spec.replicas}}: 2 (B[merror: Expected replicas to be 3, was 2 [32mcore.sh:1236: Successful get rc frontend {{.spec.replicas}}: 2 (B[m[32mcore.sh:1240: Successful get rc frontend {{.spec.replicas}}: 2 (B[mreplicationcontroller/frontend scaled I0223 10:20:43.670810 60038 event.go:291] "Event occurred" object="namespace-1614075641-31884/frontend" kind="ReplicationController" apiVersion="v1" type="Normal" reason="SuccessfulCreate" message="Created pod: frontend-l5csn" [32mcore.sh:1244: Successful get rc frontend {{.spec.replicas}}: 3 (B[m[32mcore.sh:1248: Successful get rc frontend {{.spec.replicas}}: 3 ... skipping 12 lines ... 
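The "error: Expected replicas to be 3, was 2" line above is kubectl scale's precondition check failing by design: the test first scales the controller down, then asserts that a conditional scale against the old size is refused. Hypothetical equivalents of the calls being exercised:

  kubectl scale rc frontend --replicas=2                        # unconditional scale
  kubectl scale rc frontend --current-replicas=3 --replicas=2   # refused unless the rc currently has exactly 3 replicas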
I0223 10:20:44.589605 60038 event.go:291] "Event occurred" object="namespace-1614075641-31884/redis-master" kind="ReplicationController" apiVersion="v1" type="Normal" reason="SuccessfulCreate" message="Created pod: redis-master-4m496" I0223 10:20:44.595237 60038 event.go:291] "Event occurred" object="namespace-1614075641-31884/redis-master" kind="ReplicationController" apiVersion="v1" type="Normal" reason="SuccessfulCreate" message="Created pod: redis-master-bcnnb" I0223 10:20:44.595279 60038 event.go:291] "Event occurred" object="namespace-1614075641-31884/redis-master" kind="ReplicationController" apiVersion="v1" type="Normal" reason="SuccessfulCreate" message="Created pod: redis-master-vqh6r" I0223 10:20:44.597321 60038 event.go:291] "Event occurred" object="namespace-1614075641-31884/redis-slave" kind="ReplicationController" apiVersion="v1" type="Normal" reason="SuccessfulCreate" message="Created pod: redis-slave-2g8tk" I0223 10:20:44.600781 60038 event.go:291] "Event occurred" object="namespace-1614075641-31884/redis-slave" kind="ReplicationController" apiVersion="v1" type="Normal" reason="SuccessfulCreate" message="Created pod: redis-slave-7h7ml" [32mcore.sh:1262: Successful get rc redis-master {{.spec.replicas}}: 4 (B[mE0223 10:20:44.744931 60038 reflector.go:138] k8s.io/client-go/metadata/metadatainformer/informer.go:90: Failed to watch *v1.PartialObjectMetadata: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource [32mcore.sh:1263: Successful get rc redis-slave {{.spec.replicas}}: 4 (B[mreplicationcontroller "redis-master" deleted replicationcontroller "redis-slave" deleted deployment.apps/nginx-deployment created I0223 10:20:45.086142 60038 event.go:291] "Event occurred" object="namespace-1614075641-31884/nginx-deployment" kind="Deployment" apiVersion="apps/v1" type="Normal" reason="ScalingReplicaSet" message="Scaled up replica set nginx-deployment-76b5cd66f5 to 3" I0223 10:20:45.090405 60038 event.go:291] "Event occurred" object="namespace-1614075641-31884/nginx-deployment-76b5cd66f5" kind="ReplicaSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulCreate" message="Created pod: nginx-deployment-76b5cd66f5-dgj9w" ... skipping 7 lines ... (B[mdeployment.apps "nginx-deployment" deleted Successful message:service/expose-test-deployment exposed has:service/expose-test-deployment exposed service "expose-test-deployment" deleted Successful message:error: couldn't retrieve selectors via --selector flag or introspection: invalid deployment: no selectors, therefore cannot be exposed See 'kubectl expose -h' for help and examples has:invalid deployment: no selectors deployment.apps/nginx-deployment created I0223 10:20:45.901090 60038 event.go:291] "Event occurred" object="namespace-1614075641-31884/nginx-deployment" kind="Deployment" apiVersion="apps/v1" type="Normal" reason="ScalingReplicaSet" message="Scaled up replica set nginx-deployment-76b5cd66f5 to 3" I0223 10:20:45.904772 60038 event.go:291] "Event occurred" object="namespace-1614075641-31884/nginx-deployment-76b5cd66f5" kind="ReplicaSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulCreate" message="Created pod: nginx-deployment-76b5cd66f5-qqzqh" I0223 10:20:45.910740 60038 event.go:291] "Event occurred" object="namespace-1614075641-31884/nginx-deployment-76b5cd66f5" kind="ReplicaSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulCreate" message="Created pod: nginx-deployment-76b5cd66f5-r27tm" ... skipping 23 lines ... 
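The expose failure above ("invalid deployment: no selectors, therefore cannot be exposed") comes from the fact that kubectl expose builds the new Service's selector from the target object's spec, so a workload without a selector cannot be exposed. A minimal sketch of the happy path (names are illustrative):

  kubectl expose deployment nginx-deployment --port=80 --target-port=80   # copies the deployment's selector into the Service
  kubectl get svc nginx-deployment -o jsonpath='{.spec.selector}'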
service "frontend" deleted service "frontend-2" deleted service "frontend-3" deleted service "frontend-4" deleted service "frontend-5" deleted Successful message:error: cannot expose a Node has:cannot expose Successful message:The Service "invalid-large-service-name-that-has-more-than-sixty-three-characters" is invalid: metadata.name: Invalid value: "invalid-large-service-name-that-has-more-than-sixty-three-characters": must be no more than 63 characters has:metadata.name: Invalid value E0223 10:20:48.248222 60038 reflector.go:138] k8s.io/client-go/metadata/metadatainformer/informer.go:90: Failed to watch *v1.PartialObjectMetadata: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource Successful message:service/kubernetes-serve-hostname-testing-sixty-three-characters-in-len exposed has:kubernetes-serve-hostname-testing-sixty-three-characters-in-len exposed service "kubernetes-serve-hostname-testing-sixty-three-characters-in-len" deleted Successful message:service/etcd-server exposed has:etcd-server exposed E0223 10:20:48.526944 60038 reflector.go:138] k8s.io/client-go/metadata/metadatainformer/informer.go:90: Failed to watch *v1.PartialObjectMetadata: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource [32mcore.sh:1353: Successful get service etcd-server {{(index .spec.ports 0).name}} {{(index .spec.ports 0).port}}: port-1 2380 (B[m[32mcore.sh:1354: Successful get service etcd-server {{(index .spec.ports 1).name}} {{(index .spec.ports 1).port}}: port-2 2379 (B[mservice "etcd-server" deleted [32mcore.sh:1360: Successful get rc {{range.items}}{{.metadata.name}}:{{end}}: frontend: (B[mreplicationcontroller "frontend" deleted [32mcore.sh:1364: Successful get rc {{range.items}}{{.metadata.name}}:{{end}}: ... skipping 19 lines ... (B[mhorizontalpodautoscaler.autoscaling/frontend autoscaled [32mcore.sh:1391: Successful get hpa frontend {{.spec.minReplicas}} {{.spec.maxReplicas}} {{.spec.targetCPUUtilizationPercentage}}: 1 2 70 (B[mhorizontalpodautoscaler.autoscaling "frontend" deleted horizontalpodautoscaler.autoscaling/frontend autoscaled [32mcore.sh:1395: Successful get hpa frontend {{.spec.minReplicas}} {{.spec.maxReplicas}} {{.spec.targetCPUUtilizationPercentage}}: 2 3 80 (B[mhorizontalpodautoscaler.autoscaling "frontend" deleted Error: required flag(s) "max" not set replicationcontroller "frontend" deleted [32mcore.sh:1404: Successful get deployment {{range.items}}{{.metadata.name}}:{{end}}: (B[mapiVersion: apps/v1 kind: Deployment metadata: creationTimestamp: null ... skipping 24 lines ... 
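Two separate validations are exercised just above: Service names are capped at 63 characters (the DNS label limit), and kubectl autoscale insists on --max. The autoscale calls behind the hpa assertions look roughly like:

  kubectl autoscale rc frontend --min=1 --max=2 --cpu-percent=70
  kubectl autoscale rc frontend --min=2 --max=3 --cpu-percent=80
  kubectl autoscale rc frontend --min=2 --cpu-percent=80          # rejected: required flag "max" not set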
limits: cpu: 300m requests: cpu: 300m terminationGracePeriodSeconds: 0 status: {} Error from server (NotFound): deployments.apps "nginx-deployment-resources" not found deployment.apps/nginx-deployment-resources created I0223 10:20:51.516446 60038 event.go:291] "Event occurred" object="namespace-1614075641-31884/nginx-deployment-resources" kind="Deployment" apiVersion="apps/v1" type="Normal" reason="ScalingReplicaSet" message="Scaled up replica set nginx-deployment-resources-748ddcb48b to 3" I0223 10:20:51.520628 60038 event.go:291] "Event occurred" object="namespace-1614075641-31884/nginx-deployment-resources-748ddcb48b" kind="ReplicaSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulCreate" message="Created pod: nginx-deployment-resources-748ddcb48b-qsk5k" I0223 10:20:51.525367 60038 event.go:291] "Event occurred" object="namespace-1614075641-31884/nginx-deployment-resources-748ddcb48b" kind="ReplicaSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulCreate" message="Created pod: nginx-deployment-resources-748ddcb48b-pjftb" I0223 10:20:51.525595 60038 event.go:291] "Event occurred" object="namespace-1614075641-31884/nginx-deployment-resources-748ddcb48b" kind="ReplicaSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulCreate" message="Created pod: nginx-deployment-resources-748ddcb48b-5f9kz" [32mcore.sh:1410: Successful get deployment {{range.items}}{{.metadata.name}}:{{end}}: nginx-deployment-resources: (B[m[32mcore.sh:1411: Successful get deployment {{range.items}}{{(index .spec.template.spec.containers 0).image}}:{{end}}: k8s.gcr.io/nginx:test-cmd: (B[m[32mcore.sh:1412: Successful get deployment {{range.items}}{{(index .spec.template.spec.containers 1).image}}:{{end}}: k8s.gcr.io/perl: (B[mdeployment.apps/nginx-deployment-resources resource requirements updated I0223 10:20:51.898661 60038 event.go:291] "Event occurred" object="namespace-1614075641-31884/nginx-deployment-resources" kind="Deployment" apiVersion="apps/v1" type="Normal" reason="ScalingReplicaSet" message="Scaled up replica set nginx-deployment-resources-7bfb7d56b6 to 1" I0223 10:20:51.903928 60038 event.go:291] "Event occurred" object="namespace-1614075641-31884/nginx-deployment-resources-7bfb7d56b6" kind="ReplicaSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulCreate" message="Created pod: nginx-deployment-resources-7bfb7d56b6-cddz4" [32mcore.sh:1415: Successful get deployment {{range.items}}{{(index .spec.template.spec.containers 0).resources.limits.cpu}}:{{end}}: 100m: (B[m[32mcore.sh:1416: Successful get deployment {{range.items}}{{(index .spec.template.spec.containers 1).resources.limits.cpu}}:{{end}}: 100m: (B[merror: unable to find container named redis deployment.apps/nginx-deployment-resources resource requirements updated I0223 10:20:52.291759 60038 event.go:291] "Event occurred" object="namespace-1614075641-31884/nginx-deployment-resources" kind="Deployment" apiVersion="apps/v1" type="Normal" reason="ScalingReplicaSet" message="Scaled down replica set nginx-deployment-resources-748ddcb48b to 2" I0223 10:20:52.299203 60038 event.go:291] "Event occurred" object="namespace-1614075641-31884/nginx-deployment-resources-748ddcb48b" kind="ReplicaSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulDelete" message="Deleted pod: nginx-deployment-resources-748ddcb48b-qsk5k" I0223 10:20:52.303700 60038 event.go:291] "Event occurred" object="namespace-1614075641-31884/nginx-deployment-resources" kind="Deployment" apiVersion="apps/v1" type="Normal" reason="ScalingReplicaSet" message="Scaled up 
replica set nginx-deployment-resources-75dbcccf44 to 1" I0223 10:20:52.308931 60038 event.go:291] "Event occurred" object="namespace-1614075641-31884/nginx-deployment-resources-75dbcccf44" kind="ReplicaSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulCreate" message="Created pod: nginx-deployment-resources-75dbcccf44-7srfb" [32mcore.sh:1421: Successful get deployment {{range.items}}{{(index .spec.template.spec.containers 0).resources.limits.cpu}}:{{end}}: 200m: ... skipping 158 lines ... status: "True" type: Progressing observedGeneration: 4 replicas: 4 unavailableReplicas: 4 updatedReplicas: 1 error: you must specify resources by --filename when --local is set. Example resource specifications include: '-f rsrc.yaml' '--filename=rsrc.json' [32mcore.sh:1432: Successful get deployment {{range.items}}{{(index .spec.template.spec.containers 0).resources.limits.cpu}}:{{end}}: 200m: (B[m[32mcore.sh:1433: Successful get deployment {{range.items}}{{(index .spec.template.spec.containers 1).resources.limits.cpu}}:{{end}}: 300m: (B[m[32mcore.sh:1434: Successful get deployment {{range.items}}{{(index .spec.template.spec.containers 1).resources.requests.cpu}}:{{end}}: 300m: ... skipping 46 lines ... pod-template-hash=69dd6dcd84 Annotations: deployment.kubernetes.io/desired-replicas: 1 deployment.kubernetes.io/max-replicas: 2 deployment.kubernetes.io/revision: 1 Controlled By: Deployment/test-nginx-apps Replicas: 1 current / 1 desired Pods Status: 0 Running / 1 Waiting / 0 Succeeded / 0 Failed Pod Template: Labels: app=test-nginx-apps pod-template-hash=69dd6dcd84 Containers: nginx: Image: k8s.gcr.io/nginx:test-cmd ... skipping 55 lines ... [32mapps.sh:243: Successful get deployment {{range.items}}{{.metadata.name}}:{{end}}: (B[mdeployment.apps/nginx-deployment created I0223 10:20:56.712916 60038 event.go:291] "Event occurred" object="namespace-1614075653-6881/nginx-deployment" kind="Deployment" apiVersion="apps/v1" type="Normal" reason="ScalingReplicaSet" message="Scaled up replica set nginx-deployment-76b5cd66f5 to 3" I0223 10:20:56.717137 60038 event.go:291] "Event occurred" object="namespace-1614075653-6881/nginx-deployment-76b5cd66f5" kind="ReplicaSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulCreate" message="Created pod: nginx-deployment-76b5cd66f5-26zgl" I0223 10:20:56.722434 60038 event.go:291] "Event occurred" object="namespace-1614075653-6881/nginx-deployment-76b5cd66f5" kind="ReplicaSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulCreate" message="Created pod: nginx-deployment-76b5cd66f5-gl8s5" I0223 10:20:56.731118 60038 event.go:291] "Event occurred" object="namespace-1614075653-6881/nginx-deployment-76b5cd66f5" kind="ReplicaSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulCreate" message="Created pod: nginx-deployment-76b5cd66f5-xvkth" E0223 10:20:56.807005 60038 reflector.go:138] k8s.io/client-go/metadata/metadatainformer/informer.go:90: Failed to watch *v1.PartialObjectMetadata: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource [32mapps.sh:247: Successful get rs {{range.items}}{{.spec.replicas}}{{end}}: 3 (B[mdeployment.apps "nginx-deployment" deleted [32mapps.sh:251: Successful get rs {{range.items}}{{.metadata.name}}:{{end}}: (B[m[32mapps.sh:255: Successful get deployment {{range.items}}{{.metadata.name}}:{{end}}: (B[m[32mapps.sh:256: Successful get rs {{range.items}}{{.metadata.name}}:{{end}}: (B[mI0223 10:20:57.260918 60038 event.go:291] "Event occurred" object="namespace-1614075653-6881/nginx-deployment" 
kind="Deployment" apiVersion="apps/v1" type="Normal" reason="ScalingReplicaSet" message="Scaled up replica set nginx-deployment-f549558c6 to 1" ... skipping 35 lines ... [32mapps.sh:305: Successful get deployment.apps {{range.items}}{{(index .spec.template.spec.containers 0).image}}:{{end}}: k8s.gcr.io/nginx:1.7.9: (B[m Image: k8s.gcr.io/nginx:test-cmd deployment.apps/nginx rolled back (server dry run) [32mapps.sh:309: Successful get deployment.apps {{range.items}}{{(index .spec.template.spec.containers 0).image}}:{{end}}: k8s.gcr.io/nginx:1.7.9: (B[mdeployment.apps/nginx rolled back [32mapps.sh:313: Successful get deployment {{range.items}}{{(index .spec.template.spec.containers 0).image}}:{{end}}: k8s.gcr.io/nginx:test-cmd: (B[merror: unable to find specified revision 1000000 in history [32mapps.sh:316: Successful get deployment {{range.items}}{{(index .spec.template.spec.containers 0).image}}:{{end}}: k8s.gcr.io/nginx:test-cmd: (B[mdeployment.apps/nginx rolled back [32mapps.sh:320: Successful get deployment {{range.items}}{{(index .spec.template.spec.containers 0).image}}:{{end}}: k8s.gcr.io/nginx:1.7.9: (B[mdeployment.apps/nginx paused error: you cannot rollback a paused deployment; resume it first with 'kubectl rollout resume deployment/nginx' and try again error: deployments.apps "nginx" can't restart paused deployment (run rollout resume first) deployment.apps/nginx resumed deployment.apps/nginx rolled back deployment.kubernetes.io/revision-history: 1,3 error: desired revision (3) is different from the running revision (5) deployment.apps/nginx restarted I0223 10:21:04.151619 60038 event.go:291] "Event occurred" object="namespace-1614075653-6881/nginx" kind="Deployment" apiVersion="apps/v1" type="Normal" reason="ScalingReplicaSet" message="Scaled down replica set nginx-54785cbcb8 to 2" I0223 10:21:04.162513 60038 event.go:291] "Event occurred" object="namespace-1614075653-6881/nginx-54785cbcb8" kind="ReplicaSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulDelete" message="Deleted pod: nginx-54785cbcb8-kr5zd" I0223 10:21:04.167646 60038 event.go:291] "Event occurred" object="namespace-1614075653-6881/nginx" kind="Deployment" apiVersion="apps/v1" type="Normal" reason="ScalingReplicaSet" message="Scaled up replica set nginx-6f787d4899 to 1" I0223 10:21:04.171289 60038 event.go:291] "Event occurred" object="namespace-1614075653-6881/nginx-6f787d4899" kind="ReplicaSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulCreate" message="Created pod: nginx-6f787d4899-ktldx" Successful ... skipping 81 lines ... 
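The errors in this stretch document the deployment rollout state machine: a --to-revision that was never recorded is rejected, and a paused deployment can be neither rolled back nor restarted until it is resumed. The commands being exercised are, approximately:

  kubectl rollout undo deployment/nginx                         # back to the previous revision
  kubectl rollout undo deployment/nginx --to-revision=1000000   # error: unable to find specified revision
  kubectl rollout pause deployment/nginx
  kubectl rollout undo deployment/nginx                         # refused while paused
  kubectl rollout resume deployment/nginx
  kubectl rollout restart deployment/nginx                      # only allowed once the deployment is resumed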
(B[m[32mapps.sh:364: Successful get deployment {{range.items}}{{(index .spec.template.spec.containers 1).image}}:{{end}}: k8s.gcr.io/perl: (B[mdeployment.apps/nginx-deployment image updated I0223 10:21:07.116952 60038 event.go:291] "Event occurred" object="namespace-1614075653-6881/nginx-deployment" kind="Deployment" apiVersion="apps/v1" type="Normal" reason="ScalingReplicaSet" message="Scaled up replica set nginx-deployment-6dd48b9849 to 1" I0223 10:21:07.120839 60038 event.go:291] "Event occurred" object="namespace-1614075653-6881/nginx-deployment-6dd48b9849" kind="ReplicaSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulCreate" message="Created pod: nginx-deployment-6dd48b9849-whtvh" [32mapps.sh:367: Successful get deployment {{range.items}}{{(index .spec.template.spec.containers 0).image}}:{{end}}: k8s.gcr.io/nginx:1.7.9: (B[m[32mapps.sh:368: Successful get deployment {{range.items}}{{(index .spec.template.spec.containers 1).image}}:{{end}}: k8s.gcr.io/perl: (B[merror: unable to find container named "redis" deployment.apps/nginx-deployment image updated [32mapps.sh:373: Successful get deployment {{range.items}}{{(index .spec.template.spec.containers 0).image}}:{{end}}: k8s.gcr.io/nginx:test-cmd: (B[m[32mapps.sh:374: Successful get deployment {{range.items}}{{(index .spec.template.spec.containers 1).image}}:{{end}}: k8s.gcr.io/perl: (B[mdeployment.apps/nginx-deployment image updated [32mapps.sh:377: Successful get deployment {{range.items}}{{(index .spec.template.spec.containers 0).image}}:{{end}}: k8s.gcr.io/nginx:1.7.9: (B[m[32mapps.sh:378: Successful get deployment {{range.items}}{{(index .spec.template.spec.containers 1).image}}:{{end}}: k8s.gcr.io/perl: ... skipping 69 lines ... (B[mreplicaset.apps/frontend created I0223 10:21:12.497060 60038 event.go:291] "Event occurred" object="namespace-1614075672-24535/frontend" kind="ReplicaSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulCreate" message="Created pod: frontend-86bhg" +++ [0223 10:21:12] Deleting rs I0223 10:21:12.500816 60038 event.go:291] "Event occurred" object="namespace-1614075672-24535/frontend" kind="ReplicaSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulCreate" message="Created pod: frontend-rv5ch" I0223 10:21:12.501001 60038 event.go:291] "Event occurred" object="namespace-1614075672-24535/frontend" kind="ReplicaSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulCreate" message="Created pod: frontend-cwdqr" replicaset.apps "frontend" deleted E0223 10:21:12.652846 60038 replica_set.go:532] sync "namespace-1614075672-24535/frontend" failed with replicasets.apps "frontend" not found [32mapps.sh:547: Successful get pods -l "tier=frontend" {{range.items}}{{.metadata.name}}:{{end}}: (B[m[32mapps.sh:551: Successful get rs {{range.items}}{{.metadata.name}}:{{end}}: (B[mreplicaset.apps/frontend created I0223 10:21:12.964363 60038 event.go:291] "Event occurred" object="namespace-1614075672-24535/frontend" kind="ReplicaSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulCreate" message="Created pod: frontend-5tsjq" I0223 10:21:12.968332 60038 event.go:291] "Event occurred" object="namespace-1614075672-24535/frontend" kind="ReplicaSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulCreate" message="Created pod: frontend-wzzql" I0223 10:21:12.968372 60038 event.go:291] "Event occurred" object="namespace-1614075672-24535/frontend" kind="ReplicaSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulCreate" message="Created pod: frontend-lx9tp" [32mapps.sh:555: Successful get pods -l 
"tier=frontend" {{range.items}}{{(index .spec.containers 0).name}}:{{end}}: php-redis:php-redis:php-redis: (B[m+++ [0223 10:21:13] Deleting rs replicaset.apps "frontend" deleted E0223 10:21:13.249942 60038 replica_set.go:532] sync "namespace-1614075672-24535/frontend" failed with replicasets.apps "frontend" not found [32mapps.sh:559: Successful get rs {{range.items}}{{.metadata.name}}:{{end}}: (B[m[32mapps.sh:561: Successful get pods -l "tier=frontend" {{range.items}}{{(index .spec.containers 0).name}}:{{end}}: php-redis:php-redis:php-redis: (B[mpod "frontend-5tsjq" deleted pod "frontend-lx9tp" deleted pod "frontend-wzzql" deleted [32mapps.sh:564: Successful get pods {{range.items}}{{.metadata.name}}:{{end}}: ... skipping 16 lines ... Namespace: namespace-1614075672-24535 Selector: app=guestbook,tier=frontend Labels: app=guestbook tier=frontend Annotations: <none> Replicas: 3 current / 3 desired Pods Status: 0 Running / 3 Waiting / 0 Succeeded / 0 Failed Pod Template: Labels: app=guestbook tier=frontend Containers: php-redis: Image: gcr.io/google_samples/gb-frontend:v3 ... skipping 17 lines ... Namespace: namespace-1614075672-24535 Selector: app=guestbook,tier=frontend Labels: app=guestbook tier=frontend Annotations: <none> Replicas: 3 current / 3 desired Pods Status: 0 Running / 3 Waiting / 0 Succeeded / 0 Failed Pod Template: Labels: app=guestbook tier=frontend Containers: php-redis: Image: gcr.io/google_samples/gb-frontend:v3 ... skipping 18 lines ... Namespace: namespace-1614075672-24535 Selector: app=guestbook,tier=frontend Labels: app=guestbook tier=frontend Annotations: <none> Replicas: 3 current / 3 desired Pods Status: 0 Running / 3 Waiting / 0 Succeeded / 0 Failed Pod Template: Labels: app=guestbook tier=frontend Containers: php-redis: Image: gcr.io/google_samples/gb-frontend:v3 ... skipping 12 lines ... Namespace: namespace-1614075672-24535 Selector: app=guestbook,tier=frontend Labels: app=guestbook tier=frontend Annotations: <none> Replicas: 3 current / 3 desired Pods Status: 0 Running / 3 Waiting / 0 Succeeded / 0 Failed Pod Template: Labels: app=guestbook tier=frontend Containers: php-redis: Image: gcr.io/google_samples/gb-frontend:v3 ... skipping 25 lines ... Namespace: namespace-1614075672-24535 Selector: app=guestbook,tier=frontend Labels: app=guestbook tier=frontend Annotations: <none> Replicas: 3 current / 3 desired Pods Status: 0 Running / 3 Waiting / 0 Succeeded / 0 Failed Pod Template: Labels: app=guestbook tier=frontend Containers: php-redis: Image: gcr.io/google_samples/gb-frontend:v3 ... skipping 17 lines ... Namespace: namespace-1614075672-24535 Selector: app=guestbook,tier=frontend Labels: app=guestbook tier=frontend Annotations: <none> Replicas: 3 current / 3 desired Pods Status: 0 Running / 3 Waiting / 0 Succeeded / 0 Failed Pod Template: Labels: app=guestbook tier=frontend Containers: php-redis: Image: gcr.io/google_samples/gb-frontend:v3 ... skipping 17 lines ... Namespace: namespace-1614075672-24535 Selector: app=guestbook,tier=frontend Labels: app=guestbook tier=frontend Annotations: <none> Replicas: 3 current / 3 desired Pods Status: 0 Running / 3 Waiting / 0 Succeeded / 0 Failed Pod Template: Labels: app=guestbook tier=frontend Containers: php-redis: Image: gcr.io/google_samples/gb-frontend:v3 ... skipping 11 lines ... 
Namespace: namespace-1614075672-24535 Selector: app=guestbook,tier=frontend Labels: app=guestbook tier=frontend Annotations: <none> Replicas: 3 current / 3 desired Pods Status: 0 Running / 3 Waiting / 0 Succeeded / 0 Failed Pod Template: Labels: app=guestbook tier=frontend Containers: php-redis: Image: gcr.io/google_samples/gb-frontend:v3 ... skipping 9 lines ... Events: Type Reason Age From Message ---- ------ ---- ---- ------- Normal SuccessfulCreate 1s replicaset-controller Created pod: frontend-w2hq9 Normal SuccessfulCreate 1s replicaset-controller Created pod: frontend-b6hrp Normal SuccessfulCreate 1s replicaset-controller Created pod: frontend-l287h (B[mE0223 10:21:14.888633 60038 reflector.go:138] k8s.io/client-go/metadata/metadatainformer/informer.go:90: Failed to watch *v1.PartialObjectMetadata: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource matched Name: matched Image: matched Node: matched Labels: matched Status: matched Controlled By ... skipping 166 lines ... [32mapps.sh:662: Successful get rs frontend {{.metadata.generation}}: 5 (B[mSuccessful message:kube-controller-manager kubectl-create kubectl-set has:kubectl-set [32mapps.sh:670: Successful get rs {{range.items}}{{.metadata.name}}:{{end}}: frontend: (B[mreplicaset.apps "frontend" deleted E0223 10:21:20.860645 60038 reflector.go:138] k8s.io/client-go/metadata/metadatainformer/informer.go:90: Failed to watch *v1.PartialObjectMetadata: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource [32mapps.sh:674: Successful get rs {{range.items}}{{.metadata.name}}:{{end}}: (B[m[32mapps.sh:678: Successful get rs {{range.items}}{{.metadata.name}}:{{end}}: (B[mreplicaset.apps/frontend created I0223 10:21:21.177853 60038 event.go:291] "Event occurred" object="namespace-1614075672-24535/frontend" kind="ReplicaSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulCreate" message="Created pod: frontend-fxskc" I0223 10:21:21.183815 60038 event.go:291] "Event occurred" object="namespace-1614075672-24535/frontend" kind="ReplicaSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulCreate" message="Created pod: frontend-j9kjq" I0223 10:21:21.183850 60038 event.go:291] "Event occurred" object="namespace-1614075672-24535/frontend" kind="ReplicaSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulCreate" message="Created pod: frontend-ns6xj" ... skipping 17 lines ... horizontalpodautoscaler.autoscaling/frontend autoscaled [32mapps.sh:706: Successful get hpa frontend {{.spec.minReplicas}} {{.spec.maxReplicas}} {{.spec.targetCPUUtilizationPercentage}}: 2 3 80 (B[mSuccessful message:kubectl-autoscale has:kubectl-autoscale horizontalpodautoscaler.autoscaling "frontend" deleted Error: required flag(s) "max" not set replicaset.apps "frontend" deleted +++ exit code: 0 Recording: run_stateful_set_tests Running command: run_stateful_set_tests +++ Running case: test-cmd.run_stateful_set_tests ... skipping 64 lines ... 
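The "unable to find container named \"redis\"" failures a little earlier are kubectl set image (and set resources) being pointed at a container that does not exist in the pod template; the name on the left of the = must match an entry in spec.containers[].name. Illustrative calls against the test's nginx-deployment:

  kubectl set image deployment/nginx-deployment nginx=k8s.gcr.io/nginx:1.7.9   # container "nginx" exists: accepted
  kubectl set image deployment/nginx-deployment redis=redis                    # no such container: rejected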
[32mapps.sh:466: Successful get statefulset {{range.items}}{{(index .spec.template.spec.containers 1).image}}:{{end}}: k8s.gcr.io/pause:2.0: (B[m[32mapps.sh:467: Successful get statefulset {{range.items}}{{(len .spec.template.spec.containers)}}{{end}}: 2 (B[mstatefulset.apps/nginx rolled back [32mapps.sh:470: Successful get statefulset {{range.items}}{{(index .spec.template.spec.containers 0).image}}:{{end}}: k8s.gcr.io/nginx-slim:0.7: (B[m[32mapps.sh:471: Successful get statefulset {{range.items}}{{(len .spec.template.spec.containers)}}{{end}}: 1 (B[mSuccessful message:error: unable to find specified revision 1000000 in history has:unable to find specified revision [32mapps.sh:475: Successful get statefulset {{range.items}}{{(index .spec.template.spec.containers 0).image}}:{{end}}: k8s.gcr.io/nginx-slim:0.7: (B[m[32mapps.sh:476: Successful get statefulset {{range.items}}{{(len .spec.template.spec.containers)}}{{end}}: 1 (B[mstatefulset.apps/nginx rolled back [32mapps.sh:479: Successful get statefulset {{range.items}}{{(index .spec.template.spec.containers 0).image}}:{{end}}: k8s.gcr.io/nginx-slim:0.8: (B[m[32mapps.sh:480: Successful get statefulset {{range.items}}{{(index .spec.template.spec.containers 1).image}}:{{end}}: k8s.gcr.io/pause:2.0: ... skipping 61 lines ... Name: mock Namespace: namespace-1614075688-29670 Selector: app=mock Labels: app=mock Annotations: <none> Replicas: 1 current / 1 desired Pods Status: 0 Running / 1 Waiting / 0 Succeeded / 0 Failed Pod Template: Labels: app=mock Containers: mock-container: Image: k8s.gcr.io/pause:3.4.1 Port: 9949/TCP ... skipping 59 lines ... Name: mock Namespace: namespace-1614075688-29670 Selector: app=mock Labels: app=mock Annotations: <none> Replicas: 1 current / 1 desired Pods Status: 0 Running / 1 Waiting / 0 Succeeded / 0 Failed Pod Template: Labels: app=mock Containers: mock-container: Image: k8s.gcr.io/pause:3.4.1 Port: 9949/TCP ... skipping 59 lines ... Name: mock Namespace: namespace-1614075688-29670 Selector: app=mock Labels: app=mock Annotations: <none> Replicas: 1 current / 1 desired Pods Status: 0 Running / 1 Waiting / 0 Succeeded / 0 Failed Pod Template: Labels: app=mock Containers: mock-container: Image: k8s.gcr.io/pause:3.4.1 Port: 9949/TCP ... skipping 41 lines ... Namespace: namespace-1614075688-29670 Selector: app=mock Labels: app=mock status=replaced Annotations: <none> Replicas: 1 current / 1 desired Pods Status: 0 Running / 1 Waiting / 0 Succeeded / 0 Failed Pod Template: Labels: app=mock Containers: mock-container: Image: k8s.gcr.io/pause:3.4.1 Port: 9949/TCP ... skipping 11 lines ... Namespace: namespace-1614075688-29670 Selector: app=mock2 Labels: app=mock2 status=replaced Annotations: <none> Replicas: 1 current / 1 desired Pods Status: 0 Running / 1 Waiting / 0 Succeeded / 0 Failed Pod Template: Labels: app=mock2 Containers: mock-container: Image: k8s.gcr.io/pause:3.4.1 Port: 9949/TCP ... skipping 93 lines ... 
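The statefulset assertions near the top of this block mirror the deployment rollback tests: StatefulSets keep ControllerRevisions, so kubectl rollout undo works the same way, including rejecting a revision that was never recorded. Roughly:

  kubectl rollout history statefulset/nginx
  kubectl rollout undo statefulset/nginx                          # previous revision
  kubectl rollout undo statefulset/nginx --to-revision=1000000    # error: unable to find specified revision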
(B[m[32mgeneric-resources.sh:174: Successful get rc {{range.items}}{{.metadata.name}}:{{end}}: (B[mservice/mock created replicationcontroller/mock created I0223 10:21:40.771303 60038 event.go:291] "Event occurred" object="namespace-1614075688-29670/mock" kind="ReplicationController" apiVersion="v1" type="Normal" reason="SuccessfulCreate" message="Created pod: mock-rtq4f" [32mgeneric-resources.sh:180: Successful get services {{range.items}}{{.metadata.name}}:{{end}}: mock: (B[m[32mgeneric-resources.sh:181: Successful get rc {{range.items}}{{.metadata.name}}:{{end}}: mock: (B[mE0223 10:21:41.106953 60038 reflector.go:138] k8s.io/client-go/metadata/metadatainformer/informer.go:90: Failed to watch *v1.PartialObjectMetadata: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource service "mock" deleted replicationcontroller "mock" deleted [32mgeneric-resources.sh:187: Successful get services {{range.items}}{{.metadata.name}}:{{end}}: (B[m[32mgeneric-resources.sh:188: Successful get rc {{range.items}}{{.metadata.name}}:{{end}}: (B[m+++ exit code: 0 Recording: run_persistent_volumes_tests ... skipping 3 lines ... +++ working dir: /home/prow/go/src/k8s.io/kubernetes +++ command: run_persistent_volumes_tests +++ [0223 10:21:41] Creating namespace namespace-1614075701-18140 namespace/namespace-1614075701-18140 created Context "test" modified. +++ [0223 10:21:41] Testing persistent volumes E0223 10:21:41.569018 60038 reflector.go:138] k8s.io/client-go/metadata/metadatainformer/informer.go:90: Failed to watch *v1.PartialObjectMetadata: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource [32mstorage.sh:30: Successful get pv {{range.items}}{{.metadata.name}}:{{end}}: (B[mpersistentvolume/pv0001 created E0223 10:21:41.824428 60038 pv_protection_controller.go:118] PV pv0001 failed with : Operation cannot be fulfilled on persistentvolumes "pv0001": the object has been modified; please apply your changes to the latest version and try again [32mstorage.sh:33: Successful get pv {{range.items}}{{.metadata.name}}:{{end}}: pv0001: (B[mpersistentvolume "pv0001" deleted persistentvolume/pv0002 created [32mstorage.sh:36: Successful get pv {{range.items}}{{.metadata.name}}:{{end}}: pv0002: (B[mpersistentvolume "pv0002" deleted persistentvolume/pv0003 created ... skipping 77 lines ... Roles: <none> Labels: <none> Annotations: node.alpha.kubernetes.io/ttl: 0 CreationTimestamp: Tue, 23 Feb 2021 10:16:10 +0000 Taints: node.kubernetes.io/unreachable:NoSchedule Unschedulable: false Lease: Failed to get lease: leases.coordination.k8s.io "127.0.0.1" not found Conditions: Type Status LastHeartbeatTime LastTransitionTime Reason Message ---- ------ ----------------- ------------------ ------ ------- Ready Unknown Tue, 23 Feb 2021 10:16:10 +0000 Tue, 23 Feb 2021 10:17:11 +0000 NodeStatusNeverUpdated Kubelet never posted node status. MemoryPressure Unknown Tue, 23 Feb 2021 10:16:10 +0000 Tue, 23 Feb 2021 10:17:11 +0000 NodeStatusNeverUpdated Kubelet never posted node status. DiskPressure Unknown Tue, 23 Feb 2021 10:16:10 +0000 Tue, 23 Feb 2021 10:17:11 +0000 NodeStatusNeverUpdated Kubelet never posted node status. ... skipping 30 lines ... 
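run_persistent_volumes_tests creates and deletes a handful of PersistentVolumes; the transient pv_protection_controller conflict above is the same benign resourceVersion race seen earlier with the daemonset. A minimal PV of the kind being exercised might look like this (hypothetical manifest, not the actual test fixture):

  # pv0001.yaml
  apiVersion: v1
  kind: PersistentVolume
  metadata:
    name: pv0001
  spec:
    capacity:
      storage: 1Gi
    accessModes: ["ReadWriteOnce"]
    hostPath:
      path: /tmp/pv0001

  kubectl create -f pv0001.yaml && kubectl get pv pv0001    # then: kubectl delete pv pv0001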
Roles: <none> Labels: <none> Annotations: node.alpha.kubernetes.io/ttl: 0 CreationTimestamp: Tue, 23 Feb 2021 10:16:10 +0000 Taints: node.kubernetes.io/unreachable:NoSchedule Unschedulable: false Lease: Failed to get lease: leases.coordination.k8s.io "127.0.0.1" not found Conditions: Type Status LastHeartbeatTime LastTransitionTime Reason Message ---- ------ ----------------- ------------------ ------ ------- Ready Unknown Tue, 23 Feb 2021 10:16:10 +0000 Tue, 23 Feb 2021 10:17:11 +0000 NodeStatusNeverUpdated Kubelet never posted node status. MemoryPressure Unknown Tue, 23 Feb 2021 10:16:10 +0000 Tue, 23 Feb 2021 10:17:11 +0000 NodeStatusNeverUpdated Kubelet never posted node status. DiskPressure Unknown Tue, 23 Feb 2021 10:16:10 +0000 Tue, 23 Feb 2021 10:17:11 +0000 NodeStatusNeverUpdated Kubelet never posted node status. ... skipping 31 lines ... Roles: <none> Labels: <none> Annotations: node.alpha.kubernetes.io/ttl: 0 CreationTimestamp: Tue, 23 Feb 2021 10:16:10 +0000 Taints: node.kubernetes.io/unreachable:NoSchedule Unschedulable: false Lease: Failed to get lease: leases.coordination.k8s.io "127.0.0.1" not found Conditions: Type Status LastHeartbeatTime LastTransitionTime Reason Message ---- ------ ----------------- ------------------ ------ ------- Ready Unknown Tue, 23 Feb 2021 10:16:10 +0000 Tue, 23 Feb 2021 10:17:11 +0000 NodeStatusNeverUpdated Kubelet never posted node status. MemoryPressure Unknown Tue, 23 Feb 2021 10:16:10 +0000 Tue, 23 Feb 2021 10:17:11 +0000 NodeStatusNeverUpdated Kubelet never posted node status. DiskPressure Unknown Tue, 23 Feb 2021 10:16:10 +0000 Tue, 23 Feb 2021 10:17:11 +0000 NodeStatusNeverUpdated Kubelet never posted node status. ... skipping 30 lines ... Roles: <none> Labels: <none> Annotations: node.alpha.kubernetes.io/ttl: 0 CreationTimestamp: Tue, 23 Feb 2021 10:16:10 +0000 Taints: node.kubernetes.io/unreachable:NoSchedule Unschedulable: false Lease: Failed to get lease: leases.coordination.k8s.io "127.0.0.1" not found Conditions: Type Status LastHeartbeatTime LastTransitionTime Reason Message ---- ------ ----------------- ------------------ ------ ------- Ready Unknown Tue, 23 Feb 2021 10:16:10 +0000 Tue, 23 Feb 2021 10:17:11 +0000 NodeStatusNeverUpdated Kubelet never posted node status. MemoryPressure Unknown Tue, 23 Feb 2021 10:16:10 +0000 Tue, 23 Feb 2021 10:17:11 +0000 NodeStatusNeverUpdated Kubelet never posted node status. DiskPressure Unknown Tue, 23 Feb 2021 10:16:10 +0000 Tue, 23 Feb 2021 10:17:11 +0000 NodeStatusNeverUpdated Kubelet never posted node status. ... skipping 38 lines ... Roles: <none> Labels: <none> Annotations: node.alpha.kubernetes.io/ttl: 0 CreationTimestamp: Tue, 23 Feb 2021 10:16:10 +0000 Taints: node.kubernetes.io/unreachable:NoSchedule Unschedulable: false Lease: Failed to get lease: leases.coordination.k8s.io "127.0.0.1" not found Conditions: Type Status LastHeartbeatTime LastTransitionTime Reason Message ---- ------ ----------------- ------------------ ------ ------- Ready Unknown Tue, 23 Feb 2021 10:16:10 +0000 Tue, 23 Feb 2021 10:17:11 +0000 NodeStatusNeverUpdated Kubelet never posted node status. MemoryPressure Unknown Tue, 23 Feb 2021 10:16:10 +0000 Tue, 23 Feb 2021 10:17:11 +0000 NodeStatusNeverUpdated Kubelet never posted node status. DiskPressure Unknown Tue, 23 Feb 2021 10:16:10 +0000 Tue, 23 Feb 2021 10:17:11 +0000 NodeStatusNeverUpdated Kubelet never posted node status. ... skipping 30 lines ... 
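The node describes above all show Unknown conditions and a missing Lease because this test cluster has no kubelet posting status for 127.0.0.1. A compact way to read the same condition table, for reference:

  kubectl get node 127.0.0.1 -o jsonpath='{range .status.conditions[*]}{.type}={.status}{"\n"}{end}'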
Roles: <none> Labels: <none> Annotations: node.alpha.kubernetes.io/ttl: 0 CreationTimestamp: Tue, 23 Feb 2021 10:16:10 +0000 Taints: node.kubernetes.io/unreachable:NoSchedule Unschedulable: false Lease: Failed to get lease: leases.coordination.k8s.io "127.0.0.1" not found Conditions: Type Status LastHeartbeatTime LastTransitionTime Reason Message ---- ------ ----------------- ------------------ ------ ------- Ready Unknown Tue, 23 Feb 2021 10:16:10 +0000 Tue, 23 Feb 2021 10:17:11 +0000 NodeStatusNeverUpdated Kubelet never posted node status. MemoryPressure Unknown Tue, 23 Feb 2021 10:16:10 +0000 Tue, 23 Feb 2021 10:17:11 +0000 NodeStatusNeverUpdated Kubelet never posted node status. DiskPressure Unknown Tue, 23 Feb 2021 10:16:10 +0000 Tue, 23 Feb 2021 10:17:11 +0000 NodeStatusNeverUpdated Kubelet never posted node status. ... skipping 30 lines ... Roles: <none> Labels: <none> Annotations: node.alpha.kubernetes.io/ttl: 0 CreationTimestamp: Tue, 23 Feb 2021 10:16:10 +0000 Taints: node.kubernetes.io/unreachable:NoSchedule Unschedulable: false Lease: Failed to get lease: leases.coordination.k8s.io "127.0.0.1" not found Conditions: Type Status LastHeartbeatTime LastTransitionTime Reason Message ---- ------ ----------------- ------------------ ------ ------- Ready Unknown Tue, 23 Feb 2021 10:16:10 +0000 Tue, 23 Feb 2021 10:17:11 +0000 NodeStatusNeverUpdated Kubelet never posted node status. MemoryPressure Unknown Tue, 23 Feb 2021 10:16:10 +0000 Tue, 23 Feb 2021 10:17:11 +0000 NodeStatusNeverUpdated Kubelet never posted node status. DiskPressure Unknown Tue, 23 Feb 2021 10:16:10 +0000 Tue, 23 Feb 2021 10:17:11 +0000 NodeStatusNeverUpdated Kubelet never posted node status. ... skipping 29 lines ... Roles: <none> Labels: <none> Annotations: node.alpha.kubernetes.io/ttl: 0 CreationTimestamp: Tue, 23 Feb 2021 10:16:10 +0000 Taints: node.kubernetes.io/unreachable:NoSchedule Unschedulable: false Lease: Failed to get lease: leases.coordination.k8s.io "127.0.0.1" not found Conditions: Type Status LastHeartbeatTime LastTransitionTime Reason Message ---- ------ ----------------- ------------------ ------ ------- Ready Unknown Tue, 23 Feb 2021 10:16:10 +0000 Tue, 23 Feb 2021 10:17:11 +0000 NodeStatusNeverUpdated Kubelet never posted node status. MemoryPressure Unknown Tue, 23 Feb 2021 10:16:10 +0000 Tue, 23 Feb 2021 10:17:11 +0000 NodeStatusNeverUpdated Kubelet never posted node status. DiskPressure Unknown Tue, 23 Feb 2021 10:16:10 +0000 Tue, 23 Feb 2021 10:17:11 +0000 NodeStatusNeverUpdated Kubelet never posted node status. ... skipping 132 lines ... yes has:the server doesn't have a resource type Successful message:yes has:yes Successful message:error: --subresource can not be used with NonResourceURL has:subresource can not be used with NonResourceURL Successful Successful message:yes 0 has:0 ... skipping 59 lines ... 
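The "--subresource can not be used with NonResourceURL" message at the end of this block is kubectl auth can-i validation: subresources only make sense for resource requests. Illustrative calls:

  kubectl auth can-i get pods --subresource=log     # resource plus subresource: valid
  kubectl auth can-i get /logs                      # non-resource URL: valid
  kubectl auth can-i get /logs --subresource=log    # rejected: subresource with a non-resource URL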
{Verbs:[get list watch] APIGroups:[] Resources:[configmaps] ResourceNames:[] NonResourceURLs:[]} [32mlegacy-script.sh:846: Successful get rolebindings -n some-other-random -l test-cmd=auth {{range.items}}{{.metadata.name}}:{{end}}: testing-RB: (B[m[32mlegacy-script.sh:847: Successful get roles -n some-other-random -l test-cmd=auth {{range.items}}{{.metadata.name}}:{{end}}: testing-R: (B[m[32mlegacy-script.sh:848: Successful get clusterrolebindings -l test-cmd=auth {{range.items}}{{.metadata.name}}:{{end}}: testing-CRB: (B[m[32mlegacy-script.sh:849: Successful get clusterroles -l test-cmd=auth {{range.items}}{{.metadata.name}}:{{end}}: testing-CR: (B[mSuccessful message:error: only rbac.authorization.k8s.io/v1 is supported: not *v1beta1.ClusterRole has:only rbac.authorization.k8s.io/v1 is supported rolebinding.rbac.authorization.k8s.io "testing-RB" deleted role.rbac.authorization.k8s.io "testing-R" deleted warning: deleting cluster-scoped resources, not scoped to the provided namespace clusterrole.rbac.authorization.k8s.io "testing-CR" deleted clusterrolebinding.rbac.authorization.k8s.io "testing-CRB" deleted ... skipping 24 lines ... [32mdiscovery.sh:91: Successful get all -l'app=cassandra' {{range.items}}{{range .metadata.labels}}{{.}}:{{end}}{{end}}: cassandra:cassandra:cassandra:cassandra: (B[mpod "cassandra-998g9" deleted I0223 10:21:51.814316 60038 event.go:291] "Event occurred" object="namespace-1614075711-17600/cassandra" kind="ReplicationController" apiVersion="v1" type="Normal" reason="SuccessfulCreate" message="Created pod: cassandra-7sphr" pod "cassandra-xwb4t" deleted I0223 10:21:51.823226 60038 event.go:291] "Event occurred" object="namespace-1614075711-17600/cassandra" kind="ReplicationController" apiVersion="v1" type="Normal" reason="SuccessfulCreate" message="Created pod: cassandra-6kh4k" replicationcontroller "cassandra" deleted E0223 10:21:51.838092 60038 replica_set.go:532] sync "namespace-1614075711-17600/cassandra" failed with replicationcontrollers "cassandra" not found service "cassandra" deleted +++ exit code: 0 Recording: run_kubectl_explain_tests Running command: run_kubectl_explain_tests +++ Running case: test-cmd.run_kubectl_explain_tests ... skipping 172 lines ... (B[mwarning: Immediate deletion does not wait for confirmation that the running resource has been terminated. The resource may continue to run on the cluster indefinitely. 
pod "valid-pod" force deleted [32mget.sh:283: Successful get pods {{range.items}}{{.metadata.name}}:{{end}}: (B[m[32mget.sh:288: Successful get pods {{range.items}}{{.metadata.name}}:{{end}}: (B[mpod/sorted-pod1 created [32mget.sh:292: Successful get pods {{range.items}}{{.metadata.name}}:{{end}}: sorted-pod1: (B[mE0223 10:21:54.477429 60038 reflector.go:138] k8s.io/client-go/metadata/metadatainformer/informer.go:90: Failed to watch *v1.PartialObjectMetadata: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource pod/sorted-pod2 created [32mget.sh:296: Successful get pods {{range.items}}{{.metadata.name}}:{{end}}: sorted-pod1:sorted-pod2: (B[mE0223 10:21:54.790851 60038 reflector.go:138] k8s.io/client-go/metadata/metadatainformer/informer.go:90: Failed to watch *v1.PartialObjectMetadata: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource pod/sorted-pod3 created [32mget.sh:300: Successful get pods {{range.items}}{{.metadata.name}}:{{end}}: sorted-pod1:sorted-pod2:sorted-pod3: (B[mSuccessful message:sorted-pod1:sorted-pod2:sorted-pod3: has:sorted-pod1:sorted-pod2:sorted-pod3: Successful ... skipping 759 lines ... message:node/127.0.0.1 already uncordoned (server dry run) has:already uncordoned [32mnode-management.sh:145: Successful get nodes 127.0.0.1 {{.spec.unschedulable}}: <no value> (B[mnode/127.0.0.1 labeled [32mnode-management.sh:150: Successful get nodes 127.0.0.1 {{.metadata.labels.test}}: label (B[mSuccessful message:error: cannot specify both a node name and a --selector option See 'kubectl drain -h' for help and examples has:cannot specify both a node name Successful message:error: USAGE: cordon NODE [flags] See 'kubectl cordon -h' for help and examples has:error\: USAGE\: cordon NODE node/127.0.0.1 already uncordoned Successful message:error: You must provide one or more resources by argument or filename. Example resource specifications include: '-f rsrc.yaml' '--filename=rsrc.json' '<resource> <name>' '<resource>' has:must provide one or more resources ... skipping 14 lines ... +++ [0223 10:22:22] Testing kubectl plugins Successful message:The following compatible plugins are available: test/fixtures/pkg/kubectl/plugins/version/kubectl-version - warning: kubectl-version overwrites existing command: "kubectl version" error: one plugin warning was found has:kubectl-version overwrites existing command: "kubectl version" Successful message:The following compatible plugins are available: test/fixtures/pkg/kubectl/plugins/kubectl-foo test/fixtures/pkg/kubectl/plugins/foo/kubectl-foo - warning: test/fixtures/pkg/kubectl/plugins/foo/kubectl-foo is overshadowed by a similarly named plugin: test/fixtures/pkg/kubectl/plugins/kubectl-foo error: one plugin warning was found has:test/fixtures/pkg/kubectl/plugins/foo/kubectl-foo is overshadowed by a similarly named plugin Successful message:The following compatible plugins are available: test/fixtures/pkg/kubectl/plugins/kubectl-foo has:plugins are available Successful message:Unable to read directory "test/fixtures/pkg/kubectl/plugins/empty" from your PATH: open test/fixtures/pkg/kubectl/plugins/empty: no such file or directory. Skipping... error: unable to find any kubectl plugins in your PATH has:unable to find any kubectl plugins in your PATH Successful message:I am plugin foo has:plugin foo Successful message:I am plugin bar called with args test/fixtures/pkg/kubectl/plugins/bar/kubectl-bar arg1 ... skipping 10 lines ... 
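The plugin warnings above come from kubectl plugin list, which scans PATH for executables named kubectl-*: a plugin that overwrites a built-in command (kubectl-version) or is shadowed by a same-named plugin earlier on PATH is reported but still listed. A hypothetical setup, assuming ~/bin is on PATH:

  printf '#!/bin/sh\necho "I am plugin foo"\n' > ~/bin/kubectl-foo
  chmod +x ~/bin/kubectl-foo
  kubectl plugin list    # discovers kubectl-foo and warns about any name collisions
  kubectl foo            # dispatches to the plugin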
+++ Running case: test-cmd.run_impersonation_tests +++ working dir: /home/prow/go/src/k8s.io/kubernetes +++ command: run_impersonation_tests +++ [0223 10:22:22] Testing impersonation Successful message:error: requesting groups or user-extra for test-admin without impersonating a user has:without impersonating a user Warning: certificates.k8s.io/v1beta1 CertificateSigningRequest is deprecated in v1.19+, unavailable in v1.22+; use certificates.k8s.io/v1 CertificateSigningRequest certificatesigningrequest.certificates.k8s.io/foo created [32mauthorization.sh:68: Successful get csr/foo {{.spec.username}}: user1 (B[m[32mauthorization.sh:69: Successful get csr/foo {{range .spec.groups}}{{.}}{{end}}: system:authenticated (B[mWarning: certificates.k8s.io/v1beta1 CertificateSigningRequest is deprecated in v1.19+, unavailable in v1.22+; use certificates.k8s.io/v1 CertificateSigningRequest certificatesigningrequest.certificates.k8s.io "foo" deleted Warning: certificates.k8s.io/v1beta1 CertificateSigningRequest is deprecated in v1.19+, unavailable in v1.22+; use certificates.k8s.io/v1 CertificateSigningRequest certificatesigningrequest.certificates.k8s.io/foo created E0223 10:22:23.468103 60038 reflector.go:138] k8s.io/client-go/metadata/metadatainformer/informer.go:90: Failed to watch *v1.PartialObjectMetadata: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource [32mauthorization.sh:74: Successful get csr/foo {{len .spec.groups}}: 4 (B[m[32mauthorization.sh:75: Successful get csr/foo {{range .spec.groups}}{{.}} {{end}}: group2 group1 ,,,chameleon system:authenticated (B[mWarning: certificates.k8s.io/v1beta1 CertificateSigningRequest is deprecated in v1.19+, unavailable in v1.22+; use certificates.k8s.io/v1 CertificateSigningRequest certificatesigningrequest.certificates.k8s.io "foo" deleted +++ exit code: 0 Recording: run_wait_tests ... skipping 10 lines ... 
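The impersonation test demonstrates the ordering rule checked above: --as-group (and user-extra) is only honoured alongside --as, otherwise the request is rejected client-side, and the impersonated identity is what ends up in the CSR's spec.username. A sketch of the calls, with a hypothetical csr.yaml standing in for the test fixture:

  kubectl create -f csr.yaml --as=user1                                        # csr.spec.username becomes user1
  kubectl create -f csr.yaml --as=user1 --as-group=group1 --as-group=group2    # groups recorded alongside the user
  kubectl create -f csr.yaml --as-group=group1                                 # error: groups without impersonating a user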
I0223 10:22:23.983113 60038 event.go:291] "Event occurred" object="namespace-1614075743-7953/test-1-7487ff9cbb" kind="ReplicaSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulCreate" message="Created pod: test-1-7487ff9cbb-nsnxw" deployment.apps/test-1 created I0223 10:22:24.090645 60038 event.go:291] "Event occurred" object="namespace-1614075743-7953/test-2" kind="Deployment" apiVersion="apps/v1" type="Normal" reason="ScalingReplicaSet" message="Scaled up replica set test-2-646997777c to 1" I0223 10:22:24.096411 60038 event.go:291] "Event occurred" object="namespace-1614075743-7953/test-2-646997777c" kind="ReplicaSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulCreate" message="Created pod: test-2-646997777c-rbb8k" deployment.apps/test-2 created [32mwait.sh:36: Successful get deployments {{range .items}}{{.metadata.name}},{{end}}: test-1,test-2, (B[mE0223 10:22:26.061333 60038 reflector.go:138] k8s.io/client-go/metadata/metadatainformer/informer.go:90: Failed to watch *v1.PartialObjectMetadata: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource deployment.apps "test-1" deleted deployment.apps "test-2" deleted Successful message:deployment.apps/test-1 condition met deployment.apps/test-2 condition met has:test-1 condition met Successful message:deployment.apps/test-1 condition met deployment.apps/test-2 condition met has:test-2 condition met +++ exit code: 0 E0223 10:22:26.347057 60038 reflector.go:138] k8s.io/client-go/metadata/metadatainformer/informer.go:90: Failed to watch *v1.PartialObjectMetadata: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource Recording: run_kubectl_debug_pod_tests Running command: run_kubectl_debug_pod_tests +++ Running case: test-cmd.run_kubectl_debug_pod_tests +++ working dir: /home/prow/go/src/k8s.io/kubernetes +++ command: run_kubectl_debug_pod_tests ... skipping 70 lines ... I0223 10:22:30.403762 56345 establishing_controller.go:87] Shutting down EstablishingController I0223 10:22:30.403774 56345 naming_controller.go:302] Shutting down NamingConditionController I0223 10:22:30.403816 56345 tlsconfig.go:255] Shutting down DynamicServingCertificateController I0223 10:22:30.403834 56345 dynamic_serving_content.go:145] Shutting down serving-cert::/tmp/apiserver.crt::/tmp/apiserver.key I0223 10:22:30.403957 56345 clientconn.go:897] blockingPicker: the picked transport is not ready, loop back to repick I0223 10:22:30.404010 56345 secure_serving.go:241] Stopped listening on 127.0.0.1:6443 E0223 10:22:30.404090 56345 controller.go:184] rpc error: code = Unavailable desc = transport is closing I0223 10:22:30.404281 56345 clientconn.go:897] blockingPicker: the picked transport is not ready, loop back to repick I0223 10:22:30.404284 56345 clientconn.go:897] blockingPicker: the picked transport is not ready, loop back to repick I0223 10:22:30.404292 56345 clientconn.go:897] blockingPicker: the picked transport is not ready, loop back to repick I0223 10:22:30.404394 56345 clientconn.go:897] blockingPicker: the picked transport is not ready, loop back to repick I0223 10:22:30.404494 56345 clientconn.go:897] blockingPicker: the picked transport is not ready, loop back to repick I0223 10:22:30.404569 56345 clientconn.go:897] blockingPicker: the picked transport is not ready, loop back to repick W0223 10:22:30.404633 56345 clientconn.go:1223] grpc: addrConn.createTransport failed to connect to {http://127.0.0.1:2379 <nil> 0 <nil>}. 
Err :connection error: desc = "transport: Error while dialing dial tcp 127.0.0.1:2379: connect: connection refused". Reconnecting... ... skipping repeated "grpc: addrConn.createTransport failed to connect" reconnect warnings and "blockingPicker: the picked transport is not ready, loop back to repick" messages logged while the test apiserver and its etcd client shut down ...
I0223 10:22:30.406657 56345 clientconn.go:897] blockingPicker: the picked transport is not ready, loop back to repick W0223 10:22:30.406668 56345 clientconn.go:1223] grpc: addrConn.createTransport failed to connect to {http://127.0.0.1:2379 <nil> 0 <nil>}. Err :connection error: desc = "transport: Error while dialing dial tcp 127.0.0.1:2379: connect: connection refused". Reconnecting... I0223 10:22:30.406701 56345 clientconn.go:897] blockingPicker: the picked transport is not ready, loop back to repick W0223 10:22:30.406717 56345 clientconn.go:1223] grpc: addrConn.createTransport failed to connect to {http://127.0.0.1:2379 <nil> 0 <nil>}. Err :connection error: desc = "transport: Error while dialing dial tcp 127.0.0.1:2379: connect: connection refused". Reconnecting... I0223 10:22:30.406745 56345 clientconn.go:897] blockingPicker: the picked transport is not ready, loop back to repick W0223 10:22:30.406776 56345 clientconn.go:1223] grpc: addrConn.createTransport failed to connect to {http://127.0.0.1:2379 <nil> 0 <nil>}. Err :connection error: desc = "transport: Error while dialing dial tcp 127.0.0.1:2379: connect: connection refused". Reconnecting... I0223 10:22:30.406792 56345 clientconn.go:897] blockingPicker: the picked transport is not ready, loop back to repick W0223 10:22:30.406820 56345 clientconn.go:1223] grpc: addrConn.createTransport failed to connect to {http://127.0.0.1:2379 <nil> 0 <nil>}. Err :connection error: desc = "transport: Error while dialing dial tcp 127.0.0.1:2379: connect: connection refused". Reconnecting... W0223 10:22:30.406828 56345 clientconn.go:1223] grpc: addrConn.createTransport failed to connect to {http://127.0.0.1:2379 <nil> 0 <nil>}. Err :connection error: desc = "transport: Error while dialing dial tcp 127.0.0.1:2379: connect: connection refused". Reconnecting... I0223 10:22:30.406843 56345 clientconn.go:897] blockingPicker: the picked transport is not ready, loop back to repick W0223 10:22:30.406878 56345 clientconn.go:1223] grpc: addrConn.createTransport failed to connect to {http://127.0.0.1:2379 <nil> 0 <nil>}. Err :connection error: desc = "transport: Error while dialing dial tcp 127.0.0.1:2379: connect: connection refused". Reconnecting... W0223 10:22:30.406891 56345 clientconn.go:1223] grpc: addrConn.createTransport failed to connect to {http://127.0.0.1:2379 <nil> 0 <nil>}. Err :connection error: desc = "transport: Error while dialing dial tcp 127.0.0.1:2379: connect: connection refused". Reconnecting... I0223 10:22:30.406925 56345 clientconn.go:897] blockingPicker: the picked transport is not ready, loop back to repick W0223 10:22:30.406929 56345 clientconn.go:1223] grpc: addrConn.createTransport failed to connect to {http://127.0.0.1:2379 <nil> 0 <nil>}. Err :connection error: desc = "transport: Error while dialing dial tcp 127.0.0.1:2379: connect: connection refused". Reconnecting... I0223 10:22:30.406924 56345 clientconn.go:897] blockingPicker: the picked transport is not ready, loop back to repick W0223 10:22:30.406989 56345 clientconn.go:1223] grpc: addrConn.createTransport failed to connect to {http://127.0.0.1:2379 <nil> 0 <nil>}. Err :connection error: desc = "transport: Error while dialing dial tcp 127.0.0.1:2379: connect: connection refused". Reconnecting... 
I0223 10:22:30.407010 56345 clientconn.go:897] blockingPicker: the picked transport is not ready, loop back to repick I0223 10:22:30.407046 56345 clientconn.go:897] blockingPicker: the picked transport is not ready, loop back to repick W0223 10:22:30.407078 56345 clientconn.go:1223] grpc: addrConn.createTransport failed to connect to {http://127.0.0.1:2379 <nil> 0 <nil>}. Err :connection error: desc = "transport: Error while dialing dial tcp 127.0.0.1:2379: connect: connection refused". Reconnecting... I0223 10:22:30.407122 56345 clientconn.go:897] blockingPicker: the picked transport is not ready, loop back to repick W0223 10:22:30.407233 56345 clientconn.go:1223] grpc: addrConn.createTransport failed to connect to {http://127.0.0.1:2379 <nil> 0 <nil>}. Err :connection error: desc = "transport: Error while dialing dial tcp 127.0.0.1:2379: connect: connection refused". Reconnecting... I0223 10:22:30.407258 56345 clientconn.go:897] blockingPicker: the picked transport is not ready, loop back to repick W0223 10:22:30.407344 56345 clientconn.go:1223] grpc: addrConn.createTransport failed to connect to {http://127.0.0.1:2379 <nil> 0 <nil>}. Err :connection error: desc = "transport: Error while dialing dial tcp 127.0.0.1:2379: connect: connection refused". Reconnecting... W0223 10:22:30.407414 56345 clientconn.go:1223] grpc: addrConn.createTransport failed to connect to {http://127.0.0.1:2379 <nil> 0 <nil>}. Err :connection error: desc = "transport: Error while dialing dial tcp 127.0.0.1:2379: connect: connection refused". Reconnecting... I0223 10:22:30.407446 56345 clientconn.go:897] blockingPicker: the picked transport is not ready, loop back to repick W0223 10:22:30.407471 56345 clientconn.go:1223] grpc: addrConn.createTransport failed to connect to {http://127.0.0.1:2379 <nil> 0 <nil>}. Err :connection error: desc = "transport: Error while dialing dial tcp 127.0.0.1:2379: connect: connection refused". Reconnecting... W0223 10:22:30.407501 56345 clientconn.go:1223] grpc: addrConn.createTransport failed to connect to {http://127.0.0.1:2379 <nil> 0 <nil>}. Err :connection error: desc = "transport: Error while dialing dial tcp 127.0.0.1:2379: connect: connection refused". Reconnecting... W0223 10:22:30.407521 56345 clientconn.go:1223] grpc: addrConn.createTransport failed to connect to {http://127.0.0.1:2379 <nil> 0 <nil>}. Err :connection error: desc = "transport: Error while dialing dial tcp 127.0.0.1:2379: connect: connection refused". Reconnecting... W0223 10:22:30.407553 56345 clientconn.go:1223] grpc: addrConn.createTransport failed to connect to {http://127.0.0.1:2379 <nil> 0 <nil>}. Err :connection error: desc = "transport: Error while dialing dial tcp 127.0.0.1:2379: connect: connection refused". Reconnecting... W0223 10:22:30.407571 56345 clientconn.go:1223] grpc: addrConn.createTransport failed to connect to {http://127.0.0.1:2379 <nil> 0 <nil>}. Err :connection error: desc = "transport: Error while dialing dial tcp 127.0.0.1:2379: connect: connection refused". Reconnecting... I0223 10:22:30.407359 56345 clientconn.go:897] blockingPicker: the picked transport is not ready, loop back to repick W0223 10:22:30.407606 56345 clientconn.go:1223] grpc: addrConn.createTransport failed to connect to {http://127.0.0.1:2379 <nil> 0 <nil>}. Err :connection error: desc = "transport: Error while dialing dial tcp 127.0.0.1:2379: connect: connection refused". Reconnecting... 
W0223 10:22:30.407639 56345 clientconn.go:1223] grpc: addrConn.createTransport failed to connect to {http://127.0.0.1:2379 <nil> 0 <nil>}. Err :connection error: desc = "transport: Error while dialing dial tcp 127.0.0.1:2379: connect: connection refused". Reconnecting... W0223 10:22:30.407655 56345 clientconn.go:1223] grpc: addrConn.createTransport failed to connect to {http://127.0.0.1:2379 <nil> 0 <nil>}. Err :connection error: desc = "transport: Error while dialing dial tcp 127.0.0.1:2379: connect: connection refused". Reconnecting... W0223 10:22:30.407689 56345 clientconn.go:1223] grpc: addrConn.createTransport failed to connect to {http://127.0.0.1:2379 <nil> 0 <nil>}. Err :connection error: desc = "transport: Error while dialing dial tcp 127.0.0.1:2379: connect: connection refused". Reconnecting... W0223 10:22:30.407707 56345 clientconn.go:1223] grpc: addrConn.createTransport failed to connect to {http://127.0.0.1:2379 <nil> 0 <nil>}. Err :connection error: desc = "transport: Error while dialing dial tcp 127.0.0.1:2379: connect: connection refused". Reconnecting... W0223 10:22:30.407744 56345 clientconn.go:1223] grpc: addrConn.createTransport failed to connect to {http://127.0.0.1:2379 <nil> 0 <nil>}. Err :connection error: desc = "transport: Error while dialing dial tcp 127.0.0.1:2379: connect: connection refused". Reconnecting... W0223 10:22:30.407754 56345 clientconn.go:1223] grpc: addrConn.createTransport failed to connect to {http://127.0.0.1:2379 <nil> 0 <nil>}. Err :connection error: desc = "transport: Error while dialing dial tcp 127.0.0.1:2379: connect: connection refused". Reconnecting... W0223 10:22:30.407800 56345 clientconn.go:1223] grpc: addrConn.createTransport failed to connect to {http://127.0.0.1:2379 <nil> 0 <nil>}. Err :connection error: desc = "transport: Error while dialing dial tcp 127.0.0.1:2379: connect: connection refused". Reconnecting... W0223 10:22:30.407802 56345 clientconn.go:1223] grpc: addrConn.createTransport failed to connect to {http://127.0.0.1:2379 <nil> 0 <nil>}. Err :connection error: desc = "transport: Error while dialing dial tcp 127.0.0.1:2379: connect: connection refused". Reconnecting... W0223 10:22:30.407829 56345 clientconn.go:1223] grpc: addrConn.createTransport failed to connect to {http://127.0.0.1:2379 <nil> 0 <nil>}. Err :connection error: desc = "transport: Error while dialing dial tcp 127.0.0.1:2379: connect: connection refused". Reconnecting... W0223 10:22:30.407864 56345 clientconn.go:1223] grpc: addrConn.createTransport failed to connect to {http://127.0.0.1:2379 <nil> 0 <nil>}. Err :connection error: desc = "transport: Error while dialing dial tcp 127.0.0.1:2379: connect: connection refused". Reconnecting... W0223 10:22:30.407883 56345 clientconn.go:1223] grpc: addrConn.createTransport failed to connect to {http://127.0.0.1:2379 <nil> 0 <nil>}. Err :connection error: desc = "transport: Error while dialing dial tcp 127.0.0.1:2379: connect: connection refused". Reconnecting... W0223 10:22:30.407921 56345 clientconn.go:1223] grpc: addrConn.createTransport failed to connect to {http://127.0.0.1:2379 <nil> 0 <nil>}. Err :connection error: desc = "transport: Error while dialing dial tcp 127.0.0.1:2379: connect: connection refused". Reconnecting... W0223 10:22:30.407938 56345 clientconn.go:1223] grpc: addrConn.createTransport failed to connect to {http://127.0.0.1:2379 <nil> 0 <nil>}. Err :connection error: desc = "transport: Error while dialing dial tcp 127.0.0.1:2379: connect: connection refused". Reconnecting... 
W0223 10:22:30.407970 56345 clientconn.go:1223] grpc: addrConn.createTransport failed to connect to {http://127.0.0.1:2379 <nil> 0 <nil>}. Err :connection error: desc = "transport: Error while dialing dial tcp 127.0.0.1:2379: connect: connection refused". Reconnecting... W0223 10:22:30.407987 56345 clientconn.go:1223] grpc: addrConn.createTransport failed to connect to {http://127.0.0.1:2379 <nil> 0 <nil>}. Err :connection error: desc = "transport: Error while dialing dial tcp 127.0.0.1:2379: connect: connection refused". Reconnecting... W0223 10:22:30.408022 56345 clientconn.go:1223] grpc: addrConn.createTransport failed to connect to {http://127.0.0.1:2379 <nil> 0 <nil>}. Err :connection error: desc = "transport: Error while dialing dial tcp 127.0.0.1:2379: connect: connection refused". Reconnecting... W0223 10:22:30.408037 56345 clientconn.go:1223] grpc: addrConn.createTransport failed to connect to {http://127.0.0.1:2379 <nil> 0 <nil>}. Err :connection error: desc = "transport: Error while dialing dial tcp 127.0.0.1:2379: connect: connection refused". Reconnecting... W0223 10:22:30.408072 56345 clientconn.go:1223] grpc: addrConn.createTransport failed to connect to {http://127.0.0.1:2379 <nil> 0 <nil>}. Err :connection error: desc = "transport: Error while dialing dial tcp 127.0.0.1:2379: connect: connection refused". Reconnecting... W0223 10:22:30.408088 56345 clientconn.go:1223] grpc: addrConn.createTransport failed to connect to {http://127.0.0.1:2379 <nil> 0 <nil>}. Err :connection error: desc = "transport: Error while dialing dial tcp 127.0.0.1:2379: connect: connection refused". Reconnecting... W0223 10:22:30.408119 56345 clientconn.go:1223] grpc: addrConn.createTransport failed to connect to {http://127.0.0.1:2379 <nil> 0 <nil>}. Err :connection error: desc = "transport: Error while dialing dial tcp 127.0.0.1:2379: connect: connection refused". Reconnecting... W0223 10:22:30.408120 56345 clientconn.go:1223] grpc: addrConn.createTransport failed to connect to {http://127.0.0.1:2379 <nil> 0 <nil>}. Err :connection error: desc = "transport: Error while dialing dial tcp 127.0.0.1:2379: connect: connection refused". Reconnecting... W0223 10:22:30.408135 56345 clientconn.go:1223] grpc: addrConn.createTransport failed to connect to {http://127.0.0.1:2379 <nil> 0 <nil>}. Err :connection error: desc = "transport: Error while dialing dial tcp 127.0.0.1:2379: connect: connection refused". Reconnecting... W0223 10:22:30.408165 56345 clientconn.go:1223] grpc: addrConn.createTransport failed to connect to {http://127.0.0.1:2379 <nil> 0 <nil>}. Err :connection error: desc = "transport: Error while dialing dial tcp 127.0.0.1:2379: connect: connection refused". Reconnecting... W0223 10:22:30.408226 56345 clientconn.go:1223] grpc: addrConn.createTransport failed to connect to {http://127.0.0.1:2379 <nil> 0 <nil>}. Err :connection error: desc = "transport: Error while dialing dial tcp 127.0.0.1:2379: connect: connection refused". Reconnecting... W0223 10:22:30.408272 56345 clientconn.go:1223] grpc: addrConn.createTransport failed to connect to {http://127.0.0.1:2379 <nil> 0 <nil>}. Err :connection error: desc = "transport: Error while dialing dial tcp 127.0.0.1:2379: connect: connection refused". Reconnecting... W0223 10:22:30.408296 56345 clientconn.go:1223] grpc: addrConn.createTransport failed to connect to {http://127.0.0.1:2379 <nil> 0 <nil>}. Err :connection error: desc = "transport: Error while dialing dial tcp 127.0.0.1:2379: connect: connection refused". Reconnecting... 
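The repeated clientconn.go warnings above come from grpc-go clients inside the apiserver whose etcd endpoint (127.0.0.1:2379) has already gone away during teardown: the channel stays in a transient-failure state and keeps retrying in the background. The following is only a minimal, hypothetical Go sketch of that behaviour against an unreachable endpoint (it is not part of the test harness, and it assumes a recent grpc-go module with its default logging):

package main

import (
	"context"
	"fmt"
	"time"

	"google.golang.org/grpc"
)

func main() {
	// Dial the same client URL the apiserver was using. With nothing
	// listening on 127.0.0.1:2379, the channel cycles through
	// CONNECTING -> TRANSIENT_FAILURE while grpc-go retries, which is
	// what produces the "connection refused ... Reconnecting" warnings.
	conn, err := grpc.Dial("127.0.0.1:2379", grpc.WithInsecure())
	if err != nil {
		panic(err) // Dial is non-blocking and normally succeeds even if the server is down.
	}
	defer conn.Close()

	ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second)
	defer cancel()
	for ctx.Err() == nil {
		s := conn.GetState()
		fmt.Println("channel state:", s)
		conn.WaitForStateChange(ctx, s) // block until the state moves again or the context expires
	}
}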
junit report dir: /logs/artifacts
+++ [0223 10:22:30] Clean up complete
+ make test-integration
+++ [0223 10:22:34] Checking etcd is on PATH
/home/prow/go/src/k8s.io/kubernetes/third_party/etcd/etcd
+++ [0223 10:22:34] Starting etcd instance
etcd --advertise-client-urls http://127.0.0.1:2379 --data-dir /tmp/tmp.0qT6t1iwHM --listen-client-urls http://127.0.0.1:2379 --log-level=debug > "/logs/artifacts/etcd.c003772a-75be-11eb-a510-0e7563c59f3a.root.log.DEBUG.20210223-102235.95612" 2>/dev/null
Waiting for etcd to come up.
+++ [0223 10:22:35] On try 2, etcd: : {"health":"true"}
{"header":{"cluster_id":"14841639068965178418","member_id":"10276657743932975437","revision":"2","raft_term":"2"}}
+++ [0223 10:22:35] Running integration test cases
+++ [0223 10:22:40] Running tests without code coverage
{"component":"entrypoint","file":"prow/entrypoint/run.go:169","func":"k8s.io/test-infra/prow/entrypoint.Options.ExecuteProcess","level":"error","msg":"Entrypoint received interrupt: terminated","severity":"error","time":"2021-02-23T10:22:47Z"}
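The "Waiting for etcd to come up" step above simply polls the /health endpoint on the client URL until it returns {"health":"true"}. A minimal sketch of the same check in Go, assuming etcd was started locally with the flags shown in the log (the retry count and endpoint are illustrative, not the harness's exact script):

package main

import (
	"fmt"
	"io"
	"net/http"
	"time"
)

func main() {
	// Poll the endpoint served on --listen-client-urls http://127.0.0.1:2379;
	// a healthy etcd answers {"health":"true"}.
	for try := 1; try <= 10; try++ {
		resp, err := http.Get("http://127.0.0.1:2379/health")
		if err == nil {
			body, _ := io.ReadAll(resp.Body)
			resp.Body.Close()
			fmt.Printf("On try %d, etcd: %s\n", try, body)
			return
		}
		time.Sleep(time.Second)
	}
	fmt.Println("etcd did not come up")
}

The final entrypoint message ("Entrypoint received interrupt: terminated") indicates the Prow job was terminated externally shortly after the integration tests started, which is why the run is reported as FAILURE despite 0 failed test cases.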