PR       | wojtek-t: Clear shutdown of scheduler metrics recorder
Result   | ABORTED
Tests    | 0 failed / 134 succeeded
Started  |
Elapsed  | 17m53s
Revision | b4fa2e058474bb629164ecd4908e580beee4aa5f
Refs     | 110148
k8s.io/kubernetes/cmd/kubeadm/test/cmd TestCmdCompletion
k8s.io/kubernetes/cmd/kubeadm/test/cmd TestCmdCompletion/shell_not_expected
k8s.io/kubernetes/cmd/kubeadm/test/cmd TestCmdCompletion/unsupported_shell_type
k8s.io/kubernetes/cmd/kubeadm/test/cmd TestCmdInitAPIPort
k8s.io/kubernetes/cmd/kubeadm/test/cmd TestCmdInitAPIPort/accept_a_valid_port_number
k8s.io/kubernetes/cmd/kubeadm/test/cmd TestCmdInitAPIPort/fail_on_negative_port_number
k8s.io/kubernetes/cmd/kubeadm/test/cmd TestCmdInitAPIPort/fail_on_non-string_port
k8s.io/kubernetes/cmd/kubeadm/test/cmd TestCmdInitAPIPort/fail_on_too_large_port_number
k8s.io/kubernetes/cmd/kubeadm/test/cmd TestCmdInitConfig
k8s.io/kubernetes/cmd/kubeadm/test/cmd TestCmdInitConfig/can't_load_old_component_config
k8s.io/kubernetes/cmd/kubeadm/test/cmd TestCmdInitConfig/can't_load_v1beta1_config
k8s.io/kubernetes/cmd/kubeadm/test/cmd TestCmdInitConfig/can_load_current_component_config
k8s.io/kubernetes/cmd/kubeadm/test/cmd TestCmdInitConfig/can_load_v1beta2_config
k8s.io/kubernetes/cmd/kubeadm/test/cmd TestCmdInitConfig/can_load_v1beta3_config
k8s.io/kubernetes/cmd/kubeadm/test/cmd TestCmdInitConfig/don't_allow_mixed_arguments_v1beta2
k8s.io/kubernetes/cmd/kubeadm/test/cmd TestCmdInitConfig/don't_allow_mixed_arguments_v1beta3
k8s.io/kubernetes/cmd/kubeadm/test/cmd TestCmdInitConfig/fail_on_non_existing_path
k8s.io/kubernetes/cmd/kubeadm/test/cmd TestCmdInitFeatureGates
k8s.io/kubernetes/cmd/kubeadm/test/cmd TestCmdInitFeatureGates/feature_gate_PublicKeysECDSA=true
k8s.io/kubernetes/cmd/kubeadm/test/cmd TestCmdInitFeatureGates/no_feature_gates_passed
k8s.io/kubernetes/cmd/kubeadm/test/cmd TestCmdInitKubernetesVersion
k8s.io/kubernetes/cmd/kubeadm/test/cmd TestCmdInitKubernetesVersion/invalid_semantic_version_string_is_detected
k8s.io/kubernetes/cmd/kubeadm/test/cmd TestCmdInitKubernetesVersion/valid_version_is_accepted
k8s.io/kubernetes/cmd/kubeadm/test/cmd TestCmdInitToken
k8s.io/kubernetes/cmd/kubeadm/test/cmd TestCmdInitToken/invalid_token_non-lowercase
k8s.io/kubernetes/cmd/kubeadm/test/cmd TestCmdInitToken/invalid_token_size
k8s.io/kubernetes/cmd/kubeadm/test/cmd TestCmdInitToken/valid_token_is_accepted
k8s.io/kubernetes/cmd/kubeadm/test/cmd TestCmdJoinArgsMixed
k8s.io/kubernetes/cmd/kubeadm/test/cmd TestCmdJoinArgsMixed/discovery-token_and_config
k8s.io/kubernetes/cmd/kubeadm/test/cmd TestCmdJoinBadArgs
k8s.io/kubernetes/cmd/kubeadm/test/cmd TestCmdJoinBadArgs/discovery-token_and_discovery-file_can't_both_be_set
k8s.io/kubernetes/cmd/kubeadm/test/cmd TestCmdJoinBadArgs/discovery-token_or_discovery-file_must_be_set
k8s.io/kubernetes/cmd/kubeadm/test/cmd TestCmdJoinConfig
k8s.io/kubernetes/cmd/kubeadm/test/cmd TestCmdJoinConfig/config
k8s.io/kubernetes/cmd/kubeadm/test/cmd TestCmdJoinConfig/config_path
k8s.io/kubernetes/cmd/kubeadm/test/cmd TestCmdJoinDiscoveryFile
k8s.io/kubernetes/cmd/kubeadm/test/cmd TestCmdJoinDiscoveryFile/invalid_discovery_file
k8s.io/kubernetes/cmd/kubeadm/test/cmd TestCmdJoinDiscoveryFile/valid_discovery_file
k8s.io/kubernetes/cmd/kubeadm/test/cmd TestCmdJoinDiscoveryToken
k8s.io/kubernetes/cmd/kubeadm/test/cmd TestCmdJoinDiscoveryToken/valid_discovery_token
k8s.io/kubernetes/cmd/kubeadm/test/cmd TestCmdJoinDiscoveryToken/valid_discovery_token_url
k8s.io/kubernetes/cmd/kubeadm/test/cmd TestCmdJoinNodeName
k8s.io/kubernetes/cmd/kubeadm/test/cmd TestCmdJoinNodeName/valid_node_name
k8s.io/kubernetes/cmd/kubeadm/test/cmd TestCmdJoinTLSBootstrapToken
k8s.io/kubernetes/cmd/kubeadm/test/cmd TestCmdJoinTLSBootstrapToken/valid_bootstrap_token
k8s.io/kubernetes/cmd/kubeadm/test/cmd TestCmdJoinTLSBootstrapToken/valid_bootstrap_token_url
k8s.io/kubernetes/cmd/kubeadm/test/cmd TestCmdJoinToken
k8s.io/kubernetes/cmd/kubeadm/test/cmd TestCmdJoinToken/valid_token
k8s.io/kubernetes/cmd/kubeadm/test/cmd TestCmdJoinToken/valid_token_url
k8s.io/kubernetes/cmd/kubeadm/test/cmd TestCmdTokenDelete
k8s.io/kubernetes/cmd/kubeadm/test/cmd TestCmdTokenDelete/invalid_token
k8s.io/kubernetes/cmd/kubeadm/test/cmd TestCmdTokenDelete/no_token_provided
k8s.io/kubernetes/cmd/kubeadm/test/cmd TestCmdTokenGenerate
k8s.io/kubernetes/cmd/kubeadm/test/cmd TestCmdTokenGenerateTypoError
k8s.io/kubernetes/cmd/kubeadm/test/cmd TestCmdVersion
k8s.io/kubernetes/cmd/kubeadm/test/cmd TestCmdVersion/default_output
k8s.io/kubernetes/cmd/kubeadm/test/cmd TestCmdVersion/invalid_output_option
k8s.io/kubernetes/cmd/kubeadm/test/cmd TestCmdVersion/short_output
k8s.io/kubernetes/cmd/kubeadm/test/cmd TestCmdVersionOutputJsonOrYaml
k8s.io/kubernetes/cmd/kubeadm/test/cmd TestCmdVersionOutputJsonOrYaml/json_output
k8s.io/kubernetes/cmd/kubeadm/test/cmd TestCmdVersionOutputJsonOrYaml/yaml_output
test-cmd run_RESTMapper_evaluation_tests
test-cmd run_assert_categories_tests
test-cmd run_assert_short_name_tests
test-cmd run_authorization_tests
test-cmd run_certificates_tests
test-cmd run_client_config_tests
test-cmd run_cluster_management_tests
test-cmd run_clusterroles_tests
test-cmd run_configmap_tests
test-cmd run_convert_tests
test-cmd run_crd_deletion_recreation_tests
test-cmd run_crd_tests
test-cmd run_create_job_tests
test-cmd run_create_secret_tests
test-cmd run_daemonset_history_tests
test-cmd run_daemonset_tests
test-cmd run_deployment_tests
test-cmd run_deprecated_api_tests
test-cmd run_exec_credentials_interactive_tests
test-cmd run_exec_credentials_tests
test-cmd run_impersonation_tests
test-cmd run_job_tests
test-cmd run_kubectl_all_namespace_tests
test-cmd run_kubectl_apply_deployments_tests
test-cmd run_kubectl_apply_tests
test-cmd run_kubectl_config_set_cluster_tests
test-cmd run_kubectl_config_set_credentials_tests
test-cmd run_kubectl_config_set_tests
test-cmd run_kubectl_create_error_tests
test-cmd run_kubectl_create_filter_tests
test-cmd run_kubectl_create_kustomization_directory_tests
test-cmd run_kubectl_create_validate_tests
test-cmd run_kubectl_debug_node_tests
test-cmd run_kubectl_debug_pod_tests
test-cmd run_kubectl_delete_allnamespaces_tests
test-cmd run_kubectl_diff_same_names
test-cmd run_kubectl_diff_tests
test-cmd run_kubectl_exec_pod_tests
test-cmd run_kubectl_exec_resource_name_tests
test-cmd run_kubectl_explain_tests
test-cmd run_kubectl_get_tests
test-cmd run_kubectl_local_proxy_tests
test-cmd run_kubectl_request_timeout_tests
test-cmd run_kubectl_results_tests
test-cmd run_kubectl_run_tests
test-cmd run_kubectl_server_side_apply_tests
test-cmd run_kubectl_sort_by_tests
test-cmd run_kubectl_version_tests
test-cmd run_lists_tests
test-cmd run_multi_resources_tests
test-cmd run_namespace_tests
test-cmd run_nodes_tests
test-cmd run_persistent_volume_claims_tests
test-cmd run_persistent_volumes_tests
test-cmd run_plugins_tests
test-cmd run_pod_templates_tests
test-cmd run_pod_tests
test-cmd run_rc_tests
test-cmd run_recursive_resources_tests
test-cmd run_resource_aliasing_tests
test-cmd run_retrieve_multiple_tests
test-cmd run_role_tests
test-cmd run_rs_tests
test-cmd run_save_config_tests
test-cmd run_secrets_test
test-cmd run_service_accounts_tests
test-cmd run_service_tests
test-cmd run_stateful_set_tests
test-cmd run_statefulset_history_tests
test-cmd run_storage_class_tests
test-cmd run_swagger_tests
test-cmd run_template_output_tests
test-cmd run_wait_tests
... skipping 75 lines ...
Recording: record_command_canary
Running command: record_command_canary
+++ Running case: test-cmd.record_command_canary
+++ working dir: /home/prow/go/src/k8s.io/kubernetes
+++ command: record_command_canary
/home/prow/go/src/k8s.io/kubernetes/test/cmd/legacy-script.sh: line 162: bogus-expected-to-fail: command not found
!!! [0520 17:04:02] Call tree:
!!! [0520 17:04:02]  1: /home/prow/go/src/k8s.io/kubernetes/test/cmd/../../third_party/forked/shell2junit/sh2ju.sh:47 record_command_canary(...)
!!! [0520 17:04:02]  2: /home/prow/go/src/k8s.io/kubernetes/test/cmd/../../third_party/forked/shell2junit/sh2ju.sh:112 eVal(...)
!!! [0520 17:04:02]  3: /home/prow/go/src/k8s.io/kubernetes/test/cmd/legacy-script.sh:138 juLog(...)
!!! [0520 17:04:02]  4: /home/prow/go/src/k8s.io/kubernetes/test/cmd/legacy-script.sh:166 record_command(...)
!!! [0520 17:04:02]  5: hack/make-rules/test-cmd.sh:35 source(...)
+++ exit code: 1
+++ error: 1
+++ [0520 17:04:02] Running kubeadm tests
+++ [0520 17:04:03] Building go targets for linux/amd64
    k8s.io/kubernetes/hack/make-rules/helpers/go2make (non-static)
+++ [0520 17:04:06] Building go targets for linux/amd64
    k8s.io/kubernetes/cmd/kubeadm (static)
+++ [0520 17:04:50] Building go targets for linux/amd64
... skipping 220 lines ...
    k8s.io/kubernetes/hack/make-rules/helpers/go2make (non-static)
+++ [0520 17:07:57] Building go targets for linux/amd64
    k8s.io/kubernetes/cmd/kube-controller-manager (static)
+++ [0520 17:08:28] Generate kubeconfig for controller-manager
+++ [0520 17:08:28] Starting controller-manager
I0520 17:08:29.033585 56621 serving.go:348] Generated self-signed cert in-memory
W0520 17:08:29.521126 56621 authentication.go:423] failed to read in-cluster kubeconfig for delegated authentication: open /var/run/secrets/kubernetes.io/serviceaccount/token: no such file or directory
W0520 17:08:29.521184 56621 authentication.go:317] No authentication-kubeconfig provided in order to lookup client-ca-file in configmap/extension-apiserver-authentication in kube-system, so client certificate authentication won't work.
W0520 17:08:29.521197 56621 authentication.go:341] No authentication-kubeconfig provided in order to lookup requestheader-client-ca-file in configmap/extension-apiserver-authentication in kube-system, so request-header client certificate authentication won't work.
W0520 17:08:29.521218 56621 authorization.go:225] failed to read in-cluster kubeconfig for delegated authorization: open /var/run/secrets/kubernetes.io/serviceaccount/token: no such file or directory
W0520 17:08:29.521233 56621 authorization.go:193] No authorization-kubeconfig provided, so SubjectAccessReview of authorization tokens won't work.
I0520 17:08:29.521258 56621 controllermanager.go:180] Version: v1.25.0-alpha.0.585+e3af8567ce785a
I0520 17:08:29.521280 56621 controllermanager.go:182] "Golang settings" GOGC="" GOMAXPROCS="" GOTRACEBACK=""
I0520 17:08:29.522787 56621 secure_serving.go:210] Serving securely on [::]:10257
I0520 17:08:29.522966 56621 tlsconfig.go:240] "Starting DynamicServingCertificateController"
I0520 17:08:29.523179 56621 leaderelection.go:248] attempting to acquire leader lease kube-system/kube-controller-manager...
... skipping 18 lines ...
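The delegated-auth warnings above are expected in this harness: the controller-manager runs outside a pod, so no in-cluster service-account token exists. For reference, a minimal sketch of how a real deployment would enable delegated auth (kubeconfig path hypothetical, not part of this test setup):

  # Hypothetical kubeconfig path; both flags are real kube-controller-manager options
  # that let it perform TokenReview/SubjectAccessReview against the API server.
  kube-controller-manager \
    --authentication-kubeconfig=/etc/kubernetes/controller-manager.conf \
    --authorization-kubeconfig=/etc/kubernetes/controller-manager.conf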
I0520 17:08:29.563696 56621 dynamic_serving_content.go:132] "Starting controller" name="csr-controller::hack/testdata/ca/ca.crt::hack/testdata/ca/ca.key"
I0520 17:08:29.563937 56621 controllermanager.go:593] Started "csrsigning"
I0520 17:08:29.564037 56621 certificate_controller.go:119] Starting certificate controller "csrsigning-legacy-unknown"
I0520 17:08:29.564055 56621 shared_informer.go:255] Waiting for caches to sync for certificate-csrsigning-legacy-unknown
I0520 17:08:29.564086 56621 dynamic_serving_content.go:132] "Starting controller" name="csr-controller::hack/testdata/ca/ca.crt::hack/testdata/ca/ca.key"
I0520 17:08:29.564169 56621 node_lifecycle_controller.go:77] Sending events to api server
E0520 17:08:29.564195 56621 core.go:211] failed to start cloud node lifecycle controller: no cloud provider provided
W0520 17:08:29.564208 56621 controllermanager.go:571] Skipping "cloud-node-lifecycle"
W0520 17:08:29.564446 56621 mutation_detector.go:53] Mutation detector is enabled, this will result in memory leakage.
W0520 17:08:29.564491 56621 mutation_detector.go:53] Mutation detector is enabled, this will result in memory leakage.
E0520 17:08:29.564528 56621 core.go:91] Failed to start service controller: WARNING: no cloud provider provided, services of type LoadBalancer will fail
W0520 17:08:29.564545 56621 controllermanager.go:571] Skipping "service"
W0520 17:08:29.564721 56621 mutation_detector.go:53] Mutation detector is enabled, this will result in memory leakage.
W0520 17:08:29.564744 56621 mutation_detector.go:53] Mutation detector is enabled, this will result in memory leakage.
W0520 17:08:29.564794 56621 mutation_detector.go:53] Mutation detector is enabled, this will result in memory leakage.
W0520 17:08:29.564812 56621 mutation_detector.go:53] Mutation detector is enabled, this will result in memory leakage.
I0520 17:08:29.564885 56621 controllermanager.go:593] Started "statefulset"
... skipping 192 lines ...
I0520 17:08:29.988874 56621 shared_informer.go:262] Caches are synced for PV protection
I0520 17:08:29.991302 56621 shared_informer.go:262] Caches are synced for resource quota
I0520 17:08:30.384948 56621 shared_informer.go:262] Caches are synced for garbage collector
I0520 17:08:30.384991 56621 garbagecollector.go:158] Garbage collector: all resource monitors have synced. Proceeding to collect garbage
I0520 17:08:30.417651 56621 shared_informer.go:262] Caches are synced for garbage collector
node/127.0.0.1 created
W0520 17:08:30.945217 56621 actual_state_of_world.go:541] Failed to update statusUpdateNeeded field in actual state of world: Failed to set statusUpdateNeeded to needed true, because nodeName="127.0.0.1" does not exist
+++ [0520 17:08:30] Checking kubectl version
WARNING: This version information is deprecated and will be replaced with the output from kubectl version --short. Use --output=yaml|json to get the full version.
Client Version: version.Info{Major:"1", Minor:"25+", GitVersion:"v1.25.0-alpha.0.585+e3af8567ce785a", GitCommit:"e3af8567ce785acb8ee5ab4090efcc7173333314", GitTreeState:"clean", BuildDate:"2022-05-20T10:45:13Z", GoVersion:"go1.18.2", Compiler:"gc", Platform:"linux/amd64"}
Kustomize Version: v4.5.4
Server Version: version.Info{Major:"1", Minor:"25+", GitVersion:"v1.25.0-alpha.0.585+e3af8567ce785a", GitCommit:"e3af8567ce785acb8ee5ab4090efcc7173333314", GitTreeState:"clean", BuildDate:"2022-05-20T10:45:13Z", GoVersion:"go1.18.2", Compiler:"gc", Platform:"linux/amd64"}
The Service "kubernetes" is invalid: spec.clusterIPs: Invalid value: []string{"10.0.0.1"}: failed to allocate IP 10.0.0.1: provided IP is already allocated
NAME         TYPE        CLUSTER-IP   EXTERNAL-IP   PORT(S)   AGE
kubernetes   ClusterIP   10.0.0.1     <none>        443/TCP   40s
Recording: run_kubectl_version_tests
Running command: run_kubectl_version_tests
+++ Running case: test-cmd.run_kubectl_version_tests
... skipping 196 lines ...
+++ working dir: /home/prow/go/src/k8s.io/kubernetes
+++ command: run_RESTMapper_evaluation_tests
+++ [0520 17:08:36] Creating namespace namespace-1653066516-11685
namespace/namespace-1653066516-11685 created
Context "test" modified.
+++ [0520 17:08:36] Testing RESTMapper
+++ [0520 17:08:37] "kubectl get unknownresourcetype" returns error as expected: error: the server doesn't have a resource type "unknownresourcetype"
+++ exit code: 0
NAME                SHORTNAMES   APIVERSION   NAMESPACED   KIND
bindings                         v1           true         Binding
componentstatuses   cs           v1           false        ComponentStatus
configmaps          cm           v1           true         ConfigMap
endpoints           ep           v1           true         Endpoints
... skipping 60 lines ...
namespace/namespace-1653066525-5863 created
Context "test" modified.
+++ [0520 17:08:46] Testing clusterroles
rbac.sh:29: Successful get clusterroles/cluster-admin {{.metadata.name}}: cluster-admin
rbac.sh:30: Successful get clusterrolebindings/cluster-admin {{.metadata.name}}: cluster-admin
Successful
message:Error from server (NotFound): clusterroles.rbac.authorization.k8s.io "pod-admin" not found
has:clusterroles.rbac.authorization.k8s.io "pod-admin" not found
clusterrole.rbac.authorization.k8s.io/pod-admin created (dry run)
clusterrole.rbac.authorization.k8s.io/pod-admin created (server dry run)
Successful
message:Error from server (NotFound): clusterroles.rbac.authorization.k8s.io "pod-admin" not found
has:clusterroles.rbac.authorization.k8s.io "pod-admin" not found
clusterrole.rbac.authorization.k8s.io/pod-admin created
rbac.sh:42: Successful get clusterrole/pod-admin {{range.rules}}{{range.verbs}}{{.}}:{{end}}{{end}}: *:
Successful
message:warning: deleting cluster-scoped resources, not scoped to the provided namespace
clusterrole.rbac.authorization.k8s.io "pod-admin" deleted
... skipping 18 lines ...
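The paired "(dry run)" / "(server dry run)" lines above come from kubectl's two dry-run modes. A sketch using the same clusterrole as the test:

  kubectl create clusterrole pod-admin --verb='*' --resource=pods --dry-run=client   # rendered locally, nothing sent
  kubectl create clusterrole pod-admin --verb='*' --resource=pods --dry-run=server   # submitted and validated, not persisted
  kubectl create clusterrole pod-admin --verb='*' --resource=pods                    # actually persisted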
clusterrole.rbac.authorization.k8s.io/url-reader created
rbac.sh:61: Successful get clusterrole/url-reader {{range.rules}}{{range.verbs}}{{.}}:{{end}}{{end}}: get:
rbac.sh:62: Successful get clusterrole/url-reader {{range.rules}}{{range.nonResourceURLs}}{{.}}:{{end}}{{end}}: /logs/*:/healthz/*:
clusterrole.rbac.authorization.k8s.io/aggregation-reader created
rbac.sh:64: Successful get clusterrole/aggregation-reader {{.metadata.name}}: aggregation-reader
Successful
message:Error from server (NotFound): clusterrolebindings.rbac.authorization.k8s.io "super-admin" not found
has:clusterrolebindings.rbac.authorization.k8s.io "super-admin" not found
clusterrolebinding.rbac.authorization.k8s.io/super-admin created (dry run)
clusterrolebinding.rbac.authorization.k8s.io/super-admin created (server dry run)
Successful
message:Error from server (NotFound): clusterrolebindings.rbac.authorization.k8s.io "super-admin" not found
has:clusterrolebindings.rbac.authorization.k8s.io "super-admin" not found
clusterrolebinding.rbac.authorization.k8s.io/super-admin created
rbac.sh:77: Successful get clusterrolebinding/super-admin {{range.subjects}}{{.name}}:{{end}}: super-admin:
clusterrolebinding.rbac.authorization.k8s.io/super-admin subjects updated (dry run)
clusterrolebinding.rbac.authorization.k8s.io/super-admin subjects updated (server dry run)
rbac.sh:80: Successful get clusterrolebinding/super-admin {{range.subjects}}{{.name}}:{{end}}: super-admin:
... skipping 64 lines ...
rbac.sh:102: Successful get clusterrolebinding/super-admin {{range.subjects}}{{.name}}:{{end}}: super-admin:foo:test-all-user:
rbac.sh:103: Successful get clusterrolebinding/super-group {{range.subjects}}{{.name}}:{{end}}: the-group:foo:test-all-user:
rbac.sh:104: Successful get clusterrolebinding/super-sa {{range.subjects}}{{.name}}:{{end}}: sa-name:foo:test-all-user:
rolebinding.rbac.authorization.k8s.io/admin created (dry run)
rolebinding.rbac.authorization.k8s.io/admin created (server dry run)
Successful
message:Error from server (NotFound): rolebindings.rbac.authorization.k8s.io "admin" not found
has: not found
rolebinding.rbac.authorization.k8s.io/admin created
rbac.sh:113: Successful get rolebinding/admin {{.roleRef.kind}}: ClusterRole
rbac.sh:114: Successful get rolebinding/admin {{range.subjects}}{{.name}}:{{end}}: default-admin:
rolebinding.rbac.authorization.k8s.io/admin subjects updated
rbac.sh:116: Successful get rolebinding/admin {{range.subjects}}{{.name}}:{{end}}: default-admin:foo:
... skipping 152 lines ...
namespace/namespace-1653066535-14971 created
Context "test" modified.
+++ [0520 17:08:55] Testing role
role.rbac.authorization.k8s.io/pod-admin created (dry run)
role.rbac.authorization.k8s.io/pod-admin created (server dry run)
Successful
message:Error from server (NotFound): roles.rbac.authorization.k8s.io "pod-admin" not found
has: not found
role.rbac.authorization.k8s.io/pod-admin created
rbac.sh:159: Successful get role/pod-admin {{range.rules}}{{range.verbs}}{{.}}:{{end}}{{end}}: *:
rbac.sh:160: Successful get role/pod-admin {{range.rules}}{{range.resources}}{{.}}:{{end}}{{end}}: pods:
rbac.sh:161: Successful get role/pod-admin {{range.rules}}{{range.apiGroups}}{{.}}:{{end}}{{end}}: :
Successful
... skipping 439 lines ...
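The url-reader assertions above exercise non-resource URL rules, which grant verbs on raw API-server paths rather than on objects. A sketch with the same names as the test:

  kubectl create clusterrole url-reader --verb=get \
    --non-resource-url='/logs/*' --non-resource-url='/healthz/*'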
has:valid-pod
Successful
message:NAME        READY   STATUS    RESTARTS   AGE
valid-pod   0/1     Pending   0          0s
has:valid-pod
core.sh:194: Successful get pods {{range.items}}{{.metadata.name}}:{{end}}: valid-pod:
error: resource(s) were provided, but no name was specified
core.sh:198: Successful get pods {{range.items}}{{.metadata.name}}:{{end}}: valid-pod:
core.sh:202: Successful get pods {{range.items}}{{.metadata.name}}:{{end}}: valid-pod:
error: setting 'all' parameter but found a non empty selector.
core.sh:206: Successful get pods {{range.items}}{{.metadata.name}}:{{end}}: valid-pod:
core.sh:210: Successful get pods {{range.items}}{{.metadata.name}}:{{end}}: valid-pod:
warning: Immediate deletion does not wait for confirmation that the running resource has been terminated. The resource may continue to run on the cluster indefinitely.
pod "valid-pod" force deleted
core.sh:214: Successful get pods -l'name in (valid-pod)' {{range.items}}{{.metadata.name}}:{{end}}:
core.sh:219: Successful get namespaces {{range.items}}{{ if eq .metadata.name \"test-kubectl-describe-pod\" }}found{{end}}{{end}}:: :
... skipping 30 lines ...
I0520 17:09:09.484871 61423 round_trippers.go:553] GET https://127.0.0.1:6443/apis/policy/v1/namespaces/test-kubectl-describe-pod/poddisruptionbudgets/test-pdb-2 200 OK in 1 milliseconds
I0520 17:09:09.486785 61423 round_trippers.go:553] GET https://127.0.0.1:6443/api/v1/namespaces/test-kubectl-describe-pod/events?fieldSelector=involvedObject.name%3Dtest-pdb-2%2CinvolvedObject.namespace%3Dtest-kubectl-describe-pod%2CinvolvedObject.kind%3DPodDisruptionBudget%2CinvolvedObject.uid%3D14fd0add-3b06-46fb-8443-13d5b4b176b8&limit=500 200 OK in 1 milliseconds
poddisruptionbudget.policy/test-pdb-3 created
core.sh:271: Successful get pdb/test-pdb-3 --namespace=test-kubectl-describe-pod {{.spec.maxUnavailable}}: 2
poddisruptionbudget.policy/test-pdb-4 created
core.sh:275: Successful get pdb/test-pdb-4 --namespace=test-kubectl-describe-pod {{.spec.maxUnavailable}}: 50%
error: min-available and max-unavailable cannot be both specified
core.sh:281: Successful get pods --namespace=test-kubectl-describe-pod {{range.items}}{{.metadata.name}}:{{end}}:
pod/env-test-pod created
matched TEST_CMD_1
matched <set to the key 'key-1' in secret 'test-secret'>
matched TEST_CMD_2
matched <set to the key 'key-2' of config map 'test-configmap'>
... skipping 242 lines ...
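The min-available/max-unavailable error above reflects that a PodDisruptionBudget takes at most one of those two fields. A sketch (selector hypothetical):

  kubectl create poddisruptionbudget pdb-a --selector=app=nginx --min-available=2       # accepted
  kubectl create poddisruptionbudget pdb-b --selector=app=nginx --max-unavailable=50%   # accepted
  # passing both flags on one command is rejected, as the error above shows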
core.sh:542: Successful get pods {{range.items}}{{(index .spec.containers 0).image}}:{{end}}: k8s.gcr.io/pause:3.7:
Successful
message:kubectl-create kubectl-patch
has:kubectl-patch
pod/valid-pod patched
core.sh:562: Successful get pods {{range.items}}{{(index .spec.containers 0).image}}:{{end}}: nginx:
+++ [0520 17:09:27] "kubectl patch with resourceVersion 594" returns error as expected: Error from server (Conflict): Operation cannot be fulfilled on pods "valid-pod": the object has been modified; please apply your changes to the latest version and try again
pod "valid-pod" deleted
pod/valid-pod replaced
core.sh:586: Successful get pod valid-pod {{(index .spec.containers 0).name}}: replaced-k8s-serve-hostname
Successful
message:kubectl-replace
has:kubectl-replace
Successful
message:error: --grace-period must have --force specified
has:\-\-grace-period must have \-\-force specified
Successful
message:error: --timeout must have --force specified
has:\-\-timeout must have \-\-force specified
node/node-v1-test created
W0520 17:09:28.780088 56621 actual_state_of_world.go:541] Failed to update statusUpdateNeeded field in actual state of world: Failed to set statusUpdateNeeded to needed true, because nodeName="node-v1-test" does not exist
core.sh:614: Successful get node node-v1-test {{range.items}}{{if .metadata.annotations.a}}found{{end}}{{end}}:: :
node/node-v1-test replaced (server dry run)
node/node-v1-test replaced (dry run)
core.sh:639: Successful get node node-v1-test {{range.items}}{{if .metadata.annotations.a}}found{{end}}{{end}}:: :
node/node-v1-test replaced
core.sh:655: Successful get node node-v1-test {{.metadata.annotations.a}}: b
... skipping 29 lines ...
spec:
  containers:
  - image: k8s.gcr.io/pause:3.7
    name: kubernetes-pause
has:localonlyvalue
core.sh:691: Successful get pod valid-pod {{.metadata.labels.name}}: valid-pod
error: 'name' already has a value (valid-pod), and --overwrite is false
core.sh:695: Successful get pod valid-pod {{.metadata.labels.name}}: valid-pod
core.sh:699: Successful get pod valid-pod {{.metadata.labels.name}}: valid-pod
pod/valid-pod labeled
core.sh:703: Successful get pod valid-pod {{.metadata.labels.name}}: valid-pod-super-sayan
core.sh:707: Successful get pods {{range.items}}{{.metadata.name}}:{{end}}: valid-pod:
warning: Immediate deletion does not wait for confirmation that the running resource has been terminated. The resource may continue to run on the cluster indefinitely.
... skipping 84 lines ...
+++ Running case: test-cmd.run_kubectl_create_error_tests
+++ working dir: /home/prow/go/src/k8s.io/kubernetes
+++ command: run_kubectl_create_error_tests
+++ [0520 17:09:39] Creating namespace namespace-1653066579-7084
namespace/namespace-1653066579-7084 created
Context "test" modified.
+++ [0520 17:09:39] Testing kubectl create with error
Error: must specify one of -f and -k

Create a resource from a file or from stdin.

JSON and YAML formats are accepted.

Examples:
... skipping 63 lines ...
    If true, keep the managedFields when printing objects in JSON or YAML format.

    --template='':
    Template string or path to template file to use when -o=go-template, -o=go-template-file. The template format is golang templates [http://golang.org/pkg/text/template/#pkg-overview].

    --validate='strict':
    Must be one of: strict (or true), warn, ignore (or false). "true" or "strict" will use a schema to validate the input and fail the request if invalid.
    It will perform server side validation if ServerSideFieldValidation is enabled on the api-server, but will fall back to less reliable client-side validation if not. "warn" will warn about unknown or duplicate fields without blocking the request if server-side field validation is enabled on the API server, and behave as "ignore" otherwise. "false" or "ignore" will not perform any schema validation, silently dropping any unknown or duplicate fields.

    --windows-line-endings=false:
    Only relevant if --edit=true. Defaults to the line ending native to your platform.

Usage:
  kubectl create -f FILENAME [options]
... skipping 38 lines ...
I0520 17:09:42.963314 56621 event.go:294] "Event occurred" object="namespace-1653066580-2828/test-deployment-retainkeys-54bb65fd55" fieldPath="" kind="ReplicaSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulCreate" message="Created pod: test-deployment-retainkeys-54bb65fd55-xjp5n"
deployment.apps "test-deployment-retainkeys" deleted
apply.sh:88: Successful get pods {{range.items}}{{.metadata.name}}:{{end}}:
pod/selector-test-pod created
apply.sh:92: Successful get pods selector-test-pod {{.metadata.labels.name}}: selector-test-pod
Successful
message:Error from server (NotFound): pods "selector-test-pod-dont-apply" not found
has:pods "selector-test-pod-dont-apply" not found
pod "selector-test-pod" deleted
apply.sh:101: Successful get pods {{range.items}}{{.metadata.name}}:{{end}}:
W0520 17:09:44.042173 65150 helpers.go:650] --dry-run=true is deprecated (boolean value) and can be replaced with --dry-run=client.
pod/test-pod created (dry run)
pod/test-pod created (dry run)
... skipping 29 lines ...
pod/b created
apply.sh:208: Successful get pods a {{.metadata.name}}: a
apply.sh:209: Successful get pods b -n nsb {{.metadata.name}}: b
pod "a" deleted
pod "b" deleted
Successful
message:error: all resources selected for prune without explicitly passing --all. To prune all resources, pass the --all flag. If you did not mean to prune all resources, specify a label selector
has:all resources selected for prune without explicitly passing --all
pod/a created
pod/b created
I0520 17:09:51.546540 53002 alloc.go:327] "allocated clusterIPs" service="namespace-1653066580-2828/prune-svc" clusterIPs=map[IPv4:10.0.0.3]
service/prune-svc created
I0520 17:09:54.055102 56621 horizontal.go:360] Horizontal Pod Autoscaler frontend has been deleted in namespace-1653066576-15093
... skipping 37 lines ...
apply.sh:262: Successful get pods b -n nsb {{.metadata.name}}: b
pod/b unchanged
pod/a pruned
apply.sh:266: Successful get pods -n nsb {{range.items}}{{.metadata.name}}:{{end}}: b:
namespace "nsb" deleted
Successful
message:error: the namespace from the provided object "nsb" does not match the namespace "foo". You must pass '--namespace=nsb' to perform this operation.
has:the namespace from the provided object "nsb" does not match the namespace "foo".
apply.sh:277: Successful get services {{range.items}}{{.metadata.name}}:{{end}}:
service/a created
apply.sh:281: Successful get services a {{.metadata.name}}: a
Successful
message:The Service "a" is invalid: spec.clusterIPs[0]: Invalid value: []string{"10.0.0.12"}: may not change once set
... skipping 28 lines ...
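The --validate help text quoted earlier documents three validation modes; a usage sketch (manifest name hypothetical):

  kubectl create -f pod.yaml --validate=strict   # reject unknown or duplicate fields
  kubectl create -f pod.yaml --validate=warn     # warn but admit the request (needs server-side field validation)
  kubectl create -f pod.yaml --validate=ignore   # skip schema validation entirely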
apply.sh:303: Successful get deployment test-the-deployment {{.metadata.name}}: test-the-deployment
apply.sh:304: Successful get service test-the-service {{.metadata.name}}: test-the-service
configmap "test-the-map" deleted
service "test-the-service" deleted
deployment.apps "test-the-deployment" deleted
Successful
message:Error from server (NotFound): namespaces "multi-resource-ns" not found
has:namespaces "multi-resource-ns" not found
apply.sh:312: Successful get pods {{range.items}}{{.metadata.name}}:{{end}}:
Successful
message:namespace/multi-resource-ns created
Error from server (NotFound): error when creating "hack/testdata/multi-resource-1.yaml": namespaces "multi-resource-ns" not found
has:namespaces "multi-resource-ns" not found
Successful
message:Error from server (NotFound): pods "test-pod" not found
has:pods "test-pod" not found
pod/test-pod created
namespace/multi-resource-ns unchanged
apply.sh:320: Successful get pods test-pod -n multi-resource-ns {{.metadata.name}}: test-pod
pod "test-pod" deleted
namespace "multi-resource-ns" deleted
I0520 17:10:21.150265 56621 namespace_controller.go:185] Namespace has been deleted nsb
apply.sh:326: Successful get configmaps --field-selector=metadata.name=foo {{range.items}}{{.metadata.name}}:{{end}}:
Successful
message:configmap/foo created
error: resource mapping not found for name: "foo" namespace: "" from "hack/testdata/multi-resource-2.yaml": no matches for kind "Bogus" in version "example.com/v1"
ensure CRDs are installed first
has:no matches for kind "Bogus" in version "example.com/v1"
apply.sh:332: Successful get configmaps foo {{.metadata.name}}: foo
configmap "foo" deleted
apply.sh:338: Successful get pods {{range.items}}{{.metadata.name}}:{{end}}:
Successful
... skipping 6 lines ...
pod "pod-a" deleted
pod "pod-c" deleted
apply.sh:346: Successful get pods {{range.items}}{{.metadata.name}}:{{end}}:
apply.sh:350: Successful get crds {{range.items}}{{.metadata.name}}:{{end}}:
Successful
message:customresourcedefinition.apiextensions.k8s.io/widgets.example.com created
error: resource mapping not found for name: "foo" namespace: "" from "hack/testdata/multi-resource-4.yaml": no matches for kind "Widget" in version "example.com/v1"
ensure CRDs are installed first
has:no matches for kind "Widget" in version "example.com/v1"
Successful
message:Error from server (NotFound): widgets.example.com "foo" not found
has:widgets.example.com "foo" not found
apply.sh:356: Successful get crds widgets.example.com {{.metadata.name}}: widgets.example.com
I0520 17:10:29.979552 56621 namespace_controller.go:185] Namespace has been deleted multi-resource-ns
W0520 17:10:30.016654 56621 mutation_detector.go:53] Mutation detector is enabled, this will result in memory leakage.
I0520 17:10:30.016712 56621 resource_quota_monitor.go:233] QuotaMonitor created object count evaluator for widgets.example.com
I0520 17:10:30.016765 56621 shared_informer.go:255] Waiting for caches to sync for resource quota
... skipping 17 lines ...
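The "ensure CRDs are installed first" failures above happen when a custom resource is applied in the same batch as its CRD, before the new REST mapping is served. A common ordering sketch (file names hypothetical; the CRD name is from the test):

  kubectl apply -f widgets-crd.yaml
  kubectl wait --for=condition=established crd/widgets.example.com   # wait until the mapping exists
  kubectl apply -f widget-cr.yaml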
namespace/namespace-1653066630-12319 created
Context "test" modified.
+++ [0520 17:10:30] Testing kubectl apply --server-side
apply.sh:376: Successful get pods {{range.items}}{{.metadata.name}}:{{end}}:
pod/test-pod serverside-applied
W0520 17:10:31.709516 53002 cacher.go:150] Terminating all watchers from cacher *unstructured.Unstructured
E0520 17:10:31.710987 56621 reflector.go:138] vendor/k8s.io/client-go/metadata/metadatainformer/informer.go:90: Failed to watch *v1.PartialObjectMetadata: the server could not find the requested resource
apply.sh:380: Successful get pods test-pod {{.metadata.labels.name}}: test-pod-label
Successful
message:kubectl
has:kubectl
pod/test-pod serverside-applied
Successful
message:kubectl my-field-manager
has:my-field-manager
pod "test-pod" deleted
apply.sh:393: Successful get pods {{range.items}}{{.metadata.name}}:{{end}}:
W0520 17:10:32.955062 56621 reflector.go:324] vendor/k8s.io/client-go/metadata/metadatainformer/informer.go:90: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
E0520 17:10:32.955104 56621 reflector.go:138] vendor/k8s.io/client-go/metadata/metadatainformer/informer.go:90: Failed to watch *v1.PartialObjectMetadata: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
pod/test-pod serverside-applied (server dry run)
apply.sh:398: Successful get pods {{range.items}}{{.metadata.name}}:{{end}}:
pod/test-pod serverside-applied
pod/test-pod serverside-applied (server dry run)
apply.sh:405: Successful get pods test-pod {{.metadata.labels.name}}: test-pod-label
Successful
message:872
has:872
pod "test-pod" deleted
apply.sh:415: Successful get pods {{range.items}}{{.metadata.name}}:{{end}}:
+++ [0520 17:10:33] Testing upgrade kubectl client-side apply to server-side apply
pod/test-pod created
error: Apply failed with 1 conflict: conflict with "kubectl-client-side-apply" using v1: .metadata.labels.name
Please review the fields above--they currently have other managers. Here
are the ways you can resolve this warning:
* If you intend to manage all of these fields, please re-run the apply
  command with the `--force-conflicts` flag.
* If you do not intend to manage all of the fields, please edit your
  manifest to remove references to the fields that should keep their
... skipping 23 lines ...
        ]
    }
}
has:"name": "test-pod-applied"
+++ [0520 17:10:34] Testing downgrade kubectl server-side apply to client-side apply
pod/test-pod serverside-applied
W0520 17:10:35.015877 56621 reflector.go:324] vendor/k8s.io/client-go/metadata/metadatainformer/informer.go:90: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
E0520 17:10:35.015911 56621 reflector.go:138] vendor/k8s.io/client-go/metadata/metadatainformer/informer.go:90: Failed to watch *v1.PartialObjectMetadata: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
Successful
message:{
    "apiVersion": "v1",
    "kind": "Pod",
    "metadata": {
        "labels": {
... skipping 40 lines ...
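The conflict exercised above pits the kubectl-client-side-apply field manager against a server-side apply; the resolutions listed in the log, sketched (manifest name hypothetical):

  kubectl apply --server-side -f pod.yaml                                    # fails: another manager owns the field
  kubectl apply --server-side --force-conflicts -f pod.yaml                  # take ownership of the contested fields
  kubectl apply --server-side --field-manager=my-field-manager -f pod.yaml   # apply under a distinct manager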
(B[mpod "nginx-extensions" deleted [32mSuccessful (B[mmessage:pod/test1 created has:pod/test1 created pod "test1" deleted [32mSuccessful (B[mmessage:error: Invalid image name "InvalidImageName": invalid reference format has:error: Invalid image name "InvalidImageName": invalid reference format +++ exit code: 0 Recording: run_kubectl_create_filter_tests Running command: run_kubectl_create_filter_tests +++ Running case: test-cmd.run_kubectl_create_filter_tests +++ working dir: /home/prow/go/src/k8s.io/kubernetes ... skipping 3 lines ... Context "test" modified. +++ [0520 17:10:38] Testing kubectl create filter [32mcreate.sh:50: Successful get pods {{range.items}}{{.metadata.name}}:{{end}}: (B[mpod/selector-test-pod created [32mcreate.sh:54: Successful get pods selector-test-pod {{.metadata.labels.name}}: selector-test-pod (B[m[32mSuccessful (B[mmessage:Error from server (NotFound): pods "selector-test-pod-dont-apply" not found has:pods "selector-test-pod-dont-apply" not found pod "selector-test-pod" deleted +++ exit code: 0 Recording: run_kubectl_apply_deployments_tests Running command: run_kubectl_apply_deployments_tests ... skipping 3 lines ... +++ [0520 17:10:39] Creating namespace namespace-1653066639-1812 namespace/namespace-1653066639-1812 created Context "test" modified. +++ [0520 17:10:39] Testing kubectl apply deployments [32mapps.sh:121: Successful get deployments {{range.items}}{{.metadata.name}}:{{end}}: (B[m[32mapps.sh:122: Successful get replicasets {{range.items}}{{.metadata.name}}:{{end}}: (B[mW0520 17:10:39.905986 56621 reflector.go:324] vendor/k8s.io/client-go/metadata/metadatainformer/informer.go:90: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource E0520 17:10:39.906024 56621 reflector.go:138] vendor/k8s.io/client-go/metadata/metadatainformer/informer.go:90: Failed to watch *v1.PartialObjectMetadata: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource [32mapps.sh:123: Successful get pods {{range.items}}{{.metadata.name}}:{{end}}: (B[mdeployment.apps/my-depl created I0520 17:10:40.135796 56621 event.go:294] "Event occurred" object="namespace-1653066639-1812/my-depl" fieldPath="" kind="Deployment" apiVersion="apps/v1" type="Normal" reason="ScalingReplicaSet" message="Scaled up replica set my-depl-559b67d5b8 to 1" I0520 17:10:40.171658 56621 event.go:294] "Event occurred" object="namespace-1653066639-1812/my-depl-559b67d5b8" fieldPath="" kind="ReplicaSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulCreate" message="Created pod: my-depl-559b67d5b8-cnb9k" [32mapps.sh:127: Successful get deployments my-depl {{.metadata.name}}: my-depl (B[m[32mapps.sh:129: Successful get deployments my-depl {{.spec.template.metadata.labels.l1}}: l1 ... skipping 3 lines ... 
apps.sh:136: Successful get deployments my-depl {{.spec.template.metadata.labels.l1}}: l1
apps.sh:137: Successful get deployments my-depl {{.spec.selector.matchLabels.l1}}: l1
apps.sh:138: Successful get deployments my-depl {{.metadata.labels.l1}}: <no value>
deployment.apps "my-depl" deleted
replicaset.apps "my-depl-559b67d5b8" deleted
pod "my-depl-559b67d5b8-cnb9k" deleted
E0520 17:10:41.180624 56621 replica_set.go:550] sync "namespace-1653066639-1812/my-depl-559b67d5b8" failed with Operation cannot be fulfilled on replicasets.apps "my-depl-559b67d5b8": StorageError: invalid object, Code: 4, Key: /registry/replicasets/namespace-1653066639-1812/my-depl-559b67d5b8, ResourceVersion: 0, AdditionalErrorMsg: Precondition failed: UID in precondition: b0250aa1-e6f6-4174-b4f3-651dbfe4decb, UID in object meta:
apps.sh:144: Successful get deployments {{range.items}}{{.metadata.name}}:{{end}}:
apps.sh:145: Successful get replicasets {{range.items}}{{.metadata.name}}:{{end}}:
apps.sh:146: Successful get pods {{range.items}}{{.metadata.name}}:{{end}}:
apps.sh:150: Successful get deployments {{range.items}}{{.metadata.name}}:{{end}}:
deployment.apps/nginx created
I0520 17:10:41.788331 56621 event.go:294] "Event occurred" object="namespace-1653066639-1812/nginx" fieldPath="" kind="Deployment" apiVersion="apps/v1" type="Normal" reason="ScalingReplicaSet" message="Scaled up replica set nginx-6cf67855f7 to 3"
I0520 17:10:41.797848 56621 event.go:294] "Event occurred" object="namespace-1653066639-1812/nginx-6cf67855f7" fieldPath="" kind="ReplicaSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulCreate" message="Created pod: nginx-6cf67855f7-tsq6h"
I0520 17:10:41.807911 56621 event.go:294] "Event occurred" object="namespace-1653066639-1812/nginx-6cf67855f7" fieldPath="" kind="ReplicaSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulCreate" message="Created pod: nginx-6cf67855f7-fsq8t"
I0520 17:10:41.809963 56621 event.go:294] "Event occurred" object="namespace-1653066639-1812/nginx-6cf67855f7" fieldPath="" kind="ReplicaSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulCreate" message="Created pod: nginx-6cf67855f7-99qd6"
apps.sh:154: Successful get deployment nginx {{.metadata.name}}: nginx
Successful
message:Error from server (Conflict): error when applying patch:
{"metadata":{"annotations":{"kubectl.kubernetes.io/last-applied-configuration":"{\"apiVersion\":\"apps/v1\",\"kind\":\"Deployment\",\"metadata\":{\"annotations\":{},\"labels\":{\"name\":\"nginx\"},\"name\":\"nginx\",\"namespace\":\"namespace-1653066639-1812\",\"resourceVersion\":\"99\"},\"spec\":{\"replicas\":3,\"selector\":{\"matchLabels\":{\"name\":\"nginx2\"}},\"template\":{\"metadata\":{\"labels\":{\"name\":\"nginx2\"}},\"spec\":{\"containers\":[{\"image\":\"k8s.gcr.io/nginx:test-cmd\",\"name\":\"nginx\",\"ports\":[{\"containerPort\":80}]}]}}}}\n"},"resourceVersion":"99"},"spec":{"selector":{"matchLabels":{"name":"nginx2"}},"template":{"metadata":{"labels":{"name":"nginx2"}}}}}
to:
Resource: "apps/v1, Resource=deployments", GroupVersionKind: "apps/v1, Kind=Deployment"
Name: "nginx", Namespace: "namespace-1653066639-1812"
for: "hack/testdata/deployment-label-change2.yaml": Operation cannot be fulfilled on deployments.apps "nginx": the object has been modified; please apply your changes to the latest version and try again
has:Error from server (Conflict)
deployment.apps/nginx configured
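The Conflict above is the API server's optimistic-concurrency check: the applied manifest pinned resourceVersion "99", which was stale by the time the patch arrived. Per the error text, the remedy is to rebase on the latest version, e.g. by dropping the pinned resourceVersion from the manifest and re-running the same apply:

  kubectl apply -f hack/testdata/deployment-label-change2.yaml   # retried against the current resourceVersion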
object="namespace-1653066639-1812/nginx" fieldPath="" kind="Deployment" apiVersion="apps/v1" type="Normal" reason="ScalingReplicaSet" message="Scaled up replica set nginx-8458596ddd to 3" I0520 17:10:50.461354 56621 event.go:294] "Event occurred" object="namespace-1653066639-1812/nginx-8458596ddd" fieldPath="" kind="ReplicaSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulCreate" message="Created pod: nginx-8458596ddd-vs469" I0520 17:10:50.473603 56621 event.go:294] "Event occurred" object="namespace-1653066639-1812/nginx-8458596ddd" fieldPath="" kind="ReplicaSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulCreate" message="Created pod: nginx-8458596ddd-p594d" I0520 17:10:50.474333 56621 event.go:294] "Event occurred" object="namespace-1653066639-1812/nginx-8458596ddd" fieldPath="" kind="ReplicaSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulCreate" message="Created pod: nginx-8458596ddd-qr2nj" [32mSuccessful (B[mmessage: "name": "nginx2" "name": "nginx2" has:"name": "nginx2" W0520 17:10:52.574267 56621 reflector.go:324] vendor/k8s.io/client-go/metadata/metadatainformer/informer.go:90: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource E0520 17:10:52.574303 56621 reflector.go:138] vendor/k8s.io/client-go/metadata/metadatainformer/informer.go:90: Failed to watch *v1.PartialObjectMetadata: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource [32mSuccessful (B[mmessage:The Deployment "nginx" is invalid: spec.template.metadata.labels: Invalid value: map[string]string{"name":"nginx3"}: `selector` does not match template `labels` has:Invalid value I0520 17:10:54.863005 56621 event.go:294] "Event occurred" object="namespace-1653066639-1812/nginx" fieldPath="" kind="Deployment" apiVersion="apps/v1" type="Normal" reason="ScalingReplicaSet" message="Scaled up replica set nginx-8458596ddd to 3" I0520 17:10:54.909982 56621 event.go:294] "Event occurred" object="namespace-1653066639-1812/nginx-8458596ddd" fieldPath="" kind="ReplicaSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulCreate" message="Created pod: nginx-8458596ddd-4j7jx" I0520 17:10:54.925499 56621 event.go:294] "Event occurred" object="namespace-1653066639-1812/nginx-8458596ddd" fieldPath="" kind="ReplicaSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulCreate" message="Created pod: nginx-8458596ddd-j8hk4" ... skipping 490 lines ... +++ [0520 17:11:03] Creating namespace namespace-1653066663-32129 namespace/namespace-1653066663-32129 created Context "test" modified. +++ [0520 17:11:03] Testing kubectl get [32mget.sh:29: Successful get pods {{range.items}}{{.metadata.name}}:{{end}}: (B[m[32mSuccessful (B[mmessage:Error from server (NotFound): pods "abc" not found has:pods "abc" not found [32mget.sh:37: Successful get pods {{range.items}}{{.metadata.name}}:{{end}}: (B[m[32mSuccessful (B[mmessage:Error from server (NotFound): pods "abc" not found has:pods "abc" not found [32mget.sh:45: Successful get pods {{range.items}}{{.metadata.name}}:{{end}}: (B[m[32mSuccessful (B[mmessage:{ "apiVersion": "v1", "items": [], ... skipping 21 lines ... has not:No resources found [32mSuccessful (B[mmessage:NAME has not:No resources found [32mget.sh:73: Successful get pods {{range.items}}{{.metadata.name}}:{{end}}: (B[m[32mSuccessful (B[mmessage:error: the server doesn't have a resource type "foobar" has not:No resources found [32mSuccessful (B[mmessage:No resources found in namespace-1653066663-32129 namespace. 
Successful
message:No resources found in namespace-1653066663-32129 namespace.
has:No resources found
Successful
message:
has not:No resources found
Successful
message:No resources found in namespace-1653066663-32129 namespace.
has:No resources found
get.sh:93: Successful get pods {{range.items}}{{.metadata.name}}:{{end}}:
Successful
message:Error from server (NotFound): pods "abc" not found
has:pods "abc" not found
Successful
message:Error from server (NotFound): pods "abc" not found
has not:List
Successful
message:I0520 17:11:05.613768 68679 loader.go:372] Config loaded from file: /tmp/tmp.HVLf2MzYKw/.kube/config
I0520 17:11:05.619398 68679 round_trippers.go:553] GET https://127.0.0.1:6443/version?timeout=32s 200 OK in 5 milliseconds
I0520 17:11:05.660308 68679 round_trippers.go:553] GET https://127.0.0.1:6443/api/v1/namespaces/default/pods 200 OK in 2 milliseconds
I0520 17:11:05.662288 68679 round_trippers.go:553] GET https://127.0.0.1:6443/api/v1/namespaces/default/replicationcontrollers 200 OK in 1 milliseconds
... skipping 591 lines ...
    ],
    "kind": "List",
    "metadata": {
        "resourceVersion": ""
    }
}
W0520 17:11:13.665694 56621 reflector.go:324] vendor/k8s.io/client-go/metadata/metadatainformer/informer.go:90: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
E0520 17:11:13.665730 56621 reflector.go:138] vendor/k8s.io/client-go/metadata/metadatainformer/informer.go:90: Failed to watch *v1.PartialObjectMetadata: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
get.sh:158: Successful get pods {{range.items}}{{.metadata.name}}:{{end}}: valid-pod:
<no value>Successful
message:valid-pod:
has:valid-pod:
Successful
message:error: error executing jsonpath "{.missing}": Error executing template: missing is not found.
Printing more information for debugging the template:
    template was:
        {.missing}
    object given to jsonpath engine was: map[string]interface {}{"apiVersion":"v1", "kind":"Pod", "metadata":map[string]interface {}{"creationTimestamp":"2022-05-20T17:11:13Z", "labels":map[string]interface {}{"name":"valid-pod"}, "managedFields":[]interface {}{map[string]interface {}{"apiVersion":"v1", "fieldsType":"FieldsV1", "fieldsV1":map[string]interface {}{"f:metadata":map[string]interface {}{"f:labels":map[string]interface {}{".":map[string]interface {}{}, "f:name":map[string]interface {}{}}}, "f:spec":map[string]interface {}{"f:containers":map[string]interface {}{"k:{\"name\":\"kubernetes-serve-hostname\"}":map[string]interface {}{".":map[string]interface {}{}, "f:image":map[string]interface {}{}, "f:imagePullPolicy":map[string]interface {}{}, "f:name":map[string]interface {}{}, "f:resources":map[string]interface {}{".":map[string]interface {}{}, "f:limits":map[string]interface {}{".":map[string]interface {}{}, "f:cpu":map[string]interface {}{}, "f:memory":map[string]interface {}{}}, "f:requests":map[string]interface {}{".":map[string]interface {}{}, "f:cpu":map[string]interface {}{}, "f:memory":map[string]interface {}{}}}, "f:terminationMessagePath":map[string]interface {}{}, "f:terminationMessagePolicy":map[string]interface {}{}}}, "f:dnsPolicy":map[string]interface {}{}, "f:enableServiceLinks":map[string]interface {}{}, "f:restartPolicy":map[string]interface {}{}, "f:schedulerName":map[string]interface {}{}, "f:securityContext":map[string]interface {}{}, "f:terminationGracePeriodSeconds":map[string]interface {}{}}}, "manager":"kubectl-create", "operation":"Update", "time":"2022-05-20T17:11:13Z"}}, "name":"valid-pod", "namespace":"namespace-1653066673-22065", "resourceVersion":"1050", "uid":"581c0fc6-5034-42b1-abea-7aae8e383d17"}, "spec":map[string]interface {}{"containers":[]interface {}{map[string]interface {}{"image":"k8s.gcr.io/serve_hostname", "imagePullPolicy":"Always", "name":"kubernetes-serve-hostname", "resources":map[string]interface {}{"limits":map[string]interface {}{"cpu":"1", "memory":"512Mi"}, "requests":map[string]interface {}{"cpu":"1", "memory":"512Mi"}}, "terminationMessagePath":"/dev/termination-log", "terminationMessagePolicy":"File"}}, "dnsPolicy":"ClusterFirst", "enableServiceLinks":true, "preemptionPolicy":"PreemptLowerPriority", "priority":0, "restartPolicy":"Always", "schedulerName":"default-scheduler", "securityContext":map[string]interface {}{}, "terminationGracePeriodSeconds":30}, "status":map[string]interface {}{"phase":"Pending", "qosClass":"Guaranteed"}}
has:missing is not found
error: error executing template "{{.missing}}": template: output:1:2: executing "output" at <.missing>: map has no entry for key "missing"
Successful
message:Error executing template: template: output:1:2: executing "output" at <.missing>: map has no entry for key "missing".
Printing more information for debugging the template:
    template was:
        {{.missing}}
    raw data was: {"apiVersion":"v1","kind":"Pod","metadata":{"creationTimestamp":"2022-05-20T17:11:13Z","labels":{"name":"valid-pod"},"managedFields":[{"apiVersion":"v1","fieldsType":"FieldsV1","fieldsV1":{"f:metadata":{"f:labels":{".":{},"f:name":{}}},"f:spec":{"f:containers":{"k:{\"name\":\"kubernetes-serve-hostname\"}":{".":{},"f:image":{},"f:imagePullPolicy":{},"f:name":{},"f:resources":{".":{},"f:limits":{".":{},"f:cpu":{},"f:memory":{}},"f:requests":{".":{},"f:cpu":{},"f:memory":{}}},"f:terminationMessagePath":{},"f:terminationMessagePolicy":{}}},"f:dnsPolicy":{},"f:enableServiceLinks":{},"f:restartPolicy":{},"f:schedulerName":{},"f:securityContext":{},"f:terminationGracePeriodSeconds":{}}},"manager":"kubectl-create","operation":"Update","time":"2022-05-20T17:11:13Z"}],"name":"valid-pod","namespace":"namespace-1653066673-22065","resourceVersion":"1050","uid":"581c0fc6-5034-42b1-abea-7aae8e383d17"},"spec":{"containers":[{"image":"k8s.gcr.io/serve_hostname","imagePullPolicy":"Always","name":"kubernetes-serve-hostname","resources":{"limits":{"cpu":"1","memory":"512Mi"},"requests":{"cpu":"1","memory":"512Mi"}},"terminationMessagePath":"/dev/termination-log","terminationMessagePolicy":"File"}],"dnsPolicy":"ClusterFirst","enableServiceLinks":true,"preemptionPolicy":"PreemptLowerPriority","priority":0,"restartPolicy":"Always","schedulerName":"default-scheduler","securityContext":{},"terminationGracePeriodSeconds":30},"status":{"phase":"Pending","qosClass":"Guaranteed"}}
    object given to template engine was: map[apiVersion:v1 kind:Pod metadata:map[creationTimestamp:2022-05-20T17:11:13Z labels:map[name:valid-pod] managedFields:[map[apiVersion:v1 fieldsType:FieldsV1 fieldsV1:map[f:metadata:map[f:labels:map[.:map[] f:name:map[]]] f:spec:map[f:containers:map[k:{"name":"kubernetes-serve-hostname"}:map[.:map[] f:image:map[] f:imagePullPolicy:map[] f:name:map[] f:resources:map[.:map[] f:limits:map[.:map[] f:cpu:map[] f:memory:map[]] f:requests:map[.:map[] f:cpu:map[] f:memory:map[]]] f:terminationMessagePath:map[] f:terminationMessagePolicy:map[]]] f:dnsPolicy:map[] f:enableServiceLinks:map[] f:restartPolicy:map[] f:schedulerName:map[] f:securityContext:map[] f:terminationGracePeriodSeconds:map[]]] manager:kubectl-create operation:Update time:2022-05-20T17:11:13Z]] name:valid-pod namespace:namespace-1653066673-22065 resourceVersion:1050 uid:581c0fc6-5034-42b1-abea-7aae8e383d17] spec:map[containers:[map[image:k8s.gcr.io/serve_hostname imagePullPolicy:Always name:kubernetes-serve-hostname resources:map[limits:map[cpu:1 memory:512Mi] requests:map[cpu:1 memory:512Mi]] terminationMessagePath:/dev/termination-log terminationMessagePolicy:File]] dnsPolicy:ClusterFirst enableServiceLinks:true preemptionPolicy:PreemptLowerPriority priority:0 restartPolicy:Always schedulerName:default-scheduler securityContext:map[] terminationGracePeriodSeconds:30] status:map[phase:Pending qosClass:Guaranteed]]
has:map has no entry for key "missing"
Successful
message:NAME        READY   STATUS    RESTARTS   AGE
valid-pod   0/1     Pending   0          1s
has:valid-pod
Successful
message:Error from server (NotFound): the server could not find the requested resource
has:the server could not find the requested resource
Successful
message:NAME        READY   STATUS    RESTARTS   AGE
valid-pod   0/1     Pending   0          1s
has:STATUS
Successful
... skipping 4 lines ...
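The two template failures above show the jsonpath and go-template output paths producing different diagnostics for the same missing key; sketched with the pod from the test:

  kubectl get pod valid-pod -o jsonpath='{.missing}'         # Error executing template: missing is not found
  kubectl get pod valid-pod -o go-template='{{.missing}}'    # map has no entry for key "missing"
  kubectl get pod valid-pod -o jsonpath='{.metadata.name}'   # succeeds: valid-pod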