## Update (final answer)

The OP asked me to update my answer to show a "fine-grained" or "specific" service account configuration that does not use cluster-admin.

As far as I can tell, every pod has read access to `/healthz` by default. For example, the following CronJob works just fine without using a ServiceAccount at all:

```yaml
# cronjob
apiVersion: batch/v1beta1
kind: CronJob
metadata:
  name: is-healthz-ok-no-svc
spec:
  schedule: "*/5 * * * *" # at every fifth minute
  jobTemplate:
    spec:
      template:
        spec:
          ######### serviceAccountName: health-reader-sa
          containers:
            - name: is-healthz-ok-no-svc
              image: oze4/is-healthz-ok:latest
          restartPolicy: OnFailure
```

## Original

I went ahead and wrote a proof of concept for this. You can find the full repo here, but the code is below.

**main.go**

```go
package main

import (
	"errors"
	"fmt"
	"os"

	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/rest"
)

func main() {
	client, err := newInClusterClient()
	if err != nil {
		panic(err.Error())
	}

	path := "/healthz"
	content, err := client.Discovery().RESTClient().Get().AbsPath(path).DoRaw()
	if err != nil {
		fmt.Printf("ErrorBadRequest : %s\n", err.Error())
		os.Exit(1)
	}

	contentStr := string(content)
	if contentStr != "ok" {
		fmt.Printf("ErrorNotOk : response != 'ok' : %s\n", contentStr)
		os.Exit(1)
	}

	fmt.Printf("Success : ok!")
	os.Exit(0)
}

func newInClusterClient() (*kubernetes.Clientset, error) {
	config, err := rest.InClusterConfig()
	if err != nil {
		return &kubernetes.Clientset{}, errors.New("Failed loading client config")
	}
	clientset, err := kubernetes.NewForConfig(config)
	if err != nil {
		return &kubernetes.Clientset{}, errors.New("Failed getting clientset")
	}
	return clientset, nil
}
```

**Dockerfile**

```dockerfile
FROM golang:latest
RUN mkdir /app
ADD . /app
WORKDIR /app
RUN go build -o main .
CMD ["/app/main"]
```

**deployment.yaml** (as a CronJob)

```yaml
# cronjob
apiVersion: batch/v1beta1
kind: CronJob
metadata:
  name: is-healthz-ok
spec:
  schedule: "*/5 * * * *" # at every fifth minute
  jobTemplate:
    spec:
      template:
        spec:
          serviceAccountName: is-healthz-ok
          containers:
            - name: is-healthz-ok
              image: oze4/is-healthz-ok:latest
          restartPolicy: OnFailure
---
# service account
apiVersion: v1
kind: ServiceAccount
metadata:
  name: is-healthz-ok
  namespace: default
---
# cluster role binding
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: is-healthz-ok
subjects:
  - kind: ServiceAccount
    name: is-healthz-ok
    namespace: default
roleRef:
  kind: ClusterRole
  ##########################################################################
  # Instead of assigning cluster-admin you can create your own ClusterRole #
  # I used cluster-admin because this is a homelab                         #
  ##########################################################################
  name: cluster-admin
  apiGroup: rbac.authorization.k8s.io
```

**Screenshot** of a successful CronJob run (image not included here).
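The ClusterRoleBinding above uses cluster-admin purely for convenience. As a rough sketch of the "create your own ClusterRole" route mentioned in the comment (the `healthz-reader` name and rules below are my own illustration, not part of the repo), a fine-grained role only needs `get` on the health endpoints, which are non-resource URLs:

```yaml
# Illustrative fine-grained ClusterRole (name and rules are my own, not from the repo above).
# The health endpoints are non-resource URLs, so they are granted via nonResourceURLs
# rather than apiGroups/resources.
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: healthz-reader
rules:
  - nonResourceURLs: ["/healthz", "/livez", "/readyz"]
    verbs: ["get"]
```

You would then point the ClusterRoleBinding's `roleRef` at `healthz-reader` instead of `cluster-admin`.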
## Update 1

The OP asked how to deploy the "in-cluster-client-config", so I am providing an example deployment (the one I am using). You can find the repo here.

Example deployment (I am using a CronJob, but it could be anything):

**cronjob.yaml**

```yaml
apiVersion: batch/v1beta1
kind: CronJob
metadata:
  name: remove-terminating-namespaces-cronjob
spec:
  schedule: "0 */1 * * *" # at minute 0 of each hour, aka once per hour
  #successfulJobsHistoryLimit: 0
  #failedJobsHistoryLimit: 0
  jobTemplate:
    spec:
      template:
        spec:
          serviceAccountName: svc-remove-terminating-namespaces
          containers:
            - name: remove-terminating-namespaces
              image: oze4/service.remove-terminating-namespaces:latest
          restartPolicy: OnFailure
```

**rbac.yaml**

```yaml
apiVersion: v1
kind: ServiceAccount
metadata:
  name: svc-remove-terminating-namespaces
  namespace: default
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: crb-namespace-reader-writer
subjects:
  - kind: ServiceAccount
    name: svc-remove-terminating-namespaces
    namespace: default
roleRef:
  kind: ClusterRole
  ##########################################################################
  # Instead of assigning cluster-admin you can create your own ClusterRole #
  # I used cluster-admin because this is a homelab                         #
  ##########################################################################
  name: cluster-admin
  apiGroup: rbac.authorization.k8s.io
```

## Original answer

It sounds like what you are looking for is the "in-cluster-client-config" from client-go. It is important to remember that when using the "in-cluster-client-config", the API calls in your Go code use the service account of "that" pod, so just make sure you are testing with an account that has permission to read "/livez". I tested the code below and was able to get the "/livez" status:

```go
package main

import (
	"errors"
	"flag"
	"fmt"
	"path/filepath"

	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/rest"
	"k8s.io/client-go/tools/clientcmd"
	"k8s.io/client-go/util/homedir"
)

func main() {
	// I find it easiest to use "out-of-cluster" for testing
	// client, err := newOutOfClusterClient()
	client, err := newInClusterClient()
	if err != nil {
		panic(err.Error())
	}

	livez := "/livez"
	content, _ := client.Discovery().RESTClient().Get().AbsPath(livez).DoRaw()

	fmt.Println(string(content))
}

func newInClusterClient() (*kubernetes.Clientset, error) {
	config, err := rest.InClusterConfig()
	if err != nil {
		return &kubernetes.Clientset{}, errors.New("Failed loading client config")
	}
	clientset, err := kubernetes.NewForConfig(config)
	if err != nil {
		return &kubernetes.Clientset{}, errors.New("Failed getting clientset")
	}
	return clientset, nil
}

// I find it easiest to use "out-of-cluster" for testing
func newOutOfClusterClient() (*kubernetes.Clientset, error) {
	var kubeconfig *string
	if home := homedir.HomeDir(); home != "" {
		kubeconfig = flag.String("kubeconfig", filepath.Join(home, ".kube", "config"), "(optional) absolute path to the kubeconfig file")
	} else {
		kubeconfig = flag.String("kubeconfig", "", "absolute path to the kubeconfig file")
	}
	flag.Parse()

	// use the current context in kubeconfig
	config, err := clientcmd.BuildConfigFromFlags("", *kubeconfig)
	if err != nil {
		return nil, err
	}

	// create the clientset
	client, err := kubernetes.NewForConfig(config)
	if err != nil {
		return nil, err
	}

	return client, nil
}
```
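Since the in-cluster client always acts as the pod's own service account, it can also be handy to have the pod verify its permissions before calling the endpoint. Below is a minimal sketch of that idea (my own addition, not from the original repo) using a SelfSubjectAccessReview; it assumes the `newInClusterClient()` helper shown above and the older, context-free client-go signatures used there:

```go
package main

import (
	"fmt"

	authorizationv1 "k8s.io/api/authorization/v1"
)

// canReadLivez asks the API server whether this pod's service account may GET /livez.
// Note: newer client-go versions also require a context.Context and metav1.CreateOptions
// in the Create call.
func canReadLivez() (bool, error) {
	client, err := newInClusterClient()
	if err != nil {
		return false, err
	}

	review := &authorizationv1.SelfSubjectAccessReview{
		Spec: authorizationv1.SelfSubjectAccessReviewSpec{
			// /livez is a non-resource URL, so NonResourceAttributes is used
			// instead of ResourceAttributes.
			NonResourceAttributes: &authorizationv1.NonResourceAttributes{
				Path: "/livez",
				Verb: "get",
			},
		},
	}

	result, err := client.AuthorizationV1().SelfSubjectAccessReviews().Create(review)
	if err != nil {
		return false, err
	}

	fmt.Printf("allowed=%v reason=%q\n", result.Status.Allowed, result.Status.Reason)
	return result.Status.Allowed, nil
}
```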