util: NewK8sClient() should not panic on non-Kubernetes clusters

When NewK8sClient() detects an error, it used to call FatalLogMsg()
which causes a panic. There are additional features that can be used on
Kubernetes clusters, but these are not a requirement for most
functionalities of the driver.

Instead of causing a panic, returning an error should suffice. This
allows using the driver on non-Kubernetes clusters again.

Fixes: #2452
Signed-off-by: Niels de Vos <ndevos@redhat.com>
This commit is contained in:
Niels de Vos 2021-08-31 14:18:37 +02:00 committed by mergify[bot]
parent e8efa272a6
commit 60c2afbcca
9 changed files with 77 additions and 21 deletions

View File

@ -125,7 +125,12 @@ func initAWSMetadataKMS(args ProviderInitArgs) (EncryptionKMS, error) {
}
func (kms *AWSMetadataKMS) getSecrets() (map[string]interface{}, error) {
c := k8s.NewK8sClient()
c, err := k8s.NewK8sClient()
if err != nil {
return nil, fmt.Errorf("failed to connect to Kubernetes to "+
"get Secret %s/%s: %w", kms.namespace, kms.secretName, err)
}
secret, err := c.CoreV1().Secrets(kms.namespace).Get(context.TODO(),
kms.secretName, metav1.GetOptions{})
if err != nil {

View File

@ -154,7 +154,12 @@ func getKMSConfigMap() (map[string]interface{}, error) {
}
cmName := getKMSConfigMapName()
c := k8s.NewK8sClient()
c, err := k8s.NewK8sClient()
if err != nil {
return nil, fmt.Errorf("can not get ConfigMap %q, failed to "+
"connect to Kubernetes: %w", cmName, err)
}
cm, err := c.CoreV1().ConfigMaps(ns).Get(context.Background(),
cmName, metav1.GetOptions{})
if err != nil {

View File

@ -159,7 +159,12 @@ func (kms SecretsMetadataKMS) fetchEncryptionPassphrase(
secretNamespace = defaultNamespace
}
c := k8s.NewK8sClient()
c, err := k8s.NewK8sClient()
if err != nil {
return "", fmt.Errorf("can not get Secret %s/%s, failed to "+
"connect to Kubernetes: %w", secretNamespace, secretName, err)
}
secret, err := c.CoreV1().Secrets(secretNamespace).Get(context.TODO(),
secretName, metav1.GetOptions{})
if err != nil {

View File

@ -260,7 +260,12 @@ func (kms *VaultTenantSA) setServiceAccountName(config map[string]interface{}) e
// getServiceAccount returns the Tenants ServiceAccount with the name
// configured in the VaultTenantSA.
func (kms *VaultTenantSA) getServiceAccount() (*corev1.ServiceAccount, error) {
c := kms.getK8sClient()
c, err := kms.getK8sClient()
if err != nil {
return nil, fmt.Errorf("can not get ServiceAccount %s/%s, "+
"failed to connect to Kubernetes: %w", kms.Tenant, kms.tenantSAName, err)
}
sa, err := c.CoreV1().ServiceAccounts(kms.Tenant).Get(context.TODO(),
kms.tenantSAName, metav1.GetOptions{})
if err != nil {
@ -279,7 +284,13 @@ func (kms *VaultTenantSA) getToken() (string, error) {
return "", err
}
c := kms.getK8sClient()
c, err := kms.getK8sClient()
if err != nil {
return "", fmt.Errorf("can not get ServiceAccount %s/%s, failed "+
"to connect to Kubernetes: %w", kms.Tenant,
kms.tenantSAName, err)
}
for _, secretRef := range sa.Secrets {
secret, err := c.CoreV1().Secrets(kms.Tenant).Get(context.TODO(), secretRef.Name, metav1.GetOptions{})
if err != nil {

View File

@ -438,12 +438,16 @@ func (vtc *vaultTenantConnection) initCertificates(config map[string]interface{}
return nil
}
func (vtc *vaultTenantConnection) getK8sClient() *kubernetes.Clientset {
func (vtc *vaultTenantConnection) getK8sClient() (*kubernetes.Clientset, error) {
if vtc.client == nil {
vtc.client = k8s.NewK8sClient()
client, err := k8s.NewK8sClient()
if err != nil {
return nil, err
}
vtc.client = client
}
return vtc.client
return vtc.client, nil
}
// FetchDEK returns passphrase from Vault. The passphrase is stored in a
@ -493,7 +497,11 @@ func (vtc *vaultTenantConnection) RemoveDEK(key string) error {
}
func (kms *VaultTokensKMS) getToken() (string, error) {
c := kms.getK8sClient()
c, err := kms.getK8sClient()
if err != nil {
return "", err
}
secret, err := c.CoreV1().Secrets(kms.Tenant).Get(context.TODO(), kms.TokenName, metav1.GetOptions{})
if err != nil {
return "", err
@ -508,7 +516,11 @@ func (kms *VaultTokensKMS) getToken() (string, error) {
}
func (vtc *vaultTenantConnection) getCertificate(tenant, secretName, key string) (string, error) {
c := vtc.getK8sClient()
c, err := vtc.getK8sClient()
if err != nil {
return "", err
}
secret, err := c.CoreV1().Secrets(tenant).Get(context.TODO(), secretName, metav1.GetOptions{})
if err != nil {
return "", err
@ -551,7 +563,11 @@ func (vtc *vaultTenantConnection) parseTenantConfig() (map[string]interface{}, e
}
// fetch the ConfigMap from the tenants namespace
c := vtc.getK8sClient()
c, err := vtc.getK8sClient()
if err != nil {
return nil, err
}
cm, err := c.CoreV1().ConfigMaps(vtc.Tenant).Get(context.TODO(),
vtc.ConfigName, metav1.GetOptions{})
if apierrs.IsNotFound(err) {

View File

@ -129,7 +129,13 @@ func callNodeStageVolume(ns *NodeServer, c *k8s.Clientset, pv *v1.PersistentVolu
// runVolumeHealer heal the volumes attached on a node.
func runVolumeHealer(ns *NodeServer, conf *util.Config) error {
c := kubeclient.NewK8sClient()
c, err := kubeclient.NewK8sClient()
if err != nil {
log.ErrorLogMsg("failed to connect to Kubernetes: %v", err)
return err
}
val, err := c.StorageV1().VolumeAttachments().List(context.TODO(), metav1.ListOptions{})
if err != nil {
log.ErrorLogMsg("list volumeAttachments failed, err: %v", err)

View File

@ -1064,7 +1064,11 @@ func genVolFromVolID(
// be the same in the PV.Spec.CSI.VolumeHandle. Check the PV annotation for
// the new volumeHandle. If the new volumeHandle is found, generate the RBD
// volume structure from the new volumeHandle.
c := k8s.NewK8sClient()
c, cErr := k8s.NewK8sClient()
if cErr != nil {
return vol, cErr
}
listOpt := metav1.ListOptions{
LabelSelector: PVReplicatedLabelKey,
}

View File

@ -17,35 +17,34 @@ limitations under the License.
package k8s
import (
"fmt"
"os"
"github.com/ceph/ceph-csi/internal/util/log"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/rest"
"k8s.io/client-go/tools/clientcmd"
)
// NewK8sClient create kubernetes client.
func NewK8sClient() *kubernetes.Clientset {
func NewK8sClient() (*kubernetes.Clientset, error) {
var cfg *rest.Config
var err error
cPath := os.Getenv("KUBERNETES_CONFIG_PATH")
if cPath != "" {
cfg, err = clientcmd.BuildConfigFromFlags("", cPath)
if err != nil {
log.FatalLogMsg("Failed to get cluster config with error: %v\n", err)
return nil, fmt.Errorf("failed to get cluster config from %q: %w", cPath, err)
}
} else {
cfg, err = rest.InClusterConfig()
if err != nil {
log.FatalLogMsg("Failed to get cluster config with error: %v\n", err)
return nil, fmt.Errorf("failed to get cluster config: %w", err)
}
}
client, err := kubernetes.NewForConfig(cfg)
if err != nil {
log.FatalLogMsg("Failed to create client with error: %v\n", err)
return nil, fmt.Errorf("failed to create client: %w", err)
}
return client
return client, nil
}

View File

@ -35,7 +35,12 @@ const (
)
func k8sGetNodeLabels(nodeName string) (map[string]string, error) {
client := k8s.NewK8sClient()
client, err := k8s.NewK8sClient()
if err != nil {
return nil, fmt.Errorf("can not get node %q information, failed "+
"to connect to Kubernetes: %w", nodeName, err)
}
node, err := client.CoreV1().Nodes().Get(context.TODO(), nodeName, metav1.GetOptions{})
if err != nil {
return nil, fmt.Errorf("failed to get node %q information: %w", nodeName, err)