cleanup: Move common files to deploy folder

A few common files related to deployments were initially
kept in the examples folder. Move them to the deploy
folder and update the relevant files.

Signed-off-by: karthik-us <ksubrahm@redhat.com>
Authored by karthik-us on 2023-05-30 20:47:51 +05:30; committed by mergify[bot]
parent b5e68c810e
commit 6ac3a4dabc
11 changed files with 27 additions and 13 deletions

@@ -148,7 +148,7 @@ for more information.
 **Deploy Ceph configuration ConfigMap for CSI pods:**
 
 ```bash
-kubectl create -f ../../../examples/ceph-conf.yaml
+kubectl create -f ../../ceph-conf.yaml
 ```
 
 **Deploy CSI sidecar containers:**

@@ -134,7 +134,7 @@ for more information.
 **Deploy Ceph configuration ConfigMap for CSI pods:**
 
 ```bash
-kubectl create -f ../example/ceph-config.yaml
+kubectl create -f ../../ceph-conf.yaml
 ```
 
 **Deploy CSI sidecar containers:**

@@ -23,7 +23,7 @@ csi_liveness 1
 ```
 
 Promethues can be deployed through the promethues operator described [here](https://coreos.com/operators/prometheus/docs/latest/user-guides/getting-started.html).
-The [service-monitor](../examples/service-monitor.yaml) will tell promethues how
+The [service-monitor](../deploy/service-monitor.yaml) will tell promethues how
 to pull metrics out of CSI.
 
 Each CSI pod has a service to expose the endpoint to prometheus. By default, rbd

@@ -67,6 +67,7 @@ func deleteCephfsPlugin() {
 }
 
 func createORDeleteCephfsResources(action kubectlAction) {
+	cephConfigFile := getConfigFile(deployPath + cephConfconfigMap)
 	resources := []ResourceDeployer{
 		// shared resources
 		&yamlResource{
@@ -74,7 +75,7 @@ func createORDeleteCephfsResources(action kubectlAction) {
 			allowMissing: true,
 		},
 		&yamlResource{
-			filename:     examplePath + cephConfconfigMap,
+			filename:     cephConfigFile,
 			allowMissing: true,
 		},
 		// dependencies for provisioner

@@ -79,6 +79,7 @@ func deleteNFSPlugin() {
 }
 
 func createORDeleteNFSResources(f *framework.Framework, action kubectlAction) {
+	cephConfigFile := getConfigFile(deployPath + cephConfconfigMap)
 	resources := []ResourceDeployer{
 		// shared resources
 		&yamlResource{
@@ -86,7 +87,7 @@ func createORDeleteNFSResources(f *framework.Framework, action kubectlAction) {
 			allowMissing: true,
 		},
 		&yamlResource{
-			filename:     examplePath + cephConfconfigMap,
+			filename:     cephConfigFile,
 			allowMissing: true,
 		},
 		// dependencies for provisioner

@@ -44,7 +44,8 @@ var (
 	configMap         = "csi-config-map.yaml"
 	cephConfconfigMap = "ceph-conf.yaml"
 	csiDriverObject   = "csidriver.yaml"
-	rbdDirPath        = "../deploy/rbd/kubernetes/"
+	deployPath        = "../deploy/"
+	rbdDirPath        = deployPath + "/rbd/kubernetes/"
 	examplePath       = "../examples/"
 	rbdExamplePath    = examplePath + "/rbd/"
 	e2eTemplatesPath  = "../e2e/templates/"
@@ -129,6 +130,7 @@ func deleteRBDPlugin() {
 }
 
 func createORDeleteRbdResources(action kubectlAction) {
+	cephConfigFile := getConfigFile(deployPath + cephConfconfigMap)
 	resources := []ResourceDeployer{
 		// shared resources
 		&yamlResource{
@@ -136,7 +138,7 @@ func createORDeleteRbdResources(action kubectlAction) {
 			allowMissing: true,
 		},
 		&yamlResource{
-			filename:     examplePath + cephConfconfigMap,
+			filename:     cephConfigFile,
 			allowMissing: true,
 		},
 		// dependencies for provisioner

@@ -1732,3 +1732,13 @@ func rwopMayFail(err error) bool {
 
 	return !rwopSupported
 }
+
+// getConfigFile returns the passed config file location if it exists, else
+// returns the old location of the config file under 'examples/' directory.
+func getConfigFile(configFile string) string {
+	if _, err := os.Stat(configFile); os.IsNotExist(err) {
+		configFile = examplePath + cephConfconfigMap
+	}
+
+	return configFile
+}
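
To see the new lookup order end to end, here is a minimal, self-contained sketch; the constants and the helper body mirror the diffs above, while the `package main` wrapper and the printout are illustrative additions, not part of the commit:

```go
package main

import (
	"fmt"
	"os"
)

// Path constants as declared in the e2e var block above.
const (
	deployPath        = "../deploy/"
	examplePath       = "../examples/"
	cephConfconfigMap = "ceph-conf.yaml"
)

// getConfigFile prefers the new location under deploy/ and falls back to
// the legacy location under examples/ when the file does not exist.
func getConfigFile(configFile string) string {
	if _, err := os.Stat(configFile); os.IsNotExist(err) {
		configFile = examplePath + cephConfconfigMap
	}

	return configFile
}

func main() {
	// Prints ../deploy/ceph-conf.yaml when that file exists,
	// otherwise ../examples/ceph-conf.yaml.
	fmt.Println(getConfigFile(deployPath + cephConfconfigMap))
}
```

The fallback keeps the e2e suite usable against checkouts where ceph-conf.yaml has not yet been moved out of examples/.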

@@ -2,17 +2,17 @@
 ## Deploying Ceph-CSI services
 
-Create [ceph-config](./ceph-conf.yaml) configmap using the following command.
+Create [ceph-config](../deploy/ceph-conf.yaml) configmap using the following command.
 
 ```bash
-kubectl apply -f ./ceph-conf.yaml
+kubectl apply -f ../deploy/ceph-conf.yaml
 ```
 
 Both `rbd` and `cephfs` directories contain `plugin-deploy.sh` and
 `plugin-teardown.sh` helper scripts. You can use those to help you
 deploy/teardown RBACs, sidecar containers and the plugin in one go.
 
 By default, they look for the YAML manifests in
-`../../deploy/{rbd,cephfs}/kubernetes`.
+`../deploy/{rbd,cephfs}/kubernetes`.
 You can override this path by running
 
 ```bash
@@ -25,7 +25,7 @@ The CSI plugin requires configuration information regarding the Ceph cluster(s),
 that would host the dynamically or statically provisioned volumes. This
 is provided by adding a per-cluster identifier (referred to as clusterID), and
 the required monitor details for the same, as in the provided [sample config
-map](./csi-config-map-sample.yaml).
+map](../deploy/csi-config-map-sample.yaml).
 
 Gather the following information from the Ceph cluster(s) of choice,
 
@@ -38,13 +38,13 @@ Gather the following information from the Ceph cluster(s) of choice,
 * Alternatively, choose a `<cluster-id>` value that is distinct per Ceph
   cluster in use by this kubernetes cluster
 
-Update the [sample configmap](./csi-config-map-sample.yaml) with values
+Update the [sample configmap](../deploy/csi-config-map-sample.yaml) with values
 from a Ceph cluster and replace `<cluster-id>` with the chosen clusterID, to
 create the manifest for the configmap which can be updated in the cluster
 using the following command,
 
 ```bash
-kubectl replace -f ./csi-config-map-sample.yaml
+kubectl replace -f ../deploy/csi-config-map-sample.yaml
 ```
 
 Storage class and snapshot class, using `<cluster-id>` as the value for the