/*
Copyright 2018 The Ceph-CSI Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package rbd

import (
	"context"
	"encoding/json"
	"errors"
	"fmt"
	"io/ioutil"
	"os"
	"path/filepath"
	"strconv"
	"strings"
	"time"

	"github.com/ceph/ceph-csi/internal/util"
	"github.com/ceph/ceph-csi/internal/util/k8s"
	"github.com/ceph/ceph-csi/internal/util/log"

	"github.com/ceph/go-ceph/rados"
	librbd "github.com/ceph/go-ceph/rbd"
	"github.com/ceph/go-ceph/rbd/admin"
	"github.com/container-storage-interface/spec/lib/go/csi"
	"github.com/golang/protobuf/ptypes"
	"github.com/golang/protobuf/ptypes/timestamp"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/sets"
	"k8s.io/cloud-provider/volume/helpers"
	mount "k8s.io/mount-utils"
)

const (
	// The following three values are used for 30 seconds timeout
	// while waiting for RBD Watcher to expire.
	rbdImageWatcherInitDelay = 1 * time.Second
	rbdImageWatcherFactor    = 1.4
	rbdImageWatcherSteps     = 10

	rbdDefaultMounter = "rbd"
	rbdNbdMounter     = "rbd-nbd"

	defaultLogDir      = "/var/log/ceph"
	defaultLogStrategy = "remove" // supports remove, compress and preserve

	// Output strings returned during invocation of "ceph rbd task add remove <imagespec>" when
	// command is not supported by ceph manager. Used to check errors and recover when the command
	// is unsupported.
	rbdTaskRemoveCmdInvalidString1      = "no valid command found"
	rbdTaskRemoveCmdInvalidString2      = "Error EINVAL: invalid command"
	rbdTaskRemoveCmdAccessDeniedMessage = "Error EACCES:"

	// image metadata key for thick-provisioning.
	// As image metadata key starting with '.rbd' will not be copied when we do
	// clone or mirroring, deprecating the old key for the same reason use
	// 'thickProvisionMetaKey' to set image metadata.
	deprecatedthickProvisionMetaKey = ".rbd.csi.ceph.com/thick-provisioned"
	thickProvisionMetaKey           = "rbd.csi.ceph.com/thick-provisioned"

	// these are the metadata set on the image to identify the image is
	// thick provisioned or thin provisioned.
	thickProvisionMetaData = "true"
	thinProvisionMetaData  = "false"

	// migration label key and value for parameters in volume context.
	intreeMigrationKey   = "migration"
	intreeMigrationLabel = "true"
	migInTreeImagePrefix = "kubernetes-dynamic-pvc-"
	// migration volume handle identifiers.
	// total length of fields in the migration volume handle.
	migVolIDTotalLength = 4
	// split boundary length of fields.
	migVolIDSplitLength = 3
	// separator for migration handle fields.
	migVolIDFieldSep = "_"
	// identifier of a migration vol handle.
	migIdentifier = "mig"
	// prefix of image field.
	migImageNamePrefix = "image-"
	// prefix in the handle for monitors field.
	migMonPrefix = "mons-"
)
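
// Illustrative sketch only: the watcher timeout constants above are the kind
// of values that drive an exponential backoff, for example with
// k8s.io/apimachinery/pkg/util/wait (the actual caller lives elsewhere in
// this package; this is not its verbatim code):
//
//	backoff := wait.Backoff{
//		Duration: rbdImageWatcherInitDelay,
//		Factor:   rbdImageWatcherFactor,
//		Steps:    rbdImageWatcherSteps,
//	}
//	err := wait.ExponentialBackoff(backoff, func() (bool, error) {
//		used, err := rv.isInUse()
//		return !used, err // stop waiting once no foreign watcher is left
//	})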

// rbdImage contains common attributes and methods for the rbdVolume and
// rbdSnapshot types.
type rbdImage struct {
	// RbdImageName is the name of the RBD image backing this rbdVolume.
	// This does not have a JSON tag as it is not stashed in JSON encoded
	// config maps in v1.0.0
	RbdImageName string
	// ImageID contains the image id of the image
	ImageID string
	// VolID is the volume ID that is exchanged with CSI drivers,
	// identifying this rbd image
	VolID string `json:"volID"`

	Monitors string
	// JournalPool is the ceph pool in which the CSI Journal/CSI snapshot Journal is
	// stored
	JournalPool string
	// Pool is where the image journal/image snapshot journal and image/snapshot
	// is stored, and could be the same as `JournalPool` (retained as Pool instead of
	// renaming to ImagePool or such, as this is referenced in the code
	// extensively)
	Pool           string
	RadosNamespace string
	ClusterID      string `json:"clusterId"`
	// RequestName is the CSI generated volume name for the rbdVolume.
	// This does not have a JSON tag as it is not stashed in JSON encoded
	// config maps in v1.0.0
	RequestName string
	NamePrefix  string

	// encryption provides access to optional VolumeEncryption functions
	encryption *util.VolumeEncryption
	// Owner is the creator (tenant, Kubernetes Namespace) of the volume
	Owner string

	CreatedAt *timestamp.Timestamp

	// conn is a connection to the Ceph cluster obtained from a ConnPool
	conn *util.ClusterConnection
	// an opened IOContext, call .openIoctx() before using
	ioctx *rados.IOContext
}

// rbdVolume represents a CSI volume and its RBD image specifics.
type rbdVolume struct {
	rbdImage

	// VolName and MonValueFromSecret are retained from older plugin versions (<= 1.0.0)
	// for backward compatibility reasons
	TopologyPools       *[]util.TopologyConstrainedPool
	TopologyRequirement *csi.TopologyRequirement
	Topology            map[string]string
	// DataPool is where the data for images in `Pool` are stored, this is used as the `--data-pool`
	// argument when the pool is created, and is not used anywhere else
	DataPool   string
	ParentName string
	// Parent Pool is the pool that contains the parent image.
	ParentPool         string
	imageFeatureSet    librbd.FeatureSet
	AdminID            string `json:"adminId"`
	UserID             string `json:"userId"`
	Mounter            string `json:"mounter"`
	ReservedID         string
	MapOptions         string
	UnmapOptions       string
	LogDir             string
	LogStrategy        string
	VolName            string `json:"volName"`
	MonValueFromSecret string `json:"monValueFromSecret"`
	VolSize            int64  `json:"volSize"`
	DisableInUseChecks bool   `json:"disableInUseChecks"`
	readOnly           bool
	Primary            bool
	ThickProvision     bool
}

// rbdSnapshot represents a CSI snapshot and its RBD snapshot specifics.
type rbdSnapshot struct {
	rbdImage

	// SourceVolumeID is the volume ID of RbdImageName, that is exchanged with CSI drivers
	// RbdSnapName is the name of the RBD snapshot backing this rbdSnapshot
	SourceVolumeID string
	ReservedID     string
	RbdSnapName    string
	SizeBytes      int64
}

// imageFeature represents required image features and value.
type imageFeature struct {
	// needRbdNbd indicates whether this image feature requires an rbd-nbd mounter
	needRbdNbd bool
	// dependsOn is the image features required for this imageFeature
	dependsOn []string
}

// migrationVolID is a struct which consists of required fields of a rbd volume
// from a migrated volumeID.
type migrationVolID struct {
	imageName string
	poolName  string
	clusterID string
}
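
// For illustration only, pieced together from the mig* constants above: a
// migration volume handle is split on migVolIDFieldSep into
// migVolIDTotalLength (4) fields, roughly of the (hypothetical) shape
//
//	mig_mons-<monitors-hash>_image-<image-uuid>_<pool-field>
//
// where migIdentifier marks the handle, and migMonPrefix and
// migImageNamePrefix prefix the monitors and image fields; the remaining
// field is assumed here to carry the pool information used to fill poolName.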

var supportedFeatures = map[string]imageFeature{
	librbd.FeatureNameLayering: {
		needRbdNbd: false,
	},
	librbd.FeatureNameExclusiveLock: {
		needRbdNbd: true,
	},
	librbd.FeatureNameJournaling: {
		needRbdNbd: true,
		dependsOn:  []string{librbd.FeatureNameExclusiveLock},
	},
}
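
// A minimal sketch (not the driver's actual validation code) of how
// supportedFeatures can be consulted when parsing a comma-separated
// imageFeatures parameter:
//
//	for _, f := range strings.Split(imageFeatures, ",") {
//		feature, ok := supportedFeatures[f]
//		if !ok {
//			return fmt.Errorf("invalid feature %s", f)
//		}
//		for _, dep := range feature.dependsOn {
//			// every dependency must also be requested
//		}
//		if feature.needRbdNbd {
//			// this feature is only usable with the rbd-nbd mounter
//		}
//	}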

// Connect an rbdVolume to the Ceph cluster.
func (ri *rbdImage) Connect(cr *util.Credentials) error {
	if ri.conn != nil {
		return nil
	}

	conn := &util.ClusterConnection{}
	if err := conn.Connect(ri.Monitors, cr); err != nil {
		return err
	}

	ri.conn = conn

	return nil
}

// Destroy cleans up the rbdVolume and closes the connection to the Ceph
// cluster in case one was setup.
func (ri *rbdImage) Destroy() {
	if ri.ioctx != nil {
		ri.ioctx.Destroy()
	}
	if ri.conn != nil {
		ri.conn.Destroy()
	}
	if ri.isEncrypted() {
		ri.encryption.Destroy()
	}
}

// String returns the image-spec (pool/{namespace/}image) format of the image.
func (ri *rbdImage) String() string {
	if ri.RadosNamespace != "" {
		return fmt.Sprintf("%s/%s/%s", ri.Pool, ri.RadosNamespace, ri.RbdImageName)
	}

	return fmt.Sprintf("%s/%s", ri.Pool, ri.RbdImageName)
}

// String returns the snap-spec (pool/{namespace/}image@snap) format of the snapshot.
func (rs *rbdSnapshot) String() string {
	if rs.RadosNamespace != "" {
		return fmt.Sprintf("%s/%s/%s@%s", rs.Pool, rs.RadosNamespace, rs.RbdImageName, rs.RbdSnapName)
	}

	return fmt.Sprintf("%s/%s@%s", rs.Pool, rs.RbdImageName, rs.RbdSnapName)
}
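
// For example (hypothetical names): an image in pool "replicapool" and rados
// namespace "ns1" renders as "replicapool/ns1/csi-vol-xyz", and one of its
// snapshots as "replicapool/ns1/csi-vol-xyz@csi-snap-abc".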

// createImage creates a new ceph image with provision and volume options.
func createImage(ctx context.Context, pOpts *rbdVolume, cr *util.Credentials) error {
	volSzMiB := fmt.Sprintf("%dM", util.RoundOffVolSize(pOpts.VolSize))
	options := librbd.NewRbdImageOptions()

	logMsg := "rbd: create %s size %s (features: %s) using mon %s"
	if pOpts.DataPool != "" {
		logMsg += fmt.Sprintf(", data pool %s", pOpts.DataPool)
		err := options.SetString(librbd.RbdImageOptionDataPool, pOpts.DataPool)
		if err != nil {
			return fmt.Errorf("failed to set data pool: %w", err)
		}
	}
	log.DebugLog(ctx, logMsg,
		pOpts, volSzMiB, pOpts.imageFeatureSet.Names(), pOpts.Monitors)

	if pOpts.imageFeatureSet != 0 {
		err := options.SetUint64(librbd.RbdImageOptionFeatures, uint64(pOpts.imageFeatureSet))
		if err != nil {
			return fmt.Errorf("failed to set image features: %w", err)
		}
	}

	err := pOpts.Connect(cr)
	if err != nil {
		return err
	}

	err = pOpts.openIoctx()
	if err != nil {
		return fmt.Errorf("failed to get IOContext: %w", err)
	}

	err = librbd.CreateImage(pOpts.ioctx, pOpts.RbdImageName,
		uint64(util.RoundOffVolSize(pOpts.VolSize)*helpers.MiB), options)
	if err != nil {
		return fmt.Errorf("failed to create rbd image: %w", err)
	}

	if pOpts.isEncrypted() {
		err = pOpts.setupEncryption(ctx)
		if err != nil {
			return fmt.Errorf("failed to setup encryption for image %s: %w", pOpts, err)
		}
	}

	if pOpts.ThickProvision {
		err = pOpts.allocate(0)
		if err != nil {
			// nolint:errcheck // deleteImage() will log errors in
			// case it fails, no need to log them here again
			_ = deleteImage(ctx, pOpts, cr)

			return fmt.Errorf("failed to thick provision image: %w", err)
		}

		err = pOpts.setThickProvisioned()
		if err != nil {
			// nolint:errcheck // deleteImage() will log errors in
			// case it fails, no need to log them here again
			_ = deleteImage(ctx, pOpts, cr)

			return fmt.Errorf("failed to mark image as thick-provisioned: %w", err)
		}
	}

	return nil
}
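
// A minimal caller sketch, assuming the rbdVolume has been populated from a
// CSI request (hypothetical values; the real callers live in the
// provisioning path of this package):
//
//	rv := &rbdVolume{}
//	rv.Monitors = "10.0.0.1:6789"
//	rv.Pool = "replicapool"
//	rv.RbdImageName = "csi-vol-example"
//	rv.VolSize = 1 * helpers.GiB
//	defer rv.Destroy()
//	if err := createImage(ctx, rv, cr); err != nil {
//		// handle provisioning failure
//	}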

func (ri *rbdImage) openIoctx() error {
	if ri.ioctx != nil {
		return nil
	}

	ioctx, err := ri.conn.GetIoctx(ri.Pool)
	if err != nil {
		// GetIoctx() can return util.ErrPoolNotFound
		return err
	}

	ioctx.SetNamespace(ri.RadosNamespace)
	ri.ioctx = ioctx

	return nil
}

// getImageID queries rbd about the given image and stores its id, returns
// ErrImageNotFound if provided image is not found.
func (rv *rbdVolume) getImageID() error {
	if rv.ImageID != "" {
		return nil
	}
	image, err := rv.open()
	if err != nil {
		return err
	}
	defer image.Close()

	id, err := image.GetId()
	if err != nil {
		return err
	}
	rv.ImageID = id

	return nil
}

// open the rbdImage after it has been connected.
// ErrPoolNotFound or ErrImageNotFound are returned in case the pool or image
// can not be found, other errors will contain more details about other issues
// (permission denied, ...) and are expected to relate to configuration issues.
func (ri *rbdImage) open() (*librbd.Image, error) {
	err := ri.openIoctx()
	if err != nil {
		return nil, err
	}

	image, err := librbd.OpenImage(ri.ioctx, ri.RbdImageName, librbd.NoSnapshot)
	if err != nil {
		if errors.Is(err, librbd.ErrNotFound) {
			err = util.JoinErrors(ErrImageNotFound, err)
		}

		return nil, err
	}

	return image, nil
}

// allocate uses the stripe-period of the image to fully allocate (thick
// provision) the image.
func (rv *rbdVolume) allocate(offset uint64) error {
	// We do not want to call discard, we really want to write zeros to get
	// the allocation. This sets the option for the re-used connection, and
	// all subsequent images that are opened. That is not a problem, as
	// this is the only place images get written.
	err := rv.conn.DisableDiscardOnZeroedWriteSame()
	if err != nil {
		return err
	}

	image, err := rv.open()
	if err != nil {
		return err
	}
	defer image.Close()

	st, err := image.Stat()
	if err != nil {
		return err
	}

	sc, err := image.GetStripeCount()
	if err != nil {
		return err
	}

	// blockSize is the stripe-period: size of the object-size multiplied
	// by the stripe-count
	blockSize := sc * (1 << st.Order)
	zeroBlock := make([]byte, blockSize)

	// the actual size of the image as available in the pool, can be
	// marginally different from the requested image size
	size := st.Size - offset

	// In case the remaining space on the volume is smaller than blockSize,
	// write a partial block with WriteAt() after this loop.
	for size > blockSize {
		writeSize := size
		// write a maximum of 1GB per WriteSame() call
		if size > helpers.GiB {
			writeSize = helpers.GiB
		}

		// round down to the size of a zeroBlock
		if (writeSize % blockSize) != 0 {
			writeSize = (writeSize / blockSize) * blockSize
		}

		_, err = image.WriteSame(offset, writeSize, zeroBlock,
			rados.OpFlagNone)
		if err != nil {
			return fmt.Errorf("failed to allocate %d/%d bytes at "+
				"offset %d: %w", writeSize, blockSize, offset, err)
		}

		// write succeeded
		size -= writeSize
		offset += writeSize
	}

	// write the last remaining bytes, in case the image size can not be
	// written with the optimal blockSize
	if size != 0 {
		_, err = image.WriteAt(zeroBlock[:size], int64(offset))
		if err != nil {
			return fmt.Errorf("failed to allocate %d bytes at "+
				"offset %d: %w", size, offset, err)
		}
	}

	return nil
}
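
// Worked example of the stripe-period arithmetic above: with an object order
// of 22 (4 MiB objects) and a stripe-count of 1, blockSize is
// 1 * (1 << 22) = 4 MiB, so a 1 GiB image is zeroed in WriteSame() calls of
// at most 1 GiB, each rounded down to a multiple of the 4 MiB block.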

// isInUse checks if there is a watcher on the image. It returns true if there
// is a watcher on the image, otherwise returns false.
func (rv *rbdVolume) isInUse() (bool, error) {
	image, err := rv.open()
	if err != nil {
		if errors.Is(err, ErrImageNotFound) || errors.Is(err, util.ErrPoolNotFound) {
			return false, err
		}
		// any error should assume something else is using the image
		return true, err
	}
	defer image.Close()

	watchers, err := image.ListWatchers()
	if err != nil {
		return false, err
	}

	mirrorInfo, err := image.GetMirrorImageInfo()
	if err != nil {
		return false, err
	}
	rv.Primary = mirrorInfo.Primary

	// because we opened the image, there is at least one watcher
	defaultWatchers := 1
	if rv.Primary {
		// if rbd mirror daemon is running, a watcher will be added by the rbd
		// mirror daemon for mirrored images.
		defaultWatchers++
	}

	return len(watchers) > defaultWatchers, nil
}
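
// Example of the watcher arithmetic: for a non-primary image, the open()
// above accounts for one watcher, so any additional watcher (for instance a
// kernel rbd mapping on some node) makes len(watchers) exceed the expected
// count of 1 and the image is reported as in use.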

// checkImageFeatures checks the presence of the imageFeatures parameter. It
// returns true when imageFeatures is missing or empty; a missing parameter
// is skipped for non-static volumes for backward compatibility.
func checkImageFeatures(imageFeatures string, ok, static bool) bool {
	return static && (!ok || imageFeatures == "")
}
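
// For example: checkImageFeatures("", false, true) and
// checkImageFeatures("", true, true) are both true (a static volume must
// specify imageFeatures), while any call with static == false returns false
// and the missing parameter is tolerated.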

// isNotMountPoint checks whether MountPoint does not exist and
// also discards the error indicating that the mountPoint exists.
func isNotMountPoint(mounter mount.Interface, stagingTargetPath string) (bool, error) {
	isNotMnt, err := mount.IsNotMountPoint(mounter, stagingTargetPath)
	if os.IsNotExist(err) {
		err = nil
	}

	return isNotMnt, err
}

// addRbdManagerTask adds a ceph manager task to execute a command
// asynchronously. If the command is not found, the returned bool is set to
// false. Example arg: ["trash", "remove", "pool/image"].
func addRbdManagerTask(ctx context.Context, pOpts *rbdVolume, arg []string) (bool, error) {
	args := []string{"rbd", "task", "add"}
	args = append(args, arg...)
	log.DebugLog(
		ctx,
		"executing %v for image (%s) using mon %s, pool %s",
		args,
		pOpts.RbdImageName,
		pOpts.Monitors,
		pOpts.Pool)
	supported := true
	_, stderr, err := util.ExecCommand(ctx, "ceph", args...)
	if err != nil {
		switch {
		case strings.Contains(stderr, rbdTaskRemoveCmdInvalidString1) &&
			strings.Contains(stderr, rbdTaskRemoveCmdInvalidString2):
			log.WarningLog(
				ctx,
				"cluster with cluster ID (%s) does not support Ceph manager based rbd commands"+
					"(minimum ceph version required is v14.2.3)",
				pOpts.ClusterID)
			supported = false
		case strings.HasPrefix(stderr, rbdTaskRemoveCmdAccessDeniedMessage):
			log.WarningLog(ctx, "access denied to Ceph MGR-based rbd commands on cluster ID (%s)", pOpts.ClusterID)
			supported = false
		default:
			log.WarningLog(ctx, "uncaught error while scheduling a task (%v): %s", err, stderr)
		}
	}
	if err != nil {
		err = fmt.Errorf("%w. stdError:%s", err, stderr)
	}

	return supported, err
}

// getTrashPath returns the image path for trash operation.
func (rv *rbdVolume) getTrashPath() string {
	trashPath := rv.Pool
	if rv.RadosNamespace != "" {
		trashPath = trashPath + "/" + rv.RadosNamespace
	}

	return trashPath + "/" + rv.ImageID
}

// deleteImage deletes a ceph image with provision and volume options.
func deleteImage(ctx context.Context, pOpts *rbdVolume, cr *util.Credentials) error {
	image := pOpts.RbdImageName

	log.DebugLog(ctx, "rbd: delete %s using mon %s, pool %s", image, pOpts.Monitors, pOpts.Pool)

	// Support deleting the older rbd images whose imageID is not stored in omap
	err := pOpts.getImageID()
	if err != nil {
		return err
	}

	if pOpts.isEncrypted() {
		log.DebugLog(ctx, "rbd: going to remove DEK for %q", pOpts)
		if err = pOpts.encryption.RemoveDEK(pOpts.VolID); err != nil {
			log.WarningLog(ctx, "failed to clean the passphrase for volume %s: %s", pOpts.VolID, err)
		}
	}

	err = pOpts.openIoctx()
	if err != nil {
		return err
	}

	rbdImage := librbd.GetImage(pOpts.ioctx, image)
	err = rbdImage.Trash(0)
	if err != nil {
		log.ErrorLog(ctx, "failed to delete rbd image: %s, error: %v", pOpts, err)

		return err
	}

	// attempt to use Ceph manager based deletion support if available
	args := []string{
		"trash", "remove",
		pOpts.getTrashPath(),
		"--id", cr.ID,
		"--keyfile=" + cr.KeyFile,
		"-m", pOpts.Monitors,
	}
	rbdCephMgrSupported, err := addRbdManagerTask(ctx, pOpts, args)
	if rbdCephMgrSupported && err != nil {
		log.ErrorLog(ctx, "failed to add task to delete rbd image: %s, %v", pOpts, err)

		return err
	}

	if !rbdCephMgrSupported {
		err = librbd.TrashRemove(pOpts.ioctx, pOpts.ImageID, true)
		if err != nil {
			log.ErrorLog(ctx, "failed to delete rbd image: %s, %v", pOpts, err)

			return err
		}
	}

	return nil
}

// getCloneDepth walks the parent chain of the image and returns the number
// of ancestors found (the clone depth).
func (rv *rbdVolume) getCloneDepth(ctx context.Context) (uint, error) {
	var depth uint
	vol := rbdVolume{}

	vol.Pool = rv.Pool
	vol.Monitors = rv.Monitors
	vol.RbdImageName = rv.RbdImageName
	vol.conn = rv.conn.Copy()

	for {
		if vol.RbdImageName == "" {
			return depth, nil
		}
		err := vol.openIoctx()
		if err != nil {
			return depth, err
		}

		err = vol.getImageInfo()
		// FIXME: create and destroy the vol inside the loop.
		// see https://github.com/ceph/ceph-csi/pull/1838#discussion_r598530807
		vol.ioctx.Destroy()
		vol.ioctx = nil
		if err != nil {
			// if the parent image is moved to trash the name will be present
			// in rbd image info but the image will be in trash, in that case
			// return the found depth
			if errors.Is(err, ErrImageNotFound) {
				return depth, nil
			}
			log.ErrorLog(ctx, "failed to check depth on image %s: %s", &vol, err)

			return depth, err
		}
		if vol.ParentName != "" {
			depth++
		}
		vol.RbdImageName = vol.ParentName
		vol.Pool = vol.ParentPool
	}
}
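
// For example: for a chain image -> parent -> grandparent (with no further
// ancestry) the walk increments depth once per parent found and returns 2,
// while a plain image with no parent returns 0.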

// trashSnapInfo holds the original name of a snapshot that was moved to the
// trash namespace.
type trashSnapInfo struct {
	origSnapName string
}

// flattenClonedRbdImages force-flattens the images backing snapshots that
// were moved to the trash namespace.
func flattenClonedRbdImages(
	ctx context.Context,
	snaps []librbd.SnapInfo,
	pool, monitors, rbdImageName string,
	cr *util.Credentials) error {
	rv := &rbdVolume{}
	rv.Monitors = monitors
	rv.Pool = pool
	rv.RbdImageName = rbdImageName

	defer rv.Destroy()
	err := rv.Connect(cr)
	if err != nil {
		log.ErrorLog(ctx, "failed to open connection %s; err %v", rv, err)

		return err
	}
	var origNameList []trashSnapInfo
	for _, snapInfo := range snaps {
		// check if the snapshot belongs to trash namespace.
		isTrash, retErr := rv.isTrashSnap(snapInfo.Id)
		if retErr != nil {
			return retErr
		}

		if isTrash {
			// get original snap name for the snapshot in trash namespace
			origSnapName, retErr := rv.getOrigSnapName(snapInfo.Id)
			if retErr != nil {
				return retErr
			}
			origNameList = append(origNameList, trashSnapInfo{origSnapName})
		}
	}

	for _, snapName := range origNameList {
		rv.RbdImageName = snapName.origSnapName
		err = rv.flattenRbdImage(ctx, cr, true, rbdHardMaxCloneDepth, rbdSoftMaxCloneDepth)
		if err != nil {
			log.ErrorLog(ctx, "failed to flatten %s; err %v", rv, err)

			continue
		}
	}

	return nil
}

// flattenRbdImage flattens the image once the clone depth reaches the
// configured limits, preferring an asynchronous ceph manager task when the
// cluster supports it.
func (rv *rbdVolume) flattenRbdImage(
	ctx context.Context,
	cr *util.Credentials,
	forceFlatten bool,
	hardlimit, softlimit uint) error {
	var depth uint
	var err error

	// skip clone depth check if request is for force flatten
	if !forceFlatten {
		depth, err = rv.getCloneDepth(ctx)
		if err != nil {
			return err
		}
		log.ExtendedLog(
			ctx,
			"clone depth is (%d), configured softlimit (%d) and hardlimit (%d) for %s",
			depth,
			softlimit,
			hardlimit,
			rv)
	}

	if !forceFlatten && (depth < hardlimit) && (depth < softlimit) {
		return nil
	}
	args := []string{"flatten", rv.String(), "--id", cr.ID, "--keyfile=" + cr.KeyFile, "-m", rv.Monitors}
	supported, err := addRbdManagerTask(ctx, rv, args)
	if supported {
		if err != nil {
			// discard flattening error if the image does not have any parent
			rbdFlattenNoParent := fmt.Sprintf("Image %s/%s does not have a parent", rv.Pool, rv.RbdImageName)
			if strings.Contains(err.Error(), rbdFlattenNoParent) {
				return nil
			}
			log.ErrorLog(ctx, "failed to add task flatten for %s : %v", rv, err)

			return err
		}
		if forceFlatten || depth >= hardlimit {
			return fmt.Errorf("%w: flatten is in progress for image %s", ErrFlattenInProgress, rv.RbdImageName)
		}
	}
	if !supported {
		log.ErrorLog(
			ctx,
			"task manager does not support flatten, image will be flattened once hardlimit is reached: %v",
			err)
		if forceFlatten || depth >= hardlimit {
			err = rv.Connect(cr)
			if err != nil {
				return err
			}
			err := rv.flatten()
			if err != nil {
				log.ErrorLog(ctx, "rbd failed to flatten image %s %s: %v", rv.Pool, rv.RbdImageName, err)

				return err
			}
		}
	}

	return nil
}

// getParentName returns the name of the parent image, if any.
func (rv *rbdVolume) getParentName() (string, error) {
	rbdImage, err := rv.open()
	if err != nil {
		return "", err
	}
	defer rbdImage.Close()

	parentInfo, err := rbdImage.GetParent()
	if err != nil {
		return "", err
	}

	return parentInfo.Image.ImageName, nil
}

// flatten detaches the image from its parent; an image without a parent is
// not treated as an error.
func (rv *rbdVolume) flatten() error {
	rbdImage, err := rv.open()
	if err != nil {
		return err
	}
	defer rbdImage.Close()

	err = rbdImage.Flatten()
	if err != nil {
		// rbd image flatten will fail if the rbd image does not have a parent
		parent, pErr := rv.getParentName()
		if pErr != nil {
			return util.JoinErrors(err, pErr)
		}
		if parent == "" {
			return nil
		}
	}

	return nil
}

// hasFeature checks whether all bits of the given feature mask are set in
// the image feature set.
func (rv *rbdVolume) hasFeature(feature uint64) bool {
	return (uint64(rv.imageFeatureSet) & feature) == feature
}
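
// For example: if imageFeatureSet has layering and exclusive-lock set,
// hasFeature(librbd.FeatureExclusiveLock) is true while
// hasFeature(librbd.FeatureJournaling) is false, since the AND/compare
// requires every bit of the queried mask to be present.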

// checkImageChainHasFeature walks the image and its ancestry and returns
// true as soon as one image in the chain has the requested feature.
func (rv *rbdVolume) checkImageChainHasFeature(ctx context.Context, feature uint64) (bool, error) {
	vol := rbdVolume{}

	vol.Pool = rv.Pool
	vol.RadosNamespace = rv.RadosNamespace
	vol.Monitors = rv.Monitors
	vol.RbdImageName = rv.RbdImageName
	vol.conn = rv.conn.Copy()

	for {
		if vol.RbdImageName == "" {
			return false, nil
		}
		err := vol.openIoctx()
		if err != nil {
			return false, err
		}

		err = vol.getImageInfo()
		// FIXME: create and destroy the vol inside the loop.
		// see https://github.com/ceph/ceph-csi/pull/1838#discussion_r598530807
		vol.ioctx.Destroy()
		vol.ioctx = nil
		if err != nil {
			// call to getImageInfo returns the parent name even if the parent
			// is in the trash, when we try to open the parent image to get its
			// information it fails because it is already in trash. We should
			// treat error as nil if the parent is not found.
			if errors.Is(err, ErrImageNotFound) {
				return false, nil
			}
			log.ErrorLog(ctx, "failed to get image info for %s: %s", vol.String(), err)

			return false, err
		}
		if f := vol.hasFeature(feature); f {
			return true, nil
		}
		vol.RbdImageName = vol.ParentName
		vol.Pool = vol.ParentPool
	}
}

// genSnapFromSnapID generates a rbdSnapshot structure from the provided identifier, updating
// the structure with elements from on-disk snapshot metadata as well.
func genSnapFromSnapID(
	ctx context.Context,
	rbdSnap *rbdSnapshot,
	snapshotID string,
	cr *util.Credentials,
	secrets map[string]string) error {
	var vi util.CSIIdentifier

	rbdSnap.VolID = snapshotID

	err := vi.DecomposeCSIID(rbdSnap.VolID)
	if err != nil {
		log.ErrorLog(ctx, "error decoding snapshot ID (%s) (%s)", err, rbdSnap.VolID)

		return err
	}

	rbdSnap.ClusterID = vi.ClusterID

	rbdSnap.Monitors, _, err = util.GetMonsAndClusterID(ctx, rbdSnap.ClusterID, false)
	if err != nil {
		log.ErrorLog(ctx, "failed getting mons (%s)", err)

		return err
	}

	rbdSnap.Pool, err = util.GetPoolName(rbdSnap.Monitors, cr, vi.LocationID)
	if err != nil {
		return err
	}
	rbdSnap.JournalPool = rbdSnap.Pool

	rbdSnap.RadosNamespace, err = util.GetRadosNamespace(util.CsiConfigFile, rbdSnap.ClusterID)
	if err != nil {
		return err
	}

	j, err := snapJournal.Connect(rbdSnap.Monitors, rbdSnap.RadosNamespace, cr)
	if err != nil {
		return err
	}
	defer j.Destroy()

	imageAttributes, err := j.GetImageAttributes(
		ctx, rbdSnap.Pool, vi.ObjectUUID, true)
	if err != nil {
		return err
	}
	rbdSnap.ImageID = imageAttributes.ImageID
	rbdSnap.RequestName = imageAttributes.RequestName
	rbdSnap.RbdImageName = imageAttributes.SourceName
	rbdSnap.RbdSnapName = imageAttributes.ImageName
	rbdSnap.ReservedID = vi.ObjectUUID
	rbdSnap.Owner = imageAttributes.Owner
	// convert the journal pool ID to name, for use in DeleteSnapshot cases
	if imageAttributes.JournalPoolID != util.InvalidPoolID {
		rbdSnap.JournalPool, err = util.GetPoolName(rbdSnap.Monitors, cr, imageAttributes.JournalPoolID)
		if err != nil {
			// TODO: If pool is not found we may leak the image (as DeleteSnapshot will return success)
			return err
		}
	}

	err = rbdSnap.Connect(cr)
	defer func() {
		if err != nil {
			rbdSnap.Destroy()
		}
	}()
	if err != nil {
		return fmt.Errorf("failed to connect to %q: %w",
			rbdSnap, err)
	}

	if imageAttributes.KmsID != "" {
		err = rbdSnap.configureEncryption(imageAttributes.KmsID, secrets)
		if err != nil {
			return fmt.Errorf("failed to configure encryption for "+
				"%q: %w", rbdSnap, err)
		}
	}

	return err
}

// generateVolumeFromVolumeID generates a rbdVolume structure from the provided identifier.
func generateVolumeFromVolumeID(
	ctx context.Context,
	volumeID string,
	vi util.CSIIdentifier,
	cr *util.Credentials,
	secrets map[string]string) (*rbdVolume, error) {
	var (
		rbdVol *rbdVolume
		err    error
	)

	// rbdVolume fields that are not filled up in this function are:
	// Mounter, MultiNodeWritable
	rbdVol = &rbdVolume{}
	rbdVol.VolID = volumeID

	rbdVol.ClusterID = vi.ClusterID

	rbdVol.Monitors, _, err = util.GetMonsAndClusterID(ctx, rbdVol.ClusterID, false)
	if err != nil {
		log.ErrorLog(ctx, "failed getting mons (%s)", err)

		return rbdVol, err
	}

	rbdVol.RadosNamespace, err = util.GetRadosNamespace(util.CsiConfigFile, rbdVol.ClusterID)
	if err != nil {
		return rbdVol, err
	}

	j, err := volJournal.Connect(rbdVol.Monitors, rbdVol.RadosNamespace, cr)
	if err != nil {
		return rbdVol, err
	}
	defer j.Destroy()

	rbdVol.Pool, err = util.GetPoolName(rbdVol.Monitors, cr, vi.LocationID)
	if err != nil {
		return rbdVol, err
	}

	err = rbdVol.Connect(cr)
	if err != nil {
		return rbdVol, err
	}
	rbdVol.JournalPool = rbdVol.Pool

	imageAttributes, err := j.GetImageAttributes(
		ctx, rbdVol.Pool, vi.ObjectUUID, false)
	if err != nil {
		return rbdVol, err
	}
	rbdVol.RequestName = imageAttributes.RequestName
	rbdVol.RbdImageName = imageAttributes.ImageName
	rbdVol.ReservedID = vi.ObjectUUID
	rbdVol.ImageID = imageAttributes.ImageID
	rbdVol.Owner = imageAttributes.Owner

	if imageAttributes.KmsID != "" {
		err = rbdVol.configureEncryption(imageAttributes.KmsID, secrets)
		if err != nil {
			return rbdVol, err
		}
	}
	// convert the journal pool ID to name, for use in DeleteVolume cases
	if imageAttributes.JournalPoolID >= 0 {
		rbdVol.JournalPool, err = util.GetPoolName(rbdVol.Monitors, cr, imageAttributes.JournalPoolID)
		if err != nil {
			// TODO: If pool is not found we may leak the image (as DeleteVolume will return success)
			return rbdVol, err
		}
	}

	if rbdVol.ImageID == "" {
		err = rbdVol.storeImageID(ctx, j)
		if err != nil {
			return rbdVol, err
		}
	}

	err = rbdVol.getImageInfo()

	return rbdVol, err
}
|
|
|
|
|
rbd:store/Read volumeID in/from PV annotation
In the case of the Async DR, the volumeID will
not be the same if the clusterID or the PoolID
is different, With Earlier implementation, it
is expected that the new volumeID mapping is
stored in the rados omap pool. In the case of the
ControllerExpand or the DeleteVolume Request,
the only volumeID will be sent it's not possible
to find the corresponding poolID in the new cluster.
With This Change, it works as below
The csi-rbdplugin-controller will watch for the PV
objects, when there are any PV objects created it
will check the omap already exists, If the omap doesn't
exist it will generate the new volumeID and it checks for
the volumeID mapping entry in the PV annotation, if the
mapping does not exist, it will add the new entry
to the PV annotation.
The cephcsi will check for the PV annotations if the
omap does not exist if the mapping exists in the PV
annotation, it will use the new volumeID for further
operations.
Signed-off-by: Madhu Rajanna <madhupr007@gmail.com>
2021-03-30 10:33:55 +00:00
|
|
|
// genVolFromVolID generates a rbdVolume structure from the provided identifier, updating
|
|
|
|
// the structure with elements from on-disk image metadata as well.
|
2021-06-25 11:52:34 +00:00
|
|
|
func genVolFromVolID(
|
|
|
|
ctx context.Context,
|
|
|
|
volumeID string,
|
|
|
|
cr *util.Credentials,
|
|
|
|
secrets map[string]string) (*rbdVolume, error) {
|
2021-07-22 10:36:39 +00:00
|
|
|
var (
|
|
|
|
vi util.CSIIdentifier
|
|
|
|
vol *rbdVolume
|
|
|
|
)
|
|
|
|
|
|
|
|
err := vi.DecomposeCSIID(volumeID)
|
|
|
|
if err != nil {
|
|
|
|
return vol, fmt.Errorf("%w: error decoding volume ID (%s) (%s)",
|
|
|
|
ErrInvalidVolID, err, volumeID)
|
|
|
|
}
|
|
|
|
|
|
|
|
vol, err = generateVolumeFromVolumeID(ctx, volumeID, vi, cr, secrets)
|
2021-06-25 11:52:34 +00:00
|
|
|
if !errors.Is(err, util.ErrKeyNotFound) && !errors.Is(err, util.ErrPoolNotFound) &&
|
|
|
|
!errors.Is(err, ErrImageNotFound) {
|
rbd:store/Read volumeID in/from PV annotation
In the case of the Async DR, the volumeID will
not be the same if the clusterID or the PoolID
is different, With Earlier implementation, it
is expected that the new volumeID mapping is
stored in the rados omap pool. In the case of the
ControllerExpand or the DeleteVolume Request,
the only volumeID will be sent it's not possible
to find the corresponding poolID in the new cluster.
With This Change, it works as below
The csi-rbdplugin-controller will watch for the PV
objects, when there are any PV objects created it
will check the omap already exists, If the omap doesn't
exist it will generate the new volumeID and it checks for
the volumeID mapping entry in the PV annotation, if the
mapping does not exist, it will add the new entry
to the PV annotation.
The cephcsi will check for the PV annotations if the
omap does not exist if the mapping exists in the PV
annotation, it will use the new volumeID for further
operations.
Signed-off-by: Madhu Rajanna <madhupr007@gmail.com>
2021-03-30 10:33:55 +00:00
|
|
|
return vol, err
|
|
|
|
}
|
|
|
|
|
2021-07-22 10:36:39 +00:00
|
|
|
// Check clusterID mapping exists
|
|
|
|
mapping, mErr := util.GetClusterMappingInfo(vi.ClusterID)
|
|
|
|
if mErr != nil {
|
|
|
|
return vol, mErr
|
|
|
|
}
|
|
|
|
if mapping != nil {
|
|
|
|
rbdVol, vErr := generateVolumeFromMapping(ctx, mapping, volumeID, vi, cr, secrets)
|
|
|
|
if !errors.Is(vErr, util.ErrKeyNotFound) && !errors.Is(vErr, util.ErrPoolNotFound) &&
|
|
|
|
!errors.Is(vErr, ErrImageNotFound) {
|
|
|
|
return rbdVol, vErr
|
|
|
|
}
|
|
|
|
}
|
|
|
|
// TODO: remove extracting volumeID from PV annotations.
|
|
|
|
|
rbd:store/Read volumeID in/from PV annotation
In the case of the Async DR, the volumeID will
not be the same if the clusterID or the PoolID
is different, With Earlier implementation, it
is expected that the new volumeID mapping is
stored in the rados omap pool. In the case of the
ControllerExpand or the DeleteVolume Request,
the only volumeID will be sent it's not possible
to find the corresponding poolID in the new cluster.
With This Change, it works as below
The csi-rbdplugin-controller will watch for the PV
objects, when there are any PV objects created it
will check the omap already exists, If the omap doesn't
exist it will generate the new volumeID and it checks for
the volumeID mapping entry in the PV annotation, if the
mapping does not exist, it will add the new entry
to the PV annotation.
The cephcsi will check for the PV annotations if the
omap does not exist if the mapping exists in the PV
annotation, it will use the new volumeID for further
operations.
Signed-off-by: Madhu Rajanna <madhupr007@gmail.com>
2021-03-30 10:33:55 +00:00
|
|
|
// If the volume details are not found in the OMAP it can be a mirrored RBD
|
|
|
|
// image and the OMAP is already generated and the volumeHandle might not
|
|
|
|
// be the same in the PV.Spec.CSI.VolumeHandle. Check the PV annotation for
|
|
|
|
// the new volumeHandle. If the new volumeHandle is found, generate the RBD
|
|
|
|
// volume structure from the new volumeHandle.
|
2021-08-31 12:18:37 +00:00
|
|
|
c, cErr := k8s.NewK8sClient()
|
|
|
|
if cErr != nil {
|
|
|
|
return vol, cErr
|
|
|
|
}
|
|
|
|
|
rbd:store/Read volumeID in/from PV annotation
In the case of the Async DR, the volumeID will
not be the same if the clusterID or the PoolID
is different, With Earlier implementation, it
is expected that the new volumeID mapping is
stored in the rados omap pool. In the case of the
ControllerExpand or the DeleteVolume Request,
the only volumeID will be sent it's not possible
to find the corresponding poolID in the new cluster.
With This Change, it works as below
The csi-rbdplugin-controller will watch for the PV
objects, when there are any PV objects created it
will check the omap already exists, If the omap doesn't
exist it will generate the new volumeID and it checks for
the volumeID mapping entry in the PV annotation, if the
mapping does not exist, it will add the new entry
to the PV annotation.
The cephcsi will check for the PV annotations if the
omap does not exist if the mapping exists in the PV
annotation, it will use the new volumeID for further
operations.
Signed-off-by: Madhu Rajanna <madhupr007@gmail.com>
2021-03-30 10:33:55 +00:00
|
|
|
listOpt := metav1.ListOptions{
|
|
|
|
LabelSelector: PVReplicatedLabelKey,
|
|
|
|
}
|
|
|
|
pvlist, pErr := c.CoreV1().PersistentVolumes().List(context.TODO(), listOpt)
|
|
|
|
if pErr != nil {
|
|
|
|
return vol, pErr
|
|
|
|
}
|
|
|
|
for i := range pvlist.Items {
|
|
|
|
if pvlist.Items[i].Spec.CSI != nil && pvlist.Items[i].Spec.CSI.VolumeHandle == volumeID {
|
|
|
|
if v, ok := pvlist.Items[i].Annotations[PVVolumeHandleAnnotationKey]; ok {
|
2021-08-24 15:03:25 +00:00
|
|
|
log.UsefulLog(ctx, "found new volumeID %s for existing volumeID %s", v, volumeID)
|
2021-07-22 10:36:39 +00:00
|
|
|
err = vi.DecomposeCSIID(v)
|
|
|
|
if err != nil {
|
|
|
|
return vol, fmt.Errorf("%w: error decoding volume ID (%s) (%s)",
|
|
|
|
ErrInvalidVolID, err, v)
|
|
|
|
}
|
2021-07-22 05:45:17 +00:00
|
|
|
|
2021-07-22 10:36:39 +00:00
|
|
|
return generateVolumeFromVolumeID(ctx, v, vi, cr, secrets)
|
			}
		}
	}

	return vol, err
}
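
// Illustrative sketch (not part of the original source): how a controller
// could record the new volumeHandle on a PV so the annotation lookup above
// finds it; pv and newVolumeID are hypothetical, the client-go calls are
// real.
//
//	pv.Annotations[PVVolumeHandleAnnotationKey] = newVolumeID
//	_, err := c.CoreV1().PersistentVolumes().Update(context.TODO(), pv, metav1.UpdateOptions{})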

// generateVolumeFromMapping checks the clusterID and poolID mapping and
// retrieves the OMAP information using the poolID obtained from the mapping.
func generateVolumeFromMapping(
	ctx context.Context,
	mapping *[]util.ClusterMappingInfo,
	volumeID string,
	vi util.CSIIdentifier,
	cr *util.Credentials,
	secrets map[string]string) (*rbdVolume, error) {
	nvi := vi
	vol := &rbdVolume{}
	// extract clusterID mapping
	for _, cm := range *mapping {
		for key, val := range cm.ClusterIDMapping {
			mappedClusterID := util.GetMappedID(key, val, vi.ClusterID)
			if mappedClusterID == "" {
				continue
			}

			log.DebugLog(ctx,
				"found new clusterID mapping %s for existing clusterID %s",
				mappedClusterID,
				vi.ClusterID)
			// Add mapping clusterID to Identifier
			nvi.ClusterID = mappedClusterID
			poolID := fmt.Sprintf("%d", vi.LocationID)
			for _, pools := range cm.RBDpoolIDMappingInfo {
				for key, val := range pools {
					mappedPoolID := util.GetMappedID(key, val, poolID)
					if mappedPoolID == "" {
						continue
					}
					log.DebugLog(ctx,
						"found new poolID mapping %s for existing poolID %s",
						mappedPoolID,
						poolID)
					pID, err := strconv.ParseInt(mappedPoolID, 10, 64)
					if err != nil {
						return vol, err
					}
					// Add mapping poolID to Identifier
					nvi.LocationID = pID
					vol, err = generateVolumeFromVolumeID(ctx, volumeID, nvi, cr, secrets)
					if !errors.Is(err, util.ErrKeyNotFound) && !errors.Is(err, util.ErrPoolNotFound) &&
						!errors.Is(err, ErrImageNotFound) {
						return vol, err
					}
				}
			}
		}
	}

	return vol, util.ErrPoolNotFound
}
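
// Illustrative sketch (not part of the original source): a minimal cluster
// mapping as generateVolumeFromMapping consumes it; the cluster and pool IDs
// are made up.
//
//	mapping := []util.ClusterMappingInfo{{
//		ClusterIDMapping:     map[string]string{"site1-storage": "site2-storage"},
//		RBDpoolIDMappingInfo: []map[string]string{{"1": "3"}},
//	}}
//	vol, err := generateVolumeFromMapping(ctx, &mapping, volumeID, vi, cr, secrets)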

func genVolFromVolumeOptions(
	ctx context.Context,
	volOptions, credentials map[string]string,
	disableInUseChecks, checkClusterIDMapping bool) (*rbdVolume, error) {
	var (
		ok         bool
		err        error
		namePrefix string
	)

	rbdVol := &rbdVolume{}
	rbdVol.Pool, ok = volOptions["pool"]
	if !ok {
		return nil, errors.New("missing required parameter pool")
	}

	rbdVol.DataPool = volOptions["dataPool"]
	if namePrefix, ok = volOptions["volumeNamePrefix"]; ok {
		rbdVol.NamePrefix = namePrefix
	}

	clusterID, err := util.GetClusterID(volOptions)
	if err != nil {
		return nil, err
	}
	rbdVol.Monitors, rbdVol.ClusterID, err = util.GetMonsAndClusterID(ctx, clusterID, checkClusterIDMapping)
	if err != nil {
		log.ErrorLog(ctx, "failed getting mons (%s)", err)

		return nil, err
	}

	rbdVol.RadosNamespace, err = util.GetRadosNamespace(util.CsiConfigFile, rbdVol.ClusterID)
	if err != nil {
		return nil, err
	}
	if rbdVol.Mounter, ok = volOptions["mounter"]; !ok {
		rbdVol.Mounter = rbdDefaultMounter
	}
	// if no image features are provided, the result is an empty string,
	// which disables all RBD image features, as expected
	if err = rbdVol.validateImageFeatures(volOptions["imageFeatures"]); err != nil {
		log.ErrorLog(ctx, "failed to validate image features %v", err)

		return nil, err
	}

	log.ExtendedLog(
		ctx,
		"setting disableInUseChecks: %t image features: %v mounter: %s",
		disableInUseChecks,
		rbdVol.imageFeatureSet.Names(),
		rbdVol.Mounter)
	rbdVol.DisableInUseChecks = disableInUseChecks

	err = rbdVol.initKMS(ctx, volOptions, credentials)
	if err != nil {
		return nil, err
	}

	return rbdVol, nil
}
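
// Illustrative sketch (not part of the original source): typical
// StorageClass-style parameters as genVolFromVolumeOptions consumes them;
// the values are made up and clusterID is resolved via util.GetClusterID.
//
//	volOptions := map[string]string{
//		"clusterID":     "ceph-cluster-1",
//		"pool":          "replicapool",
//		"imageFeatures": "layering",
//		"mounter":       "rbd",
//	}
//	rbdVol, err := genVolFromVolumeOptions(ctx, volOptions, secrets, false, true)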

func (rv *rbdVolume) validateImageFeatures(imageFeatures string) error {
	// It is possible for image features to be an empty string, for which
	// the Go split function would return a single-item array containing
	// an empty string, causing a failure when trying to validate
	// the features.
	if imageFeatures == "" {
		return nil
	}
	arr := strings.Split(imageFeatures, ",")
	featureSet := sets.NewString(arr...)
	for _, f := range arr {
		sf, found := supportedFeatures[f]
		if !found {
			return fmt.Errorf("invalid feature %s", f)
		}

		for _, r := range sf.dependsOn {
			if !featureSet.Has(r) {
				return fmt.Errorf("feature %s requires %s to be set", f, r)
			}
		}

		if sf.needRbdNbd && rv.Mounter != rbdNbdMounter {
			return fmt.Errorf("feature %s requires rbd-nbd for mounter", f)
		}
	}
	rv.imageFeatureSet = librbd.FeatureSetFromNames(arr)

	return nil
}
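
// Illustrative sketch (not part of the original source): how the dependency
// check above behaves, assuming supportedFeatures marks "object-map" as
// depending on "exclusive-lock" (matching rbd's own feature requirements):
//
//	err := rv.validateImageFeatures("object-map")
//	// -> "feature object-map requires exclusive-lock to be set"
//	err = rv.validateImageFeatures("layering,exclusive-lock,object-map")
//	// -> nil, and rv.imageFeatureSet is populated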

func genSnapFromOptions(ctx context.Context, rbdVol *rbdVolume, snapOptions map[string]string) (*rbdSnapshot, error) {
	var err error

	rbdSnap := &rbdSnapshot{}
	rbdSnap.Pool = rbdVol.Pool
	rbdSnap.JournalPool = rbdVol.JournalPool
	rbdSnap.RadosNamespace = rbdVol.RadosNamespace

	clusterID, err := util.GetClusterID(snapOptions)
	if err != nil {
		return nil, err
	}
	rbdSnap.Monitors, rbdSnap.ClusterID, err = util.GetMonsAndClusterID(ctx, clusterID, false)
	if err != nil {
		log.ErrorLog(ctx, "failed getting mons (%s)", err)

		return nil, err
	}

	if namePrefix, ok := snapOptions["snapshotNamePrefix"]; ok {
		rbdSnap.NamePrefix = namePrefix
	}

	return rbdSnap, nil
}

// hasSnapshotFeature checks if Layering is enabled for this image.
func (rv *rbdVolume) hasSnapshotFeature() bool {
	return (uint64(rv.imageFeatureSet) & librbd.FeatureLayering) == librbd.FeatureLayering
}

func (rv *rbdVolume) createSnapshot(ctx context.Context, pOpts *rbdSnapshot) error {
	log.DebugLog(ctx, "rbd: snap create %s using mon %s", pOpts, pOpts.Monitors)
	image, err := rv.open()
	if err != nil {
		return err
	}
	defer image.Close()

	_, err = image.CreateSnapshot(pOpts.RbdSnapName)

	return err
}

func (rv *rbdVolume) deleteSnapshot(ctx context.Context, pOpts *rbdSnapshot) error {
	log.DebugLog(ctx, "rbd: snap rm %s using mon %s", pOpts, pOpts.Monitors)
	image, err := rv.open()
	if err != nil {
		return err
	}
	defer image.Close()

	snap := image.GetSnapshot(pOpts.RbdSnapName)
	if snap == nil {
		return fmt.Errorf("snapshot value is nil for %s", pOpts.RbdSnapName)
	}
	err = snap.Remove()
	if errors.Is(err, librbd.ErrNotFound) {
		return util.JoinErrors(ErrSnapNotFound, err)
	}

	return err
}

func (rv *rbdVolume) cloneRbdImageFromSnapshot(
	ctx context.Context,
	pSnapOpts *rbdSnapshot,
	parentVol *rbdVolume) error {
	var err error
	logMsg := "rbd: clone %s %s (features: %s) using mon %s"

	err = parentVol.openIoctx()
	if err != nil {
		return fmt.Errorf("failed to get parent IOContext: %w", err)
	}
	defer func() {
		// destroy the parent ioctx and reset it once the clone is done
		parentVol.ioctx.Destroy()
		parentVol.ioctx = nil
	}()

	options := librbd.NewRbdImageOptions()
	defer options.Destroy()

	if rv.DataPool != "" {
		logMsg += fmt.Sprintf(", data pool %s", rv.DataPool)
		err = options.SetString(librbd.RbdImageOptionDataPool, rv.DataPool)
		if err != nil {
			return fmt.Errorf("failed to set data pool: %w", err)
		}
	}

	log.DebugLog(ctx, logMsg,
		pSnapOpts, rv, rv.imageFeatureSet.Names(), rv.Monitors)

	if rv.imageFeatureSet != 0 {
		err = options.SetUint64(librbd.RbdImageOptionFeatures, uint64(rv.imageFeatureSet))
		if err != nil {
			return fmt.Errorf("failed to set image features: %w", err)
		}
	}

	err = options.SetUint64(librbd.ImageOptionCloneFormat, 2)
	if err != nil {
		return fmt.Errorf("failed to set clone format: %w", err)
	}

	// As the clone is yet to be created, open the Ioctx.
	err = rv.openIoctx()
	if err != nil {
		return fmt.Errorf("failed to get IOContext: %w", err)
	}

	err = librbd.CloneImage(
		parentVol.ioctx,
		pSnapOpts.RbdImageName,
		pSnapOpts.RbdSnapName,
		rv.ioctx,
		rv.RbdImageName,
		options)
	if err != nil {
		return fmt.Errorf("failed to create rbd clone: %w", err)
	}

	// delete the cloned image if a next step fails
	deleteClone := true
	defer func() {
		if deleteClone {
			err = librbd.RemoveImage(rv.ioctx, rv.RbdImageName)
			if err != nil {
				log.ErrorLog(ctx, "failed to delete temporary image %q: %v", rv, err)
			}
		}
	}()

	if pSnapOpts.isEncrypted() {
		pSnapOpts.conn = rv.conn.Copy()

		err = pSnapOpts.copyEncryptionConfig(&rv.rbdImage, true)
		if err != nil {
			return fmt.Errorf("failed to clone encryption config: %w", err)
		}
	}

	// Success! Do not delete the cloned image now :)
	deleteClone = false

	return nil
}
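
// Illustrative sketch (not part of the original source): the deleteClone
// pattern above is a general cleanup-on-failure idiom; undo() is a
// hypothetical rollback step.
//
//	cleanup := true
//	defer func() {
//		if cleanup {
//			undo()
//		}
//	}()
//	// ... fallible steps; any early return triggers undo() ...
//	cleanup = false // success: keep the result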

// getImageInfo queries rbd about the given image and updates the rbdVolume
// with its metadata; it returns ErrImageNotFound if the image is not found.
func (rv *rbdVolume) getImageInfo() error {
	image, err := rv.open()
	if err != nil {
		return err
	}
	defer image.Close()

	imageInfo, err := image.Stat()
	if err != nil {
		return err
	}
	// TODO: can rv.VolSize not be a uint64? Or initialize it to -1?
	rv.VolSize = int64(imageInfo.Size)

	features, err := image.GetFeatures()
	if err != nil {
		return err
	}
	rv.imageFeatureSet = librbd.FeatureSet(features)

	// Get parent information.
	parentInfo, err := image.GetParent()
	if err != nil {
		// Caller should decide whether not finding
		// the parent is an error or not.
		if errors.Is(err, librbd.ErrNotFound) {
			rv.ParentName = ""
		} else {
			return err
		}
	} else {
		rv.ParentName = parentInfo.Image.ImageName
		rv.ParentPool = parentInfo.Image.PoolName
	}
	// Get image creation time
	tm, err := image.GetCreateTimestamp()
	if err != nil {
		return err
	}
	t := time.Unix(tm.Sec, tm.Nsec)
	protoTime, err := ptypes.TimestampProto(t)
	if err != nil {
		return err
	}
	rv.CreatedAt = protoTime

	return nil
}

/*
checkSnapExists queries rbd about the snapshots of the given image and returns
ErrImageNotFound if provided image is not found, and ErrSnapNotFound if
provided snap is not found in the image's snapshot list.
*/
func (rv *rbdVolume) checkSnapExists(rbdSnap *rbdSnapshot) error {
	image, err := rv.open()
	if err != nil {
		return err
	}
	defer image.Close()

	snaps, err := image.GetSnapshotNames()
	if err != nil {
		return err
	}

	for _, snap := range snaps {
		if snap.Name == rbdSnap.RbdSnapName {
			return nil
		}
	}

	return fmt.Errorf("%w: snap %s not found", ErrSnapNotFound, rbdSnap)
}

// rbdImageMetadataStash strongly typed JSON spec for stashed RBD image metadata.
type rbdImageMetadataStash struct {
	Version        int    `json:"Version"`
	Pool           string `json:"pool"`
	RadosNamespace string `json:"radosNamespace"`
	ImageName      string `json:"image"`
	UnmapOptions   string `json:"unmapOptions"`
	NbdAccess      bool   `json:"accessType"`
	Encrypted      bool   `json:"encrypted"`
	DevicePath     string `json:"device"`          // holds NBD device path for now
	LogDir         string `json:"logDir"`          // holds the client log path
	LogStrategy    string `json:"logFileStrategy"` // ceph client log strategy
}
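
// Illustrative sketch (not part of the original source): a stashed
// image-meta.json as serialized from the struct above; all values are made
// up.
//
//	{"Version":3,"pool":"replicapool","radosNamespace":"","image":"csi-vol-123",
//	 "unmapOptions":"","accessType":false,"encrypted":false,"device":"",
//	 "logDir":"","logFileStrategy":""}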

// file name in which image metadata is stashed.
const stashFileName = "image-meta.json"

// String returns the image-spec (pool/{namespace/}image) format of the image.
func (ri *rbdImageMetadataStash) String() string {
	if ri.RadosNamespace != "" {
		return fmt.Sprintf("%s/%s/%s", ri.Pool, ri.RadosNamespace, ri.ImageName)
	}

	return fmt.Sprintf("%s/%s", ri.Pool, ri.ImageName)
}

// stashRBDImageMetadata stashes required fields into the stashFileName at the passed in path, in
// JSON format.
func stashRBDImageMetadata(volOptions *rbdVolume, metaDataPath string) error {
	imgMeta := rbdImageMetadataStash{
		// there are no checks for this at present
		Version:        3, // nolint:gomnd // number specifies version.
		Pool:           volOptions.Pool,
		RadosNamespace: volOptions.RadosNamespace,
		ImageName:      volOptions.RbdImageName,
		Encrypted:      volOptions.isEncrypted(),
		UnmapOptions:   volOptions.UnmapOptions,
	}

	imgMeta.NbdAccess = false
	if volOptions.Mounter == rbdTonbd && hasNBD {
		imgMeta.NbdAccess = true
		imgMeta.LogDir = volOptions.LogDir
		imgMeta.LogStrategy = volOptions.LogStrategy
	}

	encodedBytes, err := json.Marshal(imgMeta)
	if err != nil {
		return fmt.Errorf("failed to marshal JSON image metadata for image (%s): %w", volOptions, err)
	}

	fPath := filepath.Join(metaDataPath, stashFileName)
	err = ioutil.WriteFile(fPath, encodedBytes, 0o600)
	if err != nil {
		return fmt.Errorf("failed to stash JSON image metadata for image (%s) at path (%s): %w", volOptions, fPath, err)
	}

	return nil
}

// lookupRBDImageMetadataStash reads and returns stashed image metadata at passed in path.
func lookupRBDImageMetadataStash(metaDataPath string) (rbdImageMetadataStash, error) {
	var imgMeta rbdImageMetadataStash

	fPath := filepath.Join(metaDataPath, stashFileName)
	encodedBytes, err := ioutil.ReadFile(fPath) // #nosec - intended reading from fPath
	if err != nil {
		if !os.IsNotExist(err) {
			return imgMeta, fmt.Errorf("failed to read stashed JSON image metadata from path (%s): %w", fPath, err)
		}

		return imgMeta, util.JoinErrors(ErrMissingStash, err)
	}

	err = json.Unmarshal(encodedBytes, &imgMeta)
	if err != nil {
		return imgMeta, fmt.Errorf("failed to unmarshal stashed JSON image metadata from path (%s): %w", fPath, err)
	}

	return imgMeta, nil
}
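
// Illustrative sketch (not part of the original source): the stash/lookup
// pair as a node plugin might use it across staging and unstaging; the
// stagingPath variable is hypothetical.
//
//	if err := stashRBDImageMetadata(rbdVol, stagingPath); err != nil {
//		return err
//	}
//	// later, e.g. during unstaging:
//	imgMeta, err := lookupRBDImageMetadataStash(stagingPath)
//	if errors.Is(err, ErrMissingStash) {
//		// nothing was staged at this path
//	}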

// updateRBDImageMetadataStash reads and updates stashFile with the required
// fields at the passed in path, in JSON format.
func updateRBDImageMetadataStash(metaDataPath, device string) error {
	if device == "" {
		return errors.New("device is empty")
	}
	imgMeta, err := lookupRBDImageMetadataStash(metaDataPath)
	if err != nil {
		return fmt.Errorf("failed to find image metadata: %w", err)
	}
	imgMeta.DevicePath = device

	encodedBytes, err := json.Marshal(imgMeta)
	if err != nil {
		return fmt.Errorf("failed to marshal JSON image metadata for spec:(%s) : %w", imgMeta.String(), err)
	}

	fPath := filepath.Join(metaDataPath, stashFileName)
	err = ioutil.WriteFile(fPath, encodedBytes, 0o600)
	if err != nil {
		return fmt.Errorf("failed to stash JSON image metadata at path: (%s) for spec:(%s) : %w",
			fPath, imgMeta.String(), err)
	}

	return nil
}

// cleanupRBDImageMetadataStash cleans up any stashed metadata at passed in path.
func cleanupRBDImageMetadataStash(metaDataPath string) error {
	fPath := filepath.Join(metaDataPath, stashFileName)
	if err := os.Remove(fPath); err != nil {
		return fmt.Errorf("failed to cleanup stashed JSON data (%s): %w", fPath, err)
	}

	return nil
}

// resize the given volume to new size.
// updates VolSize of rbdVolume object to newSize in case of success.
func (rv *rbdVolume) resize(newSize int64) error {
	image, err := rv.open()
	if err != nil {
		return err
	}
	defer image.Close()

	thick, err := rv.isThickProvisioned()
	if err != nil {
		return err
	}

	// offset is used to track from where on the expansion is done, so that
	// the extents can be allocated in case the image is thick-provisioned
	var offset uint64
	if thick {
		st, statErr := image.Stat()
		if statErr != nil {
			return statErr
		}

		offset = st.Size
	}

	err = image.Resize(uint64(util.RoundOffVolSize(newSize) * helpers.MiB))
	if err != nil {
		return err
	}

	if thick {
		err = rv.allocate(offset)
		if err != nil {
			resizeErr := image.Resize(offset)
			if resizeErr != nil {
				err = fmt.Errorf("failed to shrink image (%v) after failed allocation: %w", resizeErr, err)
			}

			return err
		}
	}

	// update VolSize of rbdVolume object to newSize.
	rv.VolSize = newSize

	return nil
}
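
// Illustrative sketch (not part of the original source): resize takes the
// requested size in bytes and rounds it off to MiB granularity via
// util.RoundOffVolSize; assuming helpers.GiB from
// k8s.io/cloud-provider/volume/helpers:
//
//	// grow to 3 GiB; on a thick-provisioned image the new extents are
//	// allocated as well
//	if err := rbdVol.resize(3 * helpers.GiB); err != nil {
//		return err
//	}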

func (ri *rbdImage) GetMetadata(key string) (string, error) {
	image, err := ri.open()
	if err != nil {
		return "", err
	}
	defer image.Close()

	return image.GetMetadata(key)
}

func (ri *rbdImage) SetMetadata(key, value string) error {
	image, err := ri.open()
	if err != nil {
		return err
	}
	defer image.Close()

	return image.SetMetadata(key, value)
}

// RemoveMetadata deletes the key and data from the metadata of the image.
func (ri *rbdImage) RemoveMetadata(key string) error {
	image, err := ri.open()
	if err != nil {
		return err
	}
	defer image.Close()

	return image.RemoveMetadata(key)
}

// MigrateMetadata reads the metadata contents from oldKey and stores it in
// newKey. In case oldKey was not set, the defaultValue is stored in newKey.
// Once done, oldKey will be removed as well.
func (ri *rbdImage) MigrateMetadata(oldKey, newKey, defaultValue string) (string, error) {
	value, err := ri.GetMetadata(newKey)
	if err == nil {
		return value, nil
	} else if !errors.Is(err, librbd.ErrNotFound) {
		return "", err
	}

	// migrate contents from oldKey to newKey
	removeOldKey := true
	value, err = ri.GetMetadata(oldKey)
	if errors.Is(err, librbd.ErrNotFound) {
		// in case oldKey was not set, set newKey to defaultValue
		value = defaultValue
		removeOldKey = false
	} else if err != nil {
		return "", err
	}

	// newKey was not set, set it now to prevent regular error cases for missing metadata
	err = ri.SetMetadata(newKey, value)
	if err != nil {
		return "", err
	}

	// the newKey was set with data from oldKey, oldKey is not needed anymore
	if removeOldKey {
		err = ri.RemoveMetadata(oldKey)
		if err != nil {
			return "", err
		}
	}

	return value, nil
}
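
// Illustrative sketch (not part of the original source): migrating a renamed
// metadata key while keeping a sane default; the key names are made up.
//
//	v, err := ri.MigrateMetadata("old.example/key", "new.example/key", "false")
//	// v holds the value of old.example/key if it existed, otherwise "false",
//	// and the value is now stored under new.example/key only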

// setThickProvisioned records in the image metadata that it has been
// thick-provisioned.
func (ri *rbdImage) setThickProvisioned() error {
	err := ri.SetMetadata(thickProvisionMetaKey, thickProvisionMetaData)
	if err != nil {
		return fmt.Errorf("failed to set metadata %q for %q: %w", thickProvisionMetaKey, ri, err)
	}

	return nil
}

// isThickProvisioned checks in the image metadata if the image has been marked
// as thick-provisioned. This can be used while expanding the image, so that
// the expansion can be allocated too.
func (ri *rbdImage) isThickProvisioned() (bool, error) {
	value, err := ri.MigrateMetadata(deprecatedthickProvisionMetaKey, thickProvisionMetaKey, thinProvisionMetaData)
	if err != nil {
		return false, fmt.Errorf("failed to get metadata %q for %q: %w", thickProvisionMetaKey, ri, err)
	}

	thick, err := strconv.ParseBool(value)
	if err != nil {
		return false, fmt.Errorf("failed to convert %q=%q to a boolean: %w", thickProvisionMetaKey, value, err)
	}

	return thick, nil
}

// RepairThickProvision writes zero bytes to the volume so that it will be
// completely allocated. In case the volume is already marked as
// thick-provisioned, nothing will be done.
func (rv *rbdVolume) RepairThickProvision() error {
	// if the image has the thick-provisioned metadata, it has been fully
	// allocated
	done, err := rv.isThickProvisioned()
	if err != nil {
		return fmt.Errorf("failed to repair thick-provisioning of %q: %w", rv, err)
	} else if done {
		return nil
	}

	// in case there are watchers, assume allocating is still happening in
	// the background (by another process?)
	background, err := rv.isInUse()
	if err != nil {
		return fmt.Errorf("failed to get users of %q: %w", rv, err)
	} else if background {
		return fmt.Errorf("not going to restart thick-provisioning of in-use image %q", rv)
	}

	// TODO: can this be improved by starting at the offset where
	// allocating was aborted/restarted?
	err = rv.allocate(0)
	if err != nil {
		return fmt.Errorf("failed to continue thick-provisioning of %q: %w", rv, err)
	}

	err = rv.setThickProvisioned()
	if err != nil {
		return fmt.Errorf("failed to mark thick-provisioning of %q as complete: %w", rv, err)
	}

	return nil
}

// DeepCopy creates an independent image (dest) from the source image. This
// process may take some time when the image is large.
func (rv *rbdVolume) DeepCopy(dest *rbdVolume) error {
	opts := librbd.NewRbdImageOptions()
	defer opts.Destroy()

	// when doing DeepCopy, also flatten the new image
	err := opts.SetUint64(librbd.ImageOptionFlatten, 1)
	if err != nil {
		return err
	}

	err = dest.openIoctx()
	if err != nil {
		return err
	}

	image, err := rv.open()
	if err != nil {
		return err
	}
	defer image.Close()

	err = image.DeepCopy(dest.ioctx, dest.RbdImageName, opts)
	if err != nil {
		return err
	}

	// deep-flatten is not supported by all clients, so disable it
	return dest.DisableDeepFlatten()
}

// DisableDeepFlatten removes the deep-flatten feature from the image.
func (rv *rbdVolume) DisableDeepFlatten() error {
	image, err := rv.open()
	if err != nil {
		return err
	}
	defer image.Close()

	return image.UpdateFeatures(librbd.FeatureDeepFlatten, false)
}

func (rv *rbdVolume) listSnapshots() ([]librbd.SnapInfo, error) {
	image, err := rv.open()
	if err != nil {
		return nil, err
	}
	defer image.Close()

	snapInfoList, err := image.GetSnapshotNames()
	if err != nil {
		return nil, err
	}

	return snapInfoList, nil
}

// isTrashSnap returns true if the snapshot belongs to trash namespace.
func (rv *rbdVolume) isTrashSnap(snapID uint64) (bool, error) {
	image, err := rv.open()
	if err != nil {
		return false, err
	}
	defer image.Close()

	// Get namespace type for the snapshot
	nsType, err := image.GetSnapNamespaceType(snapID)
	if err != nil {
		return false, err
	}

	if nsType == librbd.SnapNamespaceTypeTrash {
		return true, nil
	}

	return false, nil
}

// getOrigSnapName returns the original snap name for
// the snapshots in Trash Namespace.
func (rv *rbdVolume) getOrigSnapName(snapID uint64) (string, error) {
	image, err := rv.open()
	if err != nil {
		return "", err
	}
	defer image.Close()

	origSnapName, err := image.GetSnapTrashNamespace(snapID)
	if err != nil {
		return "", err
	}

	return origSnapName, nil
}

func (ri *rbdImage) isCompatibleEncryption(dst *rbdImage) error {
	switch {
	case ri.isEncrypted() && !dst.isEncrypted():
		return fmt.Errorf("cannot create unencrypted volume from encrypted volume %q", ri)

	case !ri.isEncrypted() && dst.isEncrypted():
		return fmt.Errorf("cannot create encrypted volume from unencrypted volume %q", ri)
	}

	return nil
}

func (ri *rbdImage) isCompatibleThickProvision(dst *rbdVolume) error {
	thick, err := ri.isThickProvisioned()
	if err != nil {
		return err
	}
	switch {
	case thick && !dst.ThickProvision:
		return fmt.Errorf("cannot create thin volume from thick volume %q", ri)

	case !thick && dst.ThickProvision:
		return fmt.Errorf("cannot create thick volume from thin volume %q", ri)
	}

	return nil
}

// FIXME: merge isCompatibleThickProvision of rbdSnapshot and rbdImage to a single
// function.
func (rs *rbdSnapshot) isCompatibleThickProvision(dst *rbdVolume) error {
	// During CreateSnapshot the rbd image will be created with the
	// snapshot name. Replacing RbdImageName with RbdSnapName so that we
	// can check if the image is thick provisioned
	vol := generateVolFromSnap(rs)
	err := vol.Connect(rs.conn.Creds)
	if err != nil {
		return err
	}
	defer vol.Destroy()

	thick, err := vol.isThickProvisioned()
	if err != nil {
		return err
	}
	switch {
	case thick && !dst.ThickProvision:
		return fmt.Errorf("cannot create thin volume from thick volume %q", vol)

	case !thick && dst.ThickProvision:
		return fmt.Errorf("cannot create thick volume from thin volume %q", vol)
	}

	return nil
}

func (ri *rbdImage) addSnapshotScheduling(
	interval admin.Interval,
	startTime admin.StartTime) error {
	ls := admin.NewLevelSpec(ri.Pool, ri.RadosNamespace, ri.RbdImageName)
	ra, err := ri.conn.GetRBDAdmin()
	if err != nil {
		return err
	}
	adminConn := ra.MirrorSnashotSchedule()
	// list all the snapshot schedules and check whether at least one
	// image-level schedule exists with the specified interval.
	ssList, err := adminConn.List(ls)
	if err != nil {
		return err
	}

	for _, ss := range ssList {
		// make sure we are matching image level scheduling. The
		// `adminConn.List` lists the global level scheduling also.
		if ss.Name == ri.String() {
			for _, s := range ss.Schedule {
				// TODO: Add support to check start time also.
				// The start time is currently stored with different format
				// in ceph. Comparison is not possible unless we know in
				// which format ceph is storing it.
				if s.Interval == interval {
					// a schedule with this interval already exists
					return nil
				}
			}
		}
	}
	err = adminConn.Add(ls, interval, startTime)
	if err != nil {
		return err
	}

	return nil
}
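
// Illustrative sketch (not part of the original source): scheduling a
// mirror-snapshot every hour, assuming go-ceph's admin.Interval accepts the
// same string form as `rbd mirror snapshot schedule add` (e.g. "1h"):
//
//	err := ri.addSnapshotScheduling(admin.Interval("1h"), admin.StartTime(""))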

// getCephClientLogFileName compiles the complete log file path based on inputs.
func getCephClientLogFileName(id, logDir, prefix string) string {
	if prefix == "" {
		prefix = "ceph"
	}

	if logDir == "" {
		logDir = defaultLogDir
	}

	return fmt.Sprintf("%s/%s-%s.log", logDir, prefix, id)
}
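
// Illustrative sketch (not part of the original source): assuming
// defaultLogDir is "/var/log/ceph", the defaults compose like this:
//
//	getCephClientLogFileName("csi-vol-123", "", "rbd-nbd")
//	// -> "/var/log/ceph/rbd-nbd-csi-vol-123.log"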

// CheckSliceContains checks whether the slice contains the given string.
func CheckSliceContains(options []string, opt string) bool {
	for _, o := range options {
		if o == opt {
			return true
		}
	}

	return false
}

// strategicActionOnLogFile acts on the log file based on cephLogStrategy.
func strategicActionOnLogFile(ctx context.Context, logStrategy, logFile string) {
	var err error

	switch strings.ToLower(logStrategy) {
	case "compress":
		if err = log.GzipLogFile(logFile); err != nil {
			log.ErrorLog(ctx, "failed to compress logfile %q: %v", logFile, err)
		}
	case "remove":
		if err = os.Remove(logFile); err != nil {
			log.ErrorLog(ctx, "failed to remove logfile %q: %v", logFile, err)
		}
	case "preserve":
		// do nothing
	default:
		log.ErrorLog(ctx, "unknown cephLogStrategy option %q: hint: 'remove'|'compress'|'preserve'", logStrategy)
	}
}