/*
Copyright 2019 The Ceph-CSI Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package util

import (
	"context"
	"errors"
	"fmt"
	"math"
	"os"
	"runtime"
	"strings"
	"time"

	"github.com/ceph/ceph-csi/internal/util/log"

	"golang.org/x/sys/unix"
	"k8s.io/apimachinery/pkg/util/validation"
	"k8s.io/cloud-provider/volume/helpers"
	mount "k8s.io/mount-utils"
)

// RoundOffVolSize rounds up the given quantity to chunks of MiB/GiB and
// returns the result in MiB.
func RoundOffVolSize(size int64) int64 {
	size = RoundOffBytes(size)
	// convert size back to MiB for the rbd CLI
	return size / helpers.MiB
}

// RoundOffBytes rounds up the size to the nearest MiB or GiB chunk:
// 1.1MiB is rounded up to 2MiB, and the same applies for GiB.
// A size of less than 1MiB is rounded up to 1MiB.
func RoundOffBytes(bytes int64) int64 {
	var num int64
	// round up the value if it is fractional
	if floatBytes := float64(bytes); floatBytes < helpers.GiB {
		num = int64(math.Ceil(floatBytes / helpers.MiB))
		num *= helpers.MiB
	} else {
		num = int64(math.Ceil(floatBytes / helpers.GiB))
		num *= helpers.GiB
	}

	return num
}
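
// A minimal illustration of the rounding behaviour (hypothetical values, not
// taken from any caller in this package):
//
//	RoundOffBytes(helpers.MiB + 1)     // 1MiB + 1B rounds up to 2 * helpers.MiB
//	RoundOffBytes(500 * 1024)          // below 1MiB rounds up to 1 * helpers.MiB
//	RoundOffVolSize(5*helpers.GiB + 1) // rounds up to 6GiB, returned as 6144 (MiB)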

// variables which will be set at build time.
var (
	// GitCommit is the latest git commit the image is built from.
	GitCommit string
	// DriverVersion is the version of the driver.
	DriverVersion string
)

// Config holds the parameters that can be configured.
type Config struct {
	Vtype           string // driver type [rbd|cephfs|liveness|controller]
	Endpoint        string // CSI endpoint
	DriverName      string // name of the driver
	DriverNamespace string // namespace in which the driver is deployed
	NodeID          string // node id
	InstanceID      string // unique ID distinguishing this instance of Ceph CSI
	PluginPath      string // location of cephcsi plugin
	StagingPath     string // location of cephcsi staging path
	DomainLabels    string // list of domain labels to read from the node

	// metrics related flags
	MetricsPath     string // path of prometheus endpoint where metrics will be available
	HistogramOption string // histogram option for gRPC metrics, should be a comma separated value,
	// ex:= "0.5,2,6" where start=0.5, factor=2, count=6
	MetricsIP         string        // IP address for liveness/metrics requests
	PidLimit          int           // PID limit to configure through cgroups
	MetricsPort       int           // TCP port for liveness/gRPC metrics requests
	PollTime          time.Duration // time interval in seconds between each poll
	PoolTimeout       time.Duration // probe timeout in seconds
	EnableGRPCMetrics bool          // option to enable gRPC metrics

	EnableProfiling    bool // flag to enable profiling
	IsControllerServer bool // if set to true, start the provisioner server
	IsNodeServer       bool // if set to true, start the node server
	Version            bool // cephcsi version

	// SkipForceFlatten is set to false if the kernel supports mounting of
	// rbd images, or the image chain has the deep-flatten feature.
	SkipForceFlatten bool

	// cephfs related flags
	ForceKernelCephFS bool // force to use the ceph kernel client even if the kernel is < 4.17

	// RbdHardMaxCloneDepth is the hard limit for the maximum number of nested
	// volume clones that are taken before a flatten occurs.
	RbdHardMaxCloneDepth uint

	// RbdSoftMaxCloneDepth is the soft limit for the maximum number of nested
	// volume clones that are taken before a flatten occurs.
	RbdSoftMaxCloneDepth uint

	// MaxSnapshotsOnImage represents the maximum number of snapshots allowed
	// on an rbd image without flattening; once the limit is reached, cephcsi
	// will start flattening the older rbd images to allow more snapshots.
	MaxSnapshotsOnImage uint

	// MinSnapshotsOnImage represents the soft limit for the maximum number of
	// snapshots allowed on an rbd image without flattening; once the soft
	// limit is reached, cephcsi will start flattening the older rbd images.
	MinSnapshotsOnImage uint

	// CSI-Addons endpoint
	CSIAddonsEndpoint string
}

// ValidateDriverName validates the driver name.
func ValidateDriverName(driverName string) error {
	if driverName == "" {
		return errors.New("driver name is empty")
	}

	const reqDriverNameLen = 63
	if len(driverName) > reqDriverNameLen {
		return errors.New("driver name length should be less than 63 chars")
	}
	var err error
	for _, msg := range validation.IsDNS1123Subdomain(strings.ToLower(driverName)) {
		if err == nil {
			err = errors.New(msg)

			continue
		}
		err = fmt.Errorf("%s: %w", msg, err)
	}

	return err
}
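
// Example results (a sketch for illustration; the driver names shown are
// arbitrary and not mandated by this package):
//
//	ValidateDriverName("rbd.csi.ceph.com") // nil, valid DNS-1123 subdomain
//	ValidateDriverName("")                 // error: driver name is empty
//	ValidateDriverName("Invalid_Name!")    // error from IsDNS1123Subdomain validation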

// GetKernelVersion returns the version of the running Unix(-like) system from
// the 'utsname' struct's 'release' component.
func GetKernelVersion() (string, error) {
	utsname := unix.Utsname{}
	if err := unix.Uname(&utsname); err != nil {
		return "", err
	}

	return strings.TrimRight(string(utsname.Release[:]), "\x00"), nil
}

// KernelVersion holds kernel related information.
type KernelVersion struct {
	Version      int
	PatchLevel   int
	SubLevel     int
	ExtraVersion int    // prefix of the part after the first "-"
	Distribution string // component of full extraversion
	Backport     bool   // backports have a fixed version/patchlevel/sublevel
}

// parseKernelRelease parses a kernel release version string into:
// version, patch version, sub version and extra version.
func parseKernelRelease(release string) (int, int, int, int, error) {
	version := 0
	patchlevel := 0
	minVersions := 2

	extra := ""
	n, err := fmt.Sscanf(release, "%d.%d%s", &version, &patchlevel, &extra)
	if n < minVersions && err != nil {
		return 0, 0, 0, 0, fmt.Errorf("failed to parse version and patchlevel from %s: %w", release, err)
	}

	sublevel := 0
	extraversion := 0
	if n > minVersions {
		n, err = fmt.Sscanf(extra, ".%d%s", &sublevel, &extra)
		if err != nil && n == 0 && len(extra) > 0 && extra[0] != '-' && extra[0] == '.' {
			return 0, 0, 0, 0, fmt.Errorf("failed to parse subversion from %s: %w", release, err)
		}

		extra = strings.TrimPrefix(extra, "-")
		// ignore errors, 1st component of extraversion does not need to be an int
		_, err = fmt.Sscanf(extra, "%d", &extraversion)
		if err != nil {
			// "go lint" wants err to be checked...
			extraversion = 0
		}
	}

	return version, patchlevel, sublevel, extraversion, nil
}
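
// A worked example of the parsing above (hypothetical release string):
//
//	// version=5, patchlevel=4, sublevel=0, extraversion=105
//	v, p, s, e, err := parseKernelRelease("5.4.0-105-generic")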

// CheckKernelSupport checks the running kernel and compares it to known
// versions that have support for the required features. Distributors of
// enterprise Linux have backported quota support to previous versions. This
// function checks if the running kernel is one of the versions that have the
// feature/fixes backported.
//
// `uname -r` (or Uname().Utsname.Release) has a format like 1.2.3-rc.vendor.
// This can be split up into the following components:
//   - version (1)
//   - patchlevel (2)
//   - sublevel (3) - optional, defaults to 0
//   - extraversion (rc) - optional, matching integers only
//   - distribution (.vendor) - optional, match against whole `uname -r` string
//
// For matching multiple versions, the KernelVersion type contains a Backport
// bool, which will cause matching
// version+patchlevel+sublevel+(>=extraversion)+(~distribution)
//
// In case the Backport bool is false, a simple check for higher versions than
// version+patchlevel+sublevel is done.
func CheckKernelSupport(release string, supportedVersions []KernelVersion) bool {
	version, patchlevel, sublevel, extraversion, err := parseKernelRelease(release)
	if err != nil {
		log.ErrorLogMsg("%v", err)

		return false
	}

	// compare running kernel against known versions
	for _, kernel := range supportedVersions {
		if !kernel.Backport {
			// deal with the default case(s), find >= match for version, patchlevel, sublevel
			if version > kernel.Version || (version == kernel.Version && patchlevel > kernel.PatchLevel) ||
				(version == kernel.Version && patchlevel == kernel.PatchLevel && sublevel >= kernel.SubLevel) {
				return true
			}
		} else {
			// specific backport, match distribution initially
			if !strings.Contains(release, kernel.Distribution) {
				continue
			}

			// strict match version, patchlevel, sublevel, and >= match extraversion
			if version == kernel.Version && patchlevel == kernel.PatchLevel &&
				sublevel == kernel.SubLevel && extraversion >= kernel.ExtraVersion {
				return true
			}
		}
	}
	log.WarningLogMsg("kernel %s does not support required features", release)

	return false
}
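
// A hedged caller sketch: the version table below is made up for illustration
// and does not correspond to any real feature matrix in this repository.
//
//	supported := []KernelVersion{
//		{Version: 5, PatchLevel: 4, SubLevel: 0}, // any kernel >= 5.4.0
//		{Version: 4, PatchLevel: 18, SubLevel: 0, ExtraVersion: 193, Distribution: ".el8", Backport: true},
//	}
//	if release, err := GetKernelVersion(); err == nil && CheckKernelSupport(release, supported) {
//		// the running kernel provides the required feature
//	}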

// GenerateVolID generates a volume ID based on passed in parameters and version, to be returned
// to the CO system.
func GenerateVolID(
	ctx context.Context,
	monitors string,
	cr *Credentials,
	locationID int64,
	pool, clusterID, objUUID string,
	volIDVersion uint16,
) (string, error) {
	var err error

	if locationID == InvalidPoolID {
		locationID, err = GetPoolID(monitors, cr, pool)
		if err != nil {
			return "", err
		}
	}

	// generate the volume ID to return to the CO system
	vi := CSIIdentifier{
		LocationID:      locationID,
		EncodingVersion: volIDVersion,
		ClusterID:       clusterID,
		ObjectUUID:      objUUID,
	}

	volID, err := vi.ComposeCSIID()

	return volID, err
}
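
// A call-shape sketch with hypothetical arguments (monitors, credentials and
// names must come from the csi-config and the request in real callers):
//
//	// objUUID is the UUID of the backing RADOS object/image
//	volID, err := GenerateVolID(ctx, monitors, cr, InvalidPoolID,
//		"replicapool", "ceph-cluster-1", objUUID, volIDVersion)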

// CreateMountPoint creates the directory with the given path.
func CreateMountPoint(mountPath string) error {
	return os.MkdirAll(mountPath, 0o750)
}

// checkDirExists checks whether the directory at the given path exists.
func checkDirExists(p string) bool {
	if _, err := os.Stat(p); os.IsNotExist(err) {
		return false
	}

	return true
}

// IsMountPoint checks if the given path is a mountpoint.
func IsMountPoint(p string) (bool, error) {
	dummyMount := mount.New("")
	notMnt, err := dummyMount.IsLikelyNotMountPoint(p)
	if err != nil {
		return false, err
	}

	return !notMnt, nil
}

// IsCorruptedMountError checks if the given error is a result of a corrupted
// mountpoint.
func IsCorruptedMountError(err error) bool {
	return mount.IsCorruptedMnt(err)
}

// ReadMountInfoForProc reads /proc/<PID>/mountinfo and marshals it into
// MountInfo structs.
func ReadMountInfoForProc(proc string) ([]mount.MountInfo, error) {
	return mount.ParseMountInfo(fmt.Sprintf("/proc/%s/mountinfo", proc))
}

// Mount mounts the source to the target path.
func Mount(source, target, fstype string, options []string) error {
	dummyMount := mount.New("")

	return dummyMount.Mount(source, target, fstype, options)
}
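
// How these helpers are typically combined (a sketch; stagingPath and
// devicePath are hypothetical variables, not defined in this package):
//
//	if err := CreateMountPoint(stagingPath); err != nil {
//		return err
//	}
//	mounted, err := IsMountPoint(stagingPath)
//	if err == nil && !mounted {
//		opts := strings.Split(MountOptionsAdd("", "noatime", "_netdev"), ",")
//		err = Mount(devicePath, stagingPath, "ext4", opts)
//	}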

// MountOptionsAdd adds the `add` mount options to the `options` and returns a
// new string. In case `add` is already present in the `options`, `add` is not
// added again.
func MountOptionsAdd(options string, add ...string) string {
	opts := strings.Split(options, ",")
	newOpts := []string{}
	// clean original options from empty strings
	for _, opt := range opts {
		if opt != "" {
			newOpts = append(newOpts, opt)
		}
	}

	for _, opt := range add {
		if opt != "" && !contains(newOpts, opt) {
			newOpts = append(newOpts, opt)
		}
	}

	return strings.Join(newOpts, ",")
}
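
// Deduplication example, following directly from the loops above:
//
//	MountOptionsAdd("rw,noatime", "discard", "noatime") // "rw,noatime,discard"
//	MountOptionsAdd("", "ro")                           // "ro"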

// contains reports whether key is present in the slice s.
func contains(s []string, key string) bool {
	for _, v := range s {
		if v == key {
			return true
		}
	}

	return false
}

// CallStack returns the stack of the calls in the current goroutine. Useful
// for debugging or reporting errors. This is a friendly alternative to
// assert() or panic().
func CallStack() string {
	stack := make([]byte, 2048)
	_ = runtime.Stack(stack, false)

	return string(stack)
}
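
// Illustrative use (not a call made by this file): dump the goroutine stack
// into an error log when an unexpected state is hit.
//
//	log.ErrorLogMsg("unexpected state, call stack:\n%s", CallStack())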