Merge pull request #140 from ceph/devel

Sync the upstream changes from `ceph/ceph-csi:devel` into the `devel` branch
OpenShift Merge Robot committed on 2022-10-18 11:00:26 -04:00 via GitHub
commit b7d6efac82
97 changed files with 10913 additions and 797 deletions

View File

@ -39,6 +39,7 @@ func init() {
flag.BoolVar(&deployNFS, "deploy-nfs", false, "deploy nfs csi driver") flag.BoolVar(&deployNFS, "deploy-nfs", false, "deploy nfs csi driver")
flag.BoolVar(&testCephFS, "test-cephfs", true, "test cephFS csi driver") flag.BoolVar(&testCephFS, "test-cephfs", true, "test cephFS csi driver")
flag.BoolVar(&testRBD, "test-rbd", true, "test rbd csi driver") flag.BoolVar(&testRBD, "test-rbd", true, "test rbd csi driver")
flag.BoolVar(&testRBDFSCrypt, "test-rbd-fscrypt", false, "test rbd csi driver fscrypt support")
flag.BoolVar(&testNBD, "test-nbd", false, "test rbd csi driver with rbd-nbd mounter") flag.BoolVar(&testNBD, "test-nbd", false, "test rbd csi driver with rbd-nbd mounter")
flag.BoolVar(&testNFS, "test-nfs", false, "test nfs csi driver") flag.BoolVar(&testNFS, "test-nfs", false, "test nfs csi driver")
flag.BoolVar(&helmTest, "helm-test", false, "tests running on deployment via helm") flag.BoolVar(&helmTest, "helm-test", false, "tests running on deployment via helm")

View File

@ -232,6 +232,24 @@ func checkClusternameInMetadata(f *framework.Framework, ns, pool, image string)
} }
} }
// ByFileAndBlockEncryption wraps ginkgo's By to run the test body using file and block encryption specific validators.
func ByFileAndBlockEncryption(
text string,
callback func(validator encryptionValidateFunc, pvcValidator validateFunc, encryptionType util.EncryptionType),
) {
By(text+" (block)", func() {
callback(validateEncryptedPVCAndAppBinding, isBlockEncryptedPVC, util.EncryptionTypeBlock)
})
By(text+" (file)", func() {
if !testRBDFSCrypt {
e2elog.Logf("skipping RBD fscrypt file encryption test")
return
}
callback(validateEncryptedFilesystemAndAppBinding, isFileEncryptedPVC, util.EncryptionTypeFile)
})
}
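For orientation, a minimal sketch of how the test cases below consume this wrapper (the test name and storage-class options here are illustrative only): the body runs twice, once with the block validator and util.EncryptionTypeBlock and once with the file validator and util.EncryptionTypeFile, and the file pass is skipped unless -test-rbd-fscrypt is set.

ByFileAndBlockEncryption("create an encrypted PVC and bind it to an app", func(
	validator encryptionValidateFunc, _ validateFunc, encType util.EncryptionType,
) {
	// illustrative storage class: encryption enabled with the type under test
	scOpts := map[string]string{
		"encrypted":      "true",
		"encryptionType": util.EncryptionTypeString(encType),
	}
	err := createRBDStorageClass(f.ClientSet, f, defaultSCName, nil, scOpts, deletePolicy)
	if err != nil {
		e2elog.Failf("failed to create storageclass: %v", err)
	}
	// validator is validateEncryptedPVCAndAppBinding for the block pass and
	// validateEncryptedFilesystemAndAppBinding for the file pass
	err = validator(pvcPath, appPath, noKMS, f)
	if err != nil {
		e2elog.Failf("failed to validate encrypted pvc: %v", err)
	}
})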
var _ = Describe("RBD", func() { var _ = Describe("RBD", func() {
f := framework.NewDefaultFramework(rbdType) f := framework.NewDefaultFramework(rbdType)
var c clientset.Interface var c clientset.Interface
@ -989,7 +1007,8 @@ var _ = Describe("RBD", func() {
noKMS, noKMS, noKMS, noKMS,
defaultSCName, defaultSCName,
erasureCodedPool, erasureCodedPool,
f,
noPVCValidation)
}) })
By("create an erasure coded PVC and validate PVC-PVC clone", func() { By("create an erasure coded PVC and validate PVC-PVC clone", func() {
@ -1871,13 +1890,14 @@ var _ = Describe("RBD", func() {
} }
}) })
By("create a PVC and bind it to an app using rbd-nbd mounter with encryption", func() { ByFileAndBlockEncryption("create a PVC and bind it to an app using rbd-nbd mounter with encryption", func(
validator encryptionValidateFunc, _ validateFunc, encType util.EncryptionType,
) {
if !testNBD { if !testNBD {
e2elog.Logf("skipping NBD test") e2elog.Logf("skipping NBD test")
return return
} }
err := deleteResource(rbdExamplePath + "storageclass.yaml") err := deleteResource(rbdExamplePath + "storageclass.yaml")
if err != nil { if err != nil {
e2elog.Failf("failed to delete storageclass: %v", err) e2elog.Failf("failed to delete storageclass: %v", err)
@ -1893,12 +1913,13 @@ var _ = Describe("RBD", func() {
"mapOptions": nbdMapOptions, "mapOptions": nbdMapOptions,
"cephLogStrategy": e2eDefaultCephLogStrategy, "cephLogStrategy": e2eDefaultCephLogStrategy,
"encrypted": "true", "encrypted": "true",
"encryptionType": util.EncryptionTypeString(encType),
}, },
deletePolicy) deletePolicy)
if err != nil { if err != nil {
e2elog.Failf("failed to create storageclass: %v", err) e2elog.Failf("failed to create storageclass: %v", err)
} }
err = validator(pvcPath, appPath, noKMS, f)
if err != nil { if err != nil {
e2elog.Failf("failed to validate encrypted pvc: %v", err) e2elog.Failf("failed to validate encrypted pvc: %v", err)
} }
@ -1915,7 +1936,9 @@ var _ = Describe("RBD", func() {
} }
}) })
By("create a PVC and bind it to an app with encrypted RBD volume", func() { ByFileAndBlockEncryption("create a PVC and bind it to an app with encrypted RBD volume", func(
validator encryptionValidateFunc, _ validateFunc, encType util.EncryptionType,
) {
err := deleteResource(rbdExamplePath + "storageclass.yaml") err := deleteResource(rbdExamplePath + "storageclass.yaml")
if err != nil { if err != nil {
e2elog.Failf("failed to delete storageclass: %v", err) e2elog.Failf("failed to delete storageclass: %v", err)
@ -1925,12 +1948,12 @@ var _ = Describe("RBD", func() {
f, f,
defaultSCName, defaultSCName,
nil, nil,
map[string]string{"encrypted": "true"}, map[string]string{"encrypted": "true", "encryptionType": util.EncryptionTypeString(encType)},
deletePolicy) deletePolicy)
if err != nil { if err != nil {
e2elog.Failf("failed to create storageclass: %v", err) e2elog.Failf("failed to create storageclass: %v", err)
} }
err = validator(pvcPath, appPath, noKMS, f)
if err != nil { if err != nil {
e2elog.Failf("failed to validate encrypted pvc: %v", err) e2elog.Failf("failed to validate encrypted pvc: %v", err)
} }
@ -1947,7 +1970,9 @@ var _ = Describe("RBD", func() {
} }
}) })
By("Resize Encrypted Block PVC and check Device size", func() { ByFileAndBlockEncryption("Resize Encrypted Block PVC and check Device size", func(
validator encryptionValidateFunc, _ validateFunc, encType util.EncryptionType,
) {
err := deleteResource(rbdExamplePath + "storageclass.yaml") err := deleteResource(rbdExamplePath + "storageclass.yaml")
if err != nil { if err != nil {
e2elog.Failf("failed to delete storageclass: %v", err) e2elog.Failf("failed to delete storageclass: %v", err)
@ -1957,7 +1982,7 @@ var _ = Describe("RBD", func() {
f, f,
defaultSCName, defaultSCName,
nil, nil,
map[string]string{"encrypted": "true"}, map[string]string{"encrypted": "true", "encryptionType": util.EncryptionTypeString(encType)},
deletePolicy) deletePolicy)
if err != nil { if err != nil {
e2elog.Failf("failed to create storageclass: %v", err) e2elog.Failf("failed to create storageclass: %v", err)
@ -1972,15 +1997,16 @@ var _ = Describe("RBD", func() {
validateRBDImageCount(f, 0, defaultRBDPool) validateRBDImageCount(f, 0, defaultRBDPool)
validateOmapCount(f, 0, rbdType, defaultRBDPool, volumesType) validateOmapCount(f, 0, rbdType, defaultRBDPool, volumesType)
if encType != util.EncryptionTypeFile {
	// Block PVC resize
	err = resizePVCAndValidateSize(rawPvcPath, rawAppPath, f)
	if err != nil {
		e2elog.Failf("failed to resize block PVC: %v", err)
	}
	// validate created backend rbd images
	validateRBDImageCount(f, 0, defaultRBDPool)
	validateOmapCount(f, 0, rbdType, defaultRBDPool, volumesType)
}
err = deleteResource(rbdExamplePath + "storageclass.yaml") err = deleteResource(rbdExamplePath + "storageclass.yaml")
if err != nil { if err != nil {
e2elog.Failf("failed to delete storageclass: %v", err) e2elog.Failf("failed to delete storageclass: %v", err)
@ -1991,7 +2017,9 @@ var _ = Describe("RBD", func() {
} }
}) })
By("create a PVC and bind it to an app with encrypted RBD volume with VaultKMS", func() { ByFileAndBlockEncryption("create a PVC and bind it to an app with encrypted RBD volume with VaultKMS", func(
validator encryptionValidateFunc, _ validateFunc, encType util.EncryptionType,
) {
err := deleteResource(rbdExamplePath + "storageclass.yaml") err := deleteResource(rbdExamplePath + "storageclass.yaml")
if err != nil { if err != nil {
e2elog.Failf("failed to delete storageclass: %v", err) e2elog.Failf("failed to delete storageclass: %v", err)
@ -1999,12 +2027,13 @@ var _ = Describe("RBD", func() {
scOpts := map[string]string{ scOpts := map[string]string{
"encrypted": "true", "encrypted": "true",
"encryptionKMSID": "vault-test", "encryptionKMSID": "vault-test",
"encryptionType": util.EncryptionTypeString(encType),
} }
err = createRBDStorageClass(f.ClientSet, f, defaultSCName, nil, scOpts, deletePolicy) err = createRBDStorageClass(f.ClientSet, f, defaultSCName, nil, scOpts, deletePolicy)
if err != nil { if err != nil {
e2elog.Failf("failed to create storageclass: %v", err) e2elog.Failf("failed to create storageclass: %v", err)
} }
err = validator(pvcPath, appPath, vaultKMS, f)
if err != nil { if err != nil {
e2elog.Failf("failed to validate encrypted pvc: %v", err) e2elog.Failf("failed to validate encrypted pvc: %v", err)
} }
@ -2021,7 +2050,9 @@ var _ = Describe("RBD", func() {
} }
}) })
By("create a PVC and bind it to an app with encrypted RBD volume with VaultTokensKMS", func() { ByFileAndBlockEncryption("create a PVC and bind it to an app with encrypted RBD volume with VaultTokensKMS", func(
validator encryptionValidateFunc, _ validateFunc, encType util.EncryptionType,
) {
err := deleteResource(rbdExamplePath + "storageclass.yaml") err := deleteResource(rbdExamplePath + "storageclass.yaml")
if err != nil { if err != nil {
e2elog.Failf("failed to delete storageclass: %v", err) e2elog.Failf("failed to delete storageclass: %v", err)
@ -2029,6 +2060,7 @@ var _ = Describe("RBD", func() {
scOpts := map[string]string{ scOpts := map[string]string{
"encrypted": "true", "encrypted": "true",
"encryptionKMSID": "vault-tokens-test", "encryptionKMSID": "vault-tokens-test",
"encryptionType": util.EncryptionTypeString(encType),
} }
err = createRBDStorageClass(f.ClientSet, f, defaultSCName, nil, scOpts, deletePolicy) err = createRBDStorageClass(f.ClientSet, f, defaultSCName, nil, scOpts, deletePolicy)
if err != nil { if err != nil {
@ -2048,7 +2080,7 @@ var _ = Describe("RBD", func() {
e2elog.Failf("failed to create Secret with tenant token: %v", err) e2elog.Failf("failed to create Secret with tenant token: %v", err)
} }
err = validator(pvcPath, appPath, vaultTokensKMS, f)
if err != nil { if err != nil {
e2elog.Failf("failed to validate encrypted pvc: %v", err) e2elog.Failf("failed to validate encrypted pvc: %v", err)
} }
@ -2072,7 +2104,9 @@ var _ = Describe("RBD", func() {
} }
}) })
By("create a PVC and bind it to an app with encrypted RBD volume with VaultTenantSA KMS", func() { ByFileAndBlockEncryption("create a PVC and bind it to an app with encrypted RBD volume with VaultTenantSA KMS", func(
validator encryptionValidateFunc, _ validateFunc, encType util.EncryptionType,
) {
err := deleteResource(rbdExamplePath + "storageclass.yaml") err := deleteResource(rbdExamplePath + "storageclass.yaml")
if err != nil { if err != nil {
e2elog.Failf("failed to delete storageclass: %v", err) e2elog.Failf("failed to delete storageclass: %v", err)
@ -2080,6 +2114,7 @@ var _ = Describe("RBD", func() {
scOpts := map[string]string{ scOpts := map[string]string{
"encrypted": "true", "encrypted": "true",
"encryptionKMSID": "vault-tenant-sa-test", "encryptionKMSID": "vault-tenant-sa-test",
"encryptionType": util.EncryptionTypeString(encType),
} }
err = createRBDStorageClass(f.ClientSet, f, defaultSCName, nil, scOpts, deletePolicy) err = createRBDStorageClass(f.ClientSet, f, defaultSCName, nil, scOpts, deletePolicy)
if err != nil { if err != nil {
@ -2092,7 +2127,7 @@ var _ = Describe("RBD", func() {
} }
defer deleteTenantServiceAccount(f.UniqueName) defer deleteTenantServiceAccount(f.UniqueName)
err = validator(pvcPath, appPath, vaultTenantSAKMS, f)
if err != nil { if err != nil {
e2elog.Failf("failed to validate encrypted pvc: %v", err) e2elog.Failf("failed to validate encrypted pvc: %v", err)
} }
@ -2109,37 +2144,41 @@ var _ = Describe("RBD", func() {
} }
}) })
By("create a PVC and bind it to an app with encrypted RBD volume with SecretsMetadataKMS", func() { ByFileAndBlockEncryption("create a PVC and bind it to an app with encrypted RBD volume with SecretsMetadataKMS",
err := deleteResource(rbdExamplePath + "storageclass.yaml") func(validator encryptionValidateFunc, _ validateFunc, encType util.EncryptionType) {
if err != nil { err := deleteResource(rbdExamplePath + "storageclass.yaml")
e2elog.Failf("failed to delete storageclass: %v", err) if err != nil {
} e2elog.Failf("failed to delete storageclass: %v", err)
scOpts := map[string]string{ }
"encrypted": "true", scOpts := map[string]string{
"encryptionKMSID": "secrets-metadata-test", "encrypted": "true",
} "encryptionKMSID": "secrets-metadata-test",
err = createRBDStorageClass(f.ClientSet, f, defaultSCName, nil, scOpts, deletePolicy) "encryptionType": util.EncryptionTypeString(encType),
if err != nil { }
e2elog.Failf("failed to create storageclass: %v", err) err = createRBDStorageClass(f.ClientSet, f, defaultSCName, nil, scOpts, deletePolicy)
} if err != nil {
err = validateEncryptedPVCAndAppBinding(pvcPath, appPath, noKMS, f) e2elog.Failf("failed to create storageclass: %v", err)
if err != nil { }
e2elog.Failf("failed to validate encrypted pvc: %v", err) err = validator(pvcPath, appPath, noKMS, f)
} if err != nil {
// validate created backend rbd images e2elog.Failf("failed to validate encrypted pvc: %v", err)
validateRBDImageCount(f, 0, defaultRBDPool) }
validateOmapCount(f, 0, rbdType, defaultRBDPool, volumesType) // validate created backend rbd images
err = deleteResource(rbdExamplePath + "storageclass.yaml") validateRBDImageCount(f, 0, defaultRBDPool)
if err != nil { validateOmapCount(f, 0, rbdType, defaultRBDPool, volumesType)
e2elog.Failf("failed to delete storageclass: %v", err) err = deleteResource(rbdExamplePath + "storageclass.yaml")
} if err != nil {
err = createRBDStorageClass(f.ClientSet, f, defaultSCName, nil, nil, deletePolicy) e2elog.Failf("failed to delete storageclass: %v", err)
if err != nil { }
e2elog.Failf("failed to create storageclass: %v", err) err = createRBDStorageClass(f.ClientSet, f, defaultSCName, nil, nil, deletePolicy)
} if err != nil {
}) e2elog.Failf("failed to create storageclass: %v", err)
}
})
By("test RBD volume encryption with user secrets based SecretsMetadataKMS", func() { ByFileAndBlockEncryption("test RBD volume encryption with user secrets based SecretsMetadataKMS", func(
validator encryptionValidateFunc, _ validateFunc, encType util.EncryptionType,
) {
err := deleteResource(rbdExamplePath + "storageclass.yaml") err := deleteResource(rbdExamplePath + "storageclass.yaml")
if err != nil { if err != nil {
e2elog.Failf("failed to delete storageclass: %v", err) e2elog.Failf("failed to delete storageclass: %v", err)
@ -2147,6 +2186,7 @@ var _ = Describe("RBD", func() {
scOpts := map[string]string{ scOpts := map[string]string{
"encrypted": "true", "encrypted": "true",
"encryptionKMSID": "user-ns-secrets-metadata-test", "encryptionKMSID": "user-ns-secrets-metadata-test",
"encryptionType": util.EncryptionTypeString(encType),
} }
err = createRBDStorageClass(f.ClientSet, f, defaultSCName, nil, scOpts, deletePolicy) err = createRBDStorageClass(f.ClientSet, f, defaultSCName, nil, scOpts, deletePolicy)
if err != nil { if err != nil {
@ -2162,7 +2202,7 @@ var _ = Describe("RBD", func() {
e2elog.Failf("failed to create user Secret: %v", err) e2elog.Failf("failed to create user Secret: %v", err)
} }
err = validator(pvcPath, appPath, noKMS, f)
if err != nil { if err != nil {
e2elog.Failf("failed to validate encrypted pvc: %v", err) e2elog.Failf("failed to validate encrypted pvc: %v", err)
} }
@ -2190,9 +2230,9 @@ var _ = Describe("RBD", func() {
} }
}) })
ByFileAndBlockEncryption(
	"test RBD volume encryption with user secrets based SecretsMetadataKMS with tenant namespace",
	func(validator encryptionValidateFunc, isEncryptedPVC validateFunc, encType util.EncryptionType) {
err := deleteResource(rbdExamplePath + "storageclass.yaml") err := deleteResource(rbdExamplePath + "storageclass.yaml")
if err != nil { if err != nil {
e2elog.Failf("failed to delete storageclass: %v", err) e2elog.Failf("failed to delete storageclass: %v", err)
@ -2200,6 +2240,7 @@ var _ = Describe("RBD", func() {
scOpts := map[string]string{ scOpts := map[string]string{
"encrypted": "true", "encrypted": "true",
"encryptionKMSID": "user-secrets-metadata-test", "encryptionKMSID": "user-secrets-metadata-test",
"encryptionType": util.EncryptionTypeString(encType),
} }
err = createRBDStorageClass(f.ClientSet, f, defaultSCName, nil, scOpts, deletePolicy) err = createRBDStorageClass(f.ClientSet, f, defaultSCName, nil, scOpts, deletePolicy)
if err != nil { if err != nil {
@ -2215,7 +2256,7 @@ var _ = Describe("RBD", func() {
e2elog.Failf("failed to create user Secret: %v", err) e2elog.Failf("failed to create user Secret: %v", err)
} }
err = validator(pvcPath, appPath, noKMS, f)
if err != nil { if err != nil {
e2elog.Failf("failed to validate encrypted pvc: %v", err) e2elog.Failf("failed to validate encrypted pvc: %v", err)
} }
@ -2298,7 +2339,8 @@ var _ = Describe("RBD", func() {
noKMS, noKMS, noKMS, noKMS,
defaultSCName, defaultSCName,
noDataPool, noDataPool,
f,
noPVCValidation)
}) })
By("create a PVC-PVC clone and bind it to an app", func() { By("create a PVC-PVC clone and bind it to an app", func() {
@ -2315,7 +2357,9 @@ var _ = Describe("RBD", func() {
f) f)
}) })
By("create an encrypted PVC snapshot and restore it for an app with VaultKMS", func() { ByFileAndBlockEncryption("create an encrypted PVC snapshot and restore it for an app with VaultKMS", func(
validator encryptionValidateFunc, isEncryptedPVC validateFunc, encType util.EncryptionType,
) {
err := deleteResource(rbdExamplePath + "storageclass.yaml") err := deleteResource(rbdExamplePath + "storageclass.yaml")
if err != nil { if err != nil {
e2elog.Failf("failed to delete storageclass: %v", err) e2elog.Failf("failed to delete storageclass: %v", err)
@ -2323,6 +2367,7 @@ var _ = Describe("RBD", func() {
scOpts := map[string]string{ scOpts := map[string]string{
"encrypted": "true", "encrypted": "true",
"encryptionKMSID": "vault-test", "encryptionKMSID": "vault-test",
"encryptionType": util.EncryptionTypeString(encType),
} }
err = createRBDStorageClass(f.ClientSet, f, defaultSCName, nil, scOpts, deletePolicy) err = createRBDStorageClass(f.ClientSet, f, defaultSCName, nil, scOpts, deletePolicy)
if err != nil { if err != nil {
@ -2333,7 +2378,7 @@ var _ = Describe("RBD", func() {
pvcPath, appPath, snapshotPath, pvcClonePath, appClonePath, pvcPath, appPath, snapshotPath, pvcClonePath, appClonePath,
vaultKMS, vaultKMS, vaultKMS, vaultKMS,
defaultSCName, noDataPool, defaultSCName, noDataPool,
f, isEncryptedPVC)
err = deleteResource(rbdExamplePath + "storageclass.yaml") err = deleteResource(rbdExamplePath + "storageclass.yaml")
if err != nil { if err != nil {
@ -2345,7 +2390,9 @@ var _ = Describe("RBD", func() {
} }
}) })
By("Validate PVC restore from vaultKMS to vaultTenantSAKMS", func() { ByFileAndBlockEncryption("Validate PVC restore from vaultKMS to vaultTenantSAKMS", func(
validator encryptionValidateFunc, isEncryptedPVC validateFunc, encType util.EncryptionType,
) {
restoreSCName := "restore-sc" restoreSCName := "restore-sc"
err := deleteResource(rbdExamplePath + "storageclass.yaml") err := deleteResource(rbdExamplePath + "storageclass.yaml")
if err != nil { if err != nil {
@ -2354,6 +2401,7 @@ var _ = Describe("RBD", func() {
scOpts := map[string]string{ scOpts := map[string]string{
"encrypted": "true", "encrypted": "true",
"encryptionKMSID": "vault-test", "encryptionKMSID": "vault-test",
"encryptionType": util.EncryptionTypeString(encType),
} }
err = createRBDStorageClass(f.ClientSet, f, defaultSCName, nil, scOpts, deletePolicy) err = createRBDStorageClass(f.ClientSet, f, defaultSCName, nil, scOpts, deletePolicy)
if err != nil { if err != nil {
@ -2363,6 +2411,7 @@ var _ = Describe("RBD", func() {
scOpts = map[string]string{ scOpts = map[string]string{
"encrypted": "true", "encrypted": "true",
"encryptionKMSID": "vault-tenant-sa-test", "encryptionKMSID": "vault-tenant-sa-test",
"encryptionType": util.EncryptionTypeString(encType),
} }
err = createRBDStorageClass(f.ClientSet, f, restoreSCName, nil, scOpts, deletePolicy) err = createRBDStorageClass(f.ClientSet, f, restoreSCName, nil, scOpts, deletePolicy)
if err != nil { if err != nil {
@ -2378,7 +2427,8 @@ var _ = Describe("RBD", func() {
validatePVCSnapshot(1, validatePVCSnapshot(1,
pvcPath, appPath, snapshotPath, pvcClonePath, appClonePath, pvcPath, appPath, snapshotPath, pvcClonePath, appClonePath,
vaultKMS, vaultTenantSAKMS, vaultKMS, vaultTenantSAKMS,
restoreSCName, noDataPool, f,
isEncryptedPVC)
err = retryKubectlArgs(cephCSINamespace, kubectlDelete, deployTimeout, "storageclass", restoreSCName) err = retryKubectlArgs(cephCSINamespace, kubectlDelete, deployTimeout, "storageclass", restoreSCName)
if err != nil { if err != nil {
@ -2400,7 +2450,9 @@ var _ = Describe("RBD", func() {
} }
}) })
By("Validate PVC-PVC clone with different SC from vaultKMS to vaultTenantSAKMS", func() { ByFileAndBlockEncryption("Validate PVC-PVC clone with different SC from vaultKMS to vaultTenantSAKMS", func(
validator encryptionValidateFunc, isValidPVC validateFunc, encType util.EncryptionType,
) {
restoreSCName := "restore-sc" restoreSCName := "restore-sc"
err := deleteResource(rbdExamplePath + "storageclass.yaml") err := deleteResource(rbdExamplePath + "storageclass.yaml")
if err != nil { if err != nil {
@ -2409,6 +2461,7 @@ var _ = Describe("RBD", func() {
scOpts := map[string]string{ scOpts := map[string]string{
"encrypted": "true", "encrypted": "true",
"encryptionKMSID": "vault-test", "encryptionKMSID": "vault-test",
"encryptionType": util.EncryptionTypeString(encType),
} }
err = createRBDStorageClass(f.ClientSet, f, defaultSCName, nil, scOpts, deletePolicy) err = createRBDStorageClass(f.ClientSet, f, defaultSCName, nil, scOpts, deletePolicy)
if err != nil { if err != nil {
@ -2418,6 +2471,7 @@ var _ = Describe("RBD", func() {
scOpts = map[string]string{ scOpts = map[string]string{
"encrypted": "true", "encrypted": "true",
"encryptionKMSID": "vault-tenant-sa-test", "encryptionKMSID": "vault-tenant-sa-test",
"encryptionType": util.EncryptionTypeString(encType),
} }
err = createRBDStorageClass(f.ClientSet, f, restoreSCName, nil, scOpts, deletePolicy) err = createRBDStorageClass(f.ClientSet, f, restoreSCName, nil, scOpts, deletePolicy)
if err != nil { if err != nil {
@ -2438,7 +2492,7 @@ var _ = Describe("RBD", func() {
restoreSCName, restoreSCName,
noDataPool, noDataPool,
secretsMetadataKMS, secretsMetadataKMS,
isValidPVC,
f) f)
err = retryKubectlArgs(cephCSINamespace, kubectlDelete, deployTimeout, "storageclass", restoreSCName) err = retryKubectlArgs(cephCSINamespace, kubectlDelete, deployTimeout, "storageclass", restoreSCName)
@ -2460,7 +2514,9 @@ var _ = Describe("RBD", func() {
} }
}) })
By("create an encrypted PVC-PVC clone and bind it to an app", func() { ByFileAndBlockEncryption("create an encrypted PVC-PVC clone and bind it to an app", func(
validator encryptionValidateFunc, isValidPVC validateFunc, encType util.EncryptionType,
) {
err := deleteResource(rbdExamplePath + "storageclass.yaml") err := deleteResource(rbdExamplePath + "storageclass.yaml")
if err != nil { if err != nil {
e2elog.Failf("failed to delete storageclass: %v", err) e2elog.Failf("failed to delete storageclass: %v", err)
@ -2468,6 +2524,7 @@ var _ = Describe("RBD", func() {
scOpts := map[string]string{ scOpts := map[string]string{
"encrypted": "true", "encrypted": "true",
"encryptionKMSID": "secrets-metadata-test", "encryptionKMSID": "secrets-metadata-test",
"encryptionType": util.EncryptionTypeString(encType),
} }
err = createRBDStorageClass(f.ClientSet, f, defaultSCName, nil, scOpts, deletePolicy) err = createRBDStorageClass(f.ClientSet, f, defaultSCName, nil, scOpts, deletePolicy)
if err != nil { if err != nil {
@ -2482,7 +2539,7 @@ var _ = Describe("RBD", func() {
defaultSCName, defaultSCName,
noDataPool, noDataPool,
secretsMetadataKMS, secretsMetadataKMS,
isValidPVC,
f) f)
err = deleteResource(rbdExamplePath + "storageclass.yaml") err = deleteResource(rbdExamplePath + "storageclass.yaml")
@ -2495,7 +2552,9 @@ var _ = Describe("RBD", func() {
} }
}) })
By("create an encrypted PVC-PVC clone and bind it to an app with VaultKMS", func() { ByFileAndBlockEncryption("create an encrypted PVC-PVC clone and bind it to an app with VaultKMS", func(
validator encryptionValidateFunc, isValidPVC validateFunc, encType util.EncryptionType,
) {
err := deleteResource(rbdExamplePath + "storageclass.yaml") err := deleteResource(rbdExamplePath + "storageclass.yaml")
if err != nil { if err != nil {
e2elog.Failf("failed to delete storageclass: %v", err) e2elog.Failf("failed to delete storageclass: %v", err)
@ -2503,6 +2562,7 @@ var _ = Describe("RBD", func() {
scOpts := map[string]string{ scOpts := map[string]string{
"encrypted": "true", "encrypted": "true",
"encryptionKMSID": "vault-test", "encryptionKMSID": "vault-test",
"encryptionType": util.EncryptionTypeString(encType),
} }
err = createRBDStorageClass(f.ClientSet, f, defaultSCName, nil, scOpts, deletePolicy) err = createRBDStorageClass(f.ClientSet, f, defaultSCName, nil, scOpts, deletePolicy)
if err != nil { if err != nil {
@ -2517,7 +2577,7 @@ var _ = Describe("RBD", func() {
defaultSCName, defaultSCName,
noDataPool, noDataPool,
vaultKMS, vaultKMS,
isValidPVC,
f) f)
err = deleteResource(rbdExamplePath + "storageclass.yaml") err = deleteResource(rbdExamplePath + "storageclass.yaml")
@ -3992,10 +4052,13 @@ var _ = Describe("RBD", func() {
} }
}) })
By("restore snapshot to bigger size encrypted PVC with VaultKMS", func() { ByFileAndBlockEncryption("restore snapshot to bigger size encrypted PVC with VaultKMS", func(
_ encryptionValidateFunc, _ validateFunc, encType util.EncryptionType,
) {
scOpts := map[string]string{ scOpts := map[string]string{
"encrypted": "true", "encrypted": "true",
"encryptionKMSID": "vault-test", "encryptionKMSID": "vault-test",
"encryptionType": util.EncryptionTypeString(encType),
} }
err := createRBDStorageClass(f.ClientSet, f, defaultSCName, nil, scOpts, deletePolicy) err := createRBDStorageClass(f.ClientSet, f, defaultSCName, nil, scOpts, deletePolicy)
if err != nil { if err != nil {
@ -4027,15 +4090,17 @@ var _ = Describe("RBD", func() {
if err != nil { if err != nil {
e2elog.Failf("failed to validate restore bigger size clone: %v", err) e2elog.Failf("failed to validate restore bigger size clone: %v", err)
} }
if encType != util.EncryptionTypeFile {
	// validate block mode PVC
	err = validateBiggerPVCFromSnapshot(f,
		rawPvcPath,
		rawAppPath,
		snapshotPath,
		pvcBlockRestorePath,
		appBlockRestorePath)
	if err != nil {
		e2elog.Failf("failed to validate restore bigger size clone: %v", err)
	}
}
}) })
@ -4050,9 +4115,12 @@ var _ = Describe("RBD", func() {
}) })
By("clone PVC to a bigger size PVC", func() { By("clone PVC to a bigger size PVC", func() {
By("clone PVC to bigger size encrypted PVC with VaultKMS", func() { ByFileAndBlockEncryption("clone PVC to bigger size encrypted PVC with VaultKMS", func(
validator encryptionValidateFunc, _ validateFunc, encType util.EncryptionType,
) {
scOpts := map[string]string{ scOpts := map[string]string{
"encrypted": "true", "encrypted": "true",
"encryptionType": util.EncryptionTypeString(encType),
"encryptionKMSID": "vault-test", "encryptionKMSID": "vault-test",
} }
err := createRBDStorageClass(f.ClientSet, f, defaultSCName, nil, scOpts, deletePolicy) err := createRBDStorageClass(f.ClientSet, f, defaultSCName, nil, scOpts, deletePolicy)
@ -4075,14 +4143,16 @@ var _ = Describe("RBD", func() {
if err != nil { if err != nil {
e2elog.Failf("failed to validate bigger size clone: %v", err) e2elog.Failf("failed to validate bigger size clone: %v", err)
} }
if encType != util.EncryptionTypeFile {
	// validate block mode PVC
	err = validateBiggerCloneFromPVC(f,
		rawPvcPath,
		rawAppPath,
		pvcBlockSmartClonePath,
		appBlockSmartClonePath)
	if err != nil {
		e2elog.Failf("failed to validate bigger size clone: %v", err)
	}
}
}) })
@ -4268,14 +4338,38 @@ var _ = Describe("RBD", func() {
validateOmapCount(f, 0, rbdType, defaultRBDPool, volumesType) validateOmapCount(f, 0, rbdType, defaultRBDPool, volumesType)
}) })
By("create a PVC and bind it to an app with encrypted RBD volume (default type setting)", func() {
	err := deleteResource(rbdExamplePath + "storageclass.yaml")
	if err != nil {
		e2elog.Failf("failed to delete storageclass: %v", err)
	}
	err = createRBDStorageClass(
		f.ClientSet,
		f,
		defaultSCName,
		nil,
		map[string]string{"encrypted": "true"},
		deletePolicy)
	if err != nil {
		e2elog.Failf("failed to create storageclass: %v", err)
	}
	err = validateEncryptedPVCAndAppBinding(pvcPath, appPath, noKMS, f)
	if err != nil {
		e2elog.Failf("failed to validate encrypted pvc: %v", err)
	}
	// validate created backend rbd images
	validateRBDImageCount(f, 0, defaultRBDPool)
	validateOmapCount(f, 0, rbdType, defaultRBDPool, volumesType)
	err = deleteResource(rbdExamplePath + "storageclass.yaml")
	if err != nil {
		e2elog.Failf("failed to delete storageclass: %v", err)
	}
	err = createRBDStorageClass(f.ClientSet, f, defaultSCName, nil, nil, deletePolicy)
	if err != nil {
		e2elog.Failf("failed to create storageclass: %v", err)
	}
})
// delete RBD provisioner secret // delete RBD provisioner secret
err := deleteCephUser(f, keyringRBDProvisionerUsername) err := deleteCephUser(f, keyringRBDProvisionerUsername)
if err != nil { if err != nil {
@ -4286,6 +4380,15 @@ var _ = Describe("RBD", func() {
if err != nil { if err != nil {
e2elog.Failf("failed to delete user %s: %v", keyringRBDNodePluginUsername, err) e2elog.Failf("failed to delete user %s: %v", keyringRBDNodePluginUsername, err)
} }
// Make sure this should be last testcase in this file, because
// it deletes pool
By("Create a PVC and delete PVC when backend pool deleted", func() {
err := pvcDeleteWhenPoolNotFound(pvcPath, false, f)
if err != nil {
e2elog.Failf("failed to delete PVC when pool not found: %v", err)
}
})
}) })
}) })
}) })

View File

@ -482,6 +482,8 @@ func validateCloneInDifferentPool(f *framework.Framework, snapshotPool, cloneSc,
return nil return nil
} }
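// encryptionValidateFunc creates a PVC and app from the given manifests and
// validates that the provisioned volume is encrypted, checking the passphrase
// handling against the supplied KMS configuration.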
type encryptionValidateFunc func(pvcPath, appPath string, kms kmsConfig, f *framework.Framework) error
func validateEncryptedPVCAndAppBinding(pvcPath, appPath string, kms kmsConfig, f *framework.Framework) error { func validateEncryptedPVCAndAppBinding(pvcPath, appPath string, kms kmsConfig, f *framework.Framework) error {
pvc, app, err := createPVCAndAppBinding(pvcPath, appPath, f, deployTimeout) pvc, app, err := createPVCAndAppBinding(pvcPath, appPath, f, deployTimeout)
if err != nil { if err != nil {
@ -531,20 +533,84 @@ func validateEncryptedPVCAndAppBinding(pvcPath, appPath string, kms kmsConfig, f
return nil return nil
} }
func validateEncryptedFilesystemAndAppBinding(pvcPath, appPath string, kms kmsConfig, f *framework.Framework) error {
pvc, app, err := createPVCAndAppBinding(pvcPath, appPath, f, deployTimeout)
if err != nil {
return err
}
imageData, err := getImageInfoFromPVC(pvc.Namespace, pvc.Name, f)
if err != nil {
return err
}
rbdImageSpec := imageSpec(defaultRBDPool, imageData.imageName)
err = validateEncryptedFilesystem(f, rbdImageSpec, imageData.pvName, app.Name)
if err != nil {
return err
}
if kms != noKMS && kms.canGetPassphrase() {
// check new passphrase created
_, stdErr := kms.getPassphrase(f, imageData.csiVolumeHandle)
if stdErr != "" {
return fmt.Errorf("failed to read passphrase from vault: %s", stdErr)
}
}
err = deletePVCAndApp("", f, pvc, app)
if err != nil {
return err
}
if kms != noKMS && kms.canGetPassphrase() {
// check the passphrase has been deleted
stdOut, _ := kms.getPassphrase(f, imageData.csiVolumeHandle)
if stdOut != "" {
return fmt.Errorf("passphrase found in vault while should be deleted: %s", stdOut)
}
}
if kms != noKMS && kms.canVerifyKeyDestroyed() {
destroyed, msg := kms.verifyKeyDestroyed(f, imageData.csiVolumeHandle)
if !destroyed {
return fmt.Errorf("passphrased was not destroyed: %s", msg)
} else if msg != "" {
e2elog.Logf("passphrase destroyed, but message returned: %s", msg)
}
}
return nil
}
type validateFunc func(f *framework.Framework, pvc *v1.PersistentVolumeClaim, app *v1.Pod) error type validateFunc func(f *framework.Framework, pvc *v1.PersistentVolumeClaim, app *v1.Pod) error
// noPVCValidation can be used to pass to validatePVCClone when no extra // noPVCValidation can be used to pass to validatePVCClone when no extra
// validation of the PVC is needed. // validation of the PVC is needed.
var noPVCValidation validateFunc var noPVCValidation validateFunc
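// imageValidateFunc verifies the encryption state of a provisioned volume,
// either on the backing RBD image (block encryption) or on the mounted
// filesystem (fscrypt file encryption).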
type imageValidateFunc func(f *framework.Framework, rbdImageSpec, pvName, appName string) error
func isEncryptedPVC(
f *framework.Framework,
pvc *v1.PersistentVolumeClaim,
app *v1.Pod,
validateFunc imageValidateFunc,
) error {
imageData, err := getImageInfoFromPVC(pvc.Namespace, pvc.Name, f) imageData, err := getImageInfoFromPVC(pvc.Namespace, pvc.Name, f)
if err != nil { if err != nil {
return err return err
} }
rbdImageSpec := imageSpec(defaultRBDPool, imageData.imageName) rbdImageSpec := imageSpec(defaultRBDPool, imageData.imageName)
return validateFunc(f, rbdImageSpec, imageData.pvName, app.Name)
}
func isBlockEncryptedPVC(f *framework.Framework, pvc *v1.PersistentVolumeClaim, app *v1.Pod) error {
return isEncryptedPVC(f, pvc, app, validateEncryptedImage)
}
func isFileEncryptedPVC(f *framework.Framework, pvc *v1.PersistentVolumeClaim, app *v1.Pod) error {
return isEncryptedPVC(f, pvc, app, validateEncryptedFilesystem)
} }
// validateEncryptedImage verifies that the RBD image is encrypted. The // validateEncryptedImage verifies that the RBD image is encrypted. The
@ -583,6 +649,48 @@ func validateEncryptedImage(f *framework.Framework, rbdImageSpec, pvName, appNam
return nil return nil
} }
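// validateEncryptedFilesystem verifies that the volume mounted for the given
// app uses fscrypt file encryption: files below the kubelet mount path must
// carry the "Encrypted" attribute (checked with lsattr from the csi-rbdplugin
// container), and the mount must not be backed by a dm-crypt ("crypt") device.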
func validateEncryptedFilesystem(f *framework.Framework, rbdImageSpec, pvName, appName string) error {
pod, err := f.ClientSet.CoreV1().Pods(f.UniqueName).Get(context.TODO(), appName, metav1.GetOptions{})
if err != nil {
return fmt.Errorf("failed to get pod %q in namespace %q: %w", appName, f.UniqueName, err)
}
volumeMountPath := fmt.Sprintf(
"/var/lib/kubelet/pods/%s/volumes/kubernetes.io~csi/%s/mount",
pod.UID,
pvName)
selector, err := getDaemonSetLabelSelector(f, cephCSINamespace, rbdDaemonsetName)
if err != nil {
return fmt.Errorf("failed to get labels: %w", err)
}
opt := metav1.ListOptions{
LabelSelector: selector,
}
cmd := fmt.Sprintf("lsattr -la %s | grep -E '%s/.\\s+Encrypted'", volumeMountPath, volumeMountPath)
_, _, err = execCommandInContainer(f, cmd, cephCSINamespace, "csi-rbdplugin", &opt)
if err != nil {
cmd = fmt.Sprintf("lsattr -lRa %s", volumeMountPath)
stdOut, stdErr, listErr := execCommandInContainer(f, cmd, cephCSINamespace, "csi-rbdplugin", &opt)
if listErr == nil {
return fmt.Errorf("error checking file encrypted attribute of %q. listing filesystem+attrs: %s %s",
volumeMountPath, stdOut, stdErr)
}
return fmt.Errorf("error checking file encrypted attribute: %w", err)
}
mountType, err := getMountType(selector, volumeMountPath, f)
if err != nil {
return err
}
if mountType == "crypt" {
return fmt.Errorf("mount type of %q is %v suggesting that the block device was encrypted,"+
" when it must not have been", volumeMountPath, mountType)
}
return nil
}
func listRBDImages(f *framework.Framework, pool string) ([]string, error) { func listRBDImages(f *framework.Framework, pool string) ([]string, error) {
var imgInfos []string var imgInfos []string

View File

@ -85,6 +85,7 @@ var (
deployNFS bool deployNFS bool
testCephFS bool testCephFS bool
testRBD bool testRBD bool
testRBDFSCrypt bool
testNBD bool testNBD bool
testNFS bool testNFS bool
helmTest bool helmTest bool
@ -1025,6 +1026,7 @@ func validatePVCSnapshot(
pvcPath, appPath, snapshotPath, pvcClonePath, appClonePath string, pvcPath, appPath, snapshotPath, pvcClonePath, appClonePath string,
kms, restoreKMS kmsConfig, restoreSCName, kms, restoreKMS kmsConfig, restoreSCName,
dataPool string, f *framework.Framework, dataPool string, f *framework.Framework,
isEncryptedPVC validateFunc,
) { ) {
var wg sync.WaitGroup var wg sync.WaitGroup
wgErrs := make([]error, totalCount) wgErrs := make([]error, totalCount)
@ -1448,7 +1450,7 @@ func validateController(
} }
if scParams["encrypted"] == strconv.FormatBool(true) { if scParams["encrypted"] == strconv.FormatBool(true) {
// check encryption // check encryption
err = isBlockEncryptedPVC(f, resizePvc, app)
if err != nil { if err != nil {
return err return err
} }

View File

@ -108,6 +108,14 @@ parameters:
# A string is expected here, i.e. "true", not true. # A string is expected here, i.e. "true", not true.
# encrypted: "true" # encrypted: "true"
# (optional) Select the encryption type when encrypted: "true" above.
# Valid values are:
# "file": Enable file encryption on the mounted filesystem
# "block": Encrypt RBD block device
# When unspecified, the type defaults to "block". "file" and "block" are
# mutually exclusive.
# encryptionType: "block"
# (optional) Use external key management system for encryption passphrases by # (optional) Use external key management system for encryption passphrases by
# specifying a unique ID matching KMS ConfigMap. The ID is only used for # specifying a unique ID matching KMS ConfigMap. The ID is only used for
# correlation to configmap entry. # correlation to configmap entry.

go.mod
View File

@ -14,6 +14,7 @@ require (
github.com/csi-addons/spec v0.1.2-0.20220906123848-52ce69f90900 github.com/csi-addons/spec v0.1.2-0.20220906123848-52ce69f90900
github.com/gemalto/kmip-go v0.0.8-0.20220721195433-3fe83e2d3f26 github.com/gemalto/kmip-go v0.0.8-0.20220721195433-3fe83e2d3f26
github.com/golang/protobuf v1.5.2 github.com/golang/protobuf v1.5.2
github.com/google/fscrypt v0.3.3
github.com/google/uuid v1.3.0 github.com/google/uuid v1.3.0
github.com/grpc-ecosystem/go-grpc-middleware v1.3.0 github.com/grpc-ecosystem/go-grpc-middleware v1.3.0
github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0
@ -23,6 +24,7 @@ require (
github.com/libopenstorage/secrets v0.0.0-20210908194121-a1d19aa9713a github.com/libopenstorage/secrets v0.0.0-20210908194121-a1d19aa9713a
github.com/onsi/ginkgo/v2 v2.1.6 github.com/onsi/ginkgo/v2 v2.1.6
github.com/onsi/gomega v1.20.1 github.com/onsi/gomega v1.20.1
github.com/pkg/xattr v0.4.7
github.com/prometheus/client_golang v1.12.2 github.com/prometheus/client_golang v1.12.2
github.com/stretchr/testify v1.8.0 github.com/stretchr/testify v1.8.0
golang.org/x/crypto v0.0.0-20220315160706-3147a52a75dd golang.org/x/crypto v0.0.0-20220315160706-3147a52a75dd
@ -149,7 +151,7 @@ require (
go.uber.org/zap v1.21.0 // indirect go.uber.org/zap v1.21.0 // indirect
golang.org/x/oauth2 v0.0.0-20211104180415-d3ed0bb246c8 // indirect golang.org/x/oauth2 v0.0.0-20211104180415-d3ed0bb246c8 // indirect
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211 // indirect golang.org/x/term v0.0.0-20210927222741-03fcf44c2211 // indirect
golang.org/x/text v0.3.8 // indirect
golang.org/x/time v0.0.0-20220210224613-90d013bbcef8 // indirect golang.org/x/time v0.0.0-20220210224613-90d013bbcef8 // indirect
gomodules.xyz/jsonpatch/v2 v2.2.0 // indirect gomodules.xyz/jsonpatch/v2 v2.2.0 // indirect
google.golang.org/appengine v1.6.7 // indirect google.golang.org/appengine v1.6.7 // indirect

go.sum
View File

@ -485,6 +485,8 @@ github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ
github.com/google/btree v1.0.1/go.mod h1:xXMiIv4Fb/0kKde4SpL7qlzvu5cMJDRkFDxJfI9uaxA= github.com/google/btree v1.0.1/go.mod h1:xXMiIv4Fb/0kKde4SpL7qlzvu5cMJDRkFDxJfI9uaxA=
github.com/google/cadvisor v0.45.0/go.mod h1:vsMT3Uv2XjQ8M7WUtKARV74mU/HN64C4XtM1bJhUKcU= github.com/google/cadvisor v0.45.0/go.mod h1:vsMT3Uv2XjQ8M7WUtKARV74mU/HN64C4XtM1bJhUKcU=
github.com/google/cel-go v0.12.4/go.mod h1:Av7CU6r6X3YmcHR9GXqVDaEJYfEtSxl6wvIjUQTriCw= github.com/google/cel-go v0.12.4/go.mod h1:Av7CU6r6X3YmcHR9GXqVDaEJYfEtSxl6wvIjUQTriCw=
github.com/google/fscrypt v0.3.3 h1:qwx9OCR/xZE68VGr/r0/yugFhlGpIOGsH9JHrttP7vc=
github.com/google/fscrypt v0.3.3/go.mod h1:H1JHtH8BVe0dYNhzx1Ztkn3azQ0OBdoOmM828vEWAXc=
github.com/google/gnostic v0.5.7-v3refs h1:FhTMOKj2VhjpouxvWJAV1TL304uMlb9zcDqkl6cEI54= github.com/google/gnostic v0.5.7-v3refs h1:FhTMOKj2VhjpouxvWJAV1TL304uMlb9zcDqkl6cEI54=
github.com/google/gnostic v0.5.7-v3refs/go.mod h1:73MKFl6jIHelAJNaBGFzt3SPtZULs9dYrGFt8OiIsHQ= github.com/google/gnostic v0.5.7-v3refs/go.mod h1:73MKFl6jIHelAJNaBGFzt3SPtZULs9dYrGFt8OiIsHQ=
github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
@ -991,6 +993,8 @@ github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINE
github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pkg/sftp v1.10.1/go.mod h1:lYOWFsE0bwd1+KfKJaKeuokY15vzFx25BLbzYYoAxZI= github.com/pkg/sftp v1.10.1/go.mod h1:lYOWFsE0bwd1+KfKJaKeuokY15vzFx25BLbzYYoAxZI=
github.com/pkg/xattr v0.4.7 h1:XoA3KzmFvyPlH4RwX5eMcgtzcaGBaSvgt3IoFQfbrmQ=
github.com/pkg/xattr v0.4.7/go.mod h1:di8WF84zAKk8jzR1UBTEWh9AUlIZZ7M/JNt8e9B6ktU=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/portworx/dcos-secrets v0.0.0-20180616013705-8e8ec3f66611/go.mod h1:4hklRW/4DQpLqkcXcjtNprbH2tz/sJaNtqinfPWl/LA= github.com/portworx/dcos-secrets v0.0.0-20180616013705-8e8ec3f66611/go.mod h1:4hklRW/4DQpLqkcXcjtNprbH2tz/sJaNtqinfPWl/LA=
@ -1143,6 +1147,7 @@ github.com/vishvananda/netlink v1.1.0/go.mod h1:cTgwzPIzzgDAYoQrMm0EdrjRUBkTqKYp
github.com/vishvananda/netns v0.0.0-20191106174202-0a2b9b5464df/go.mod h1:JP3t17pCcGlemwknint6hfoeCVQrEMVwxRLRjXpq+BU= github.com/vishvananda/netns v0.0.0-20191106174202-0a2b9b5464df/go.mod h1:JP3t17pCcGlemwknint6hfoeCVQrEMVwxRLRjXpq+BU=
github.com/vishvananda/netns v0.0.0-20200728191858-db3c7e526aae/go.mod h1:DD4vA1DwXk04H54A1oHXtwZmA0grkVMdPxx/VGLCah0= github.com/vishvananda/netns v0.0.0-20200728191858-db3c7e526aae/go.mod h1:DD4vA1DwXk04H54A1oHXtwZmA0grkVMdPxx/VGLCah0=
github.com/vmware/govmomi v0.20.3/go.mod h1:URlwyTFZX72RmxtxuaFL2Uj3fD1JTvZdx59bHWk6aFU= github.com/vmware/govmomi v0.20.3/go.mod h1:URlwyTFZX72RmxtxuaFL2Uj3fD1JTvZdx59bHWk6aFU=
github.com/wadey/gocovmerge v0.0.0-20160331181800-b5bfa59ec0ad/go.mod h1:Hy8o65+MXnS6EwGElrSRjUzQDLXreJlzYLlWiHtt8hM=
github.com/xdg/scram v0.0.0-20180814205039-7eeb5667e42c/go.mod h1:lB8K/P019DLNhemzwFU4jHLhdvlE6uDZjXFejJXr49I= github.com/xdg/scram v0.0.0-20180814205039-7eeb5667e42c/go.mod h1:lB8K/P019DLNhemzwFU4jHLhdvlE6uDZjXFejJXr49I=
github.com/xdg/stringprep v1.0.0/go.mod h1:Jhud4/sHMO4oL310DaZAKk9ZaJ08SJfe+sJh0HrGL1Y= github.com/xdg/stringprep v1.0.0/go.mod h1:Jhud4/sHMO4oL310DaZAKk9ZaJ08SJfe+sJh0HrGL1Y=
github.com/xi2/xz v0.0.0-20171230120015-48954b6210f8/go.mod h1:HUYIGzjTL3rfEspMxjDjgmT5uz5wzYJKVo23qUhYTos= github.com/xi2/xz v0.0.0-20171230120015-48954b6210f8/go.mod h1:HUYIGzjTL3rfEspMxjDjgmT5uz5wzYJKVo23qUhYTos=
@ -1497,12 +1502,14 @@ golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBc
golang.org/x/sys v0.0.0-20220114195835-da31bd327af9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220114195835-da31bd327af9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220319134239-a9b59b0215f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220319134239-a9b59b0215f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220328115105-d36c6a25d886/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220328115105-d36c6a25d886/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220408201424-a24fb2fb8a0f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220422013727-9388b58f7150/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220422013727-9388b58f7150/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f h1:v4INt8xihDGvnrfjMDVXGxw9wrfxYyCjk0KbXjhR55s= golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f h1:v4INt8xihDGvnrfjMDVXGxw9wrfxYyCjk0KbXjhR55s=
golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/term v0.0.0-20210422114643-f5beecf764ed/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/term v0.0.0-20210615171337-6886f2dfbf5b/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.0.0-20210615171337-6886f2dfbf5b/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211 h1:JGgROgKl9N8DuW20oFS5gxc+lE67/N3FcwmBPMe7ArY= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211 h1:JGgROgKl9N8DuW20oFS5gxc+lE67/N3FcwmBPMe7ArY=
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
@ -1515,8 +1522,9 @@ golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.7 h1:olpwvP2KacW1ZWvsR7uQhoyTYvKAupfQrRGBFM352Gk=
golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
golang.org/x/text v0.3.8 h1:nAL+RVCQ9uMn3vJZbV+MRnydTJFPf8qqY42YiA6MrqY=
golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ=
golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
@ -1551,6 +1559,7 @@ golang.org/x/tools v0.0.0-20190718200317-82a3ea8a504c/go.mod h1:jcCCGcm9btYwXyDq
golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191025023517-2077df36852e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191108193012-7d206e10da11/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191108193012-7d206e10da11/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191112195655-aa38f8e97acc/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191112195655-aa38f8e97acc/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=

View File

@ -40,7 +40,7 @@ func (s *subVolumeClient) supportsSubVolMetadata() bool {
func (s *subVolumeClient) isUnsupportedSubVolMetadata(err error) bool { func (s *subVolumeClient) isUnsupportedSubVolMetadata(err error) bool {
var invalid fsAdmin.NotImplementedError var invalid fsAdmin.NotImplementedError
if err != nil && errors.Is(err, &invalid) { if err != nil && errors.As(err, &invalid) {
// In case the error is other than invalid command return error to the caller. // In case the error is other than invalid command return error to the caller.
clusterAdditionalInfo[s.clusterID].subVolMetadataState = unsupported clusterAdditionalInfo[s.clusterID].subVolMetadataState = unsupported
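This hunk (and the matching one in the snapshot metadata client below) switches errors.Is to errors.As, because NotImplementedError has to be matched by type, possibly through wrapping, rather than as a sentinel value. A small self-contained sketch of the distinction, using a hypothetical stand-in type:

package main

import (
	"errors"
	"fmt"
)

// hypothetical stand-in for the fsAdmin.NotImplementedError used above
type notImplementedError struct{ cmd string }

func (e notImplementedError) Error() string { return e.cmd + ": not implemented" }

func main() {
	err := fmt.Errorf("rados call failed: %w", notImplementedError{cmd: "fs subvolume metadata set"})

	var notImpl notImplementedError
	// errors.As unwraps err and matches by type, so the wrapped typed error is found;
	// errors.Is would only report true for a matching sentinel value.
	if errors.As(err, &notImpl) {
		fmt.Println("metadata commands unsupported:", notImpl.cmd)
	}
}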

View File

@ -36,7 +36,7 @@ func (s *snapshotClient) supportsSubVolSnapMetadata() bool {
func (s *snapshotClient) isUnsupportedSubVolSnapMetadata(err error) bool { func (s *snapshotClient) isUnsupportedSubVolSnapMetadata(err error) bool {
var invalid fsAdmin.NotImplementedError var invalid fsAdmin.NotImplementedError
if err != nil && errors.Is(err, &invalid) { if err != nil && errors.As(err, &invalid) {
// In case the error is other than invalid command return error to // In case the error is other than invalid command return error to
// the caller. // the caller.
clusterAdditionalInfo[s.clusterID].subVolSnapshotMetadataState = unsupported clusterAdditionalInfo[s.clusterID].subVolSnapshotMetadataState = unsupported

View File

@ -90,7 +90,7 @@ func CheckVolExists(ctx context.Context,
defer j.Destroy() defer j.Destroy()
imageData, err := j.CheckReservation( imageData, err := j.CheckReservation(
ctx, volOptions.MetadataPool, volOptions.RequestName, volOptions.NamePrefix, "", "", util.EncryptionTypeNone)
if err != nil { if err != nil {
return nil, err return nil, err
} }
@ -278,7 +278,7 @@ func ReserveVol(ctx context.Context, volOptions *VolumeOptions, secret map[strin
imageUUID, vid.FsSubvolName, err = j.ReserveName( imageUUID, vid.FsSubvolName, err = j.ReserveName(
ctx, volOptions.MetadataPool, util.InvalidPoolID, ctx, volOptions.MetadataPool, util.InvalidPoolID,
volOptions.MetadataPool, util.InvalidPoolID, volOptions.RequestName, volOptions.MetadataPool, util.InvalidPoolID, volOptions.RequestName,
volOptions.NamePrefix, "", "", volOptions.ReservedID, "", volOptions.BackingSnapshotID) volOptions.NamePrefix, "", "", volOptions.ReservedID, "", volOptions.BackingSnapshotID, util.EncryptionTypeNone)
if err != nil { if err != nil {
return nil, err return nil, err
} }
@ -321,7 +321,7 @@ func ReserveSnap(
imageUUID, vid.FsSnapshotName, err = j.ReserveName( imageUUID, vid.FsSnapshotName, err = j.ReserveName(
ctx, volOptions.MetadataPool, util.InvalidPoolID, ctx, volOptions.MetadataPool, util.InvalidPoolID,
volOptions.MetadataPool, util.InvalidPoolID, snap.RequestName, volOptions.MetadataPool, util.InvalidPoolID, snap.RequestName,
snap.NamePrefix, parentSubVolName, "", snap.ReservedID, "", "", util.EncryptionTypeNone)
if err != nil { if err != nil {
return nil, err return nil, err
} }
@ -390,7 +390,7 @@ func CheckSnapExists(
defer j.Destroy() defer j.Destroy()
snapData, err := j.CheckReservation( snapData, err := j.CheckReservation(
ctx, volOptions.MetadataPool, snap.RequestName, snap.NamePrefix, volOptions.VolID, "", util.EncryptionTypeNone)
if err != nil { if err != nil {
return nil, nil, err return nil, nil, err
} }

View File

@ -149,6 +149,9 @@ type Config struct {
// encryptKMS in which encryption passphrase was saved, default is no encryption // encryptKMS in which encryption passphrase was saved, default is no encryption
encryptKMSKey string encryptKMSKey string
// encryptionType is the type of encryption (block or file) used for the volume, default is no encryption
encryptionType string
// ownerKey is used to identify the owner of the volume, can be used with some KMS configurations // ownerKey is used to identify the owner of the volume, can be used with some KMS configurations
ownerKey string ownerKey string
@ -172,6 +175,7 @@ func NewCSIVolumeJournal(suffix string) *Config {
namespace: "", namespace: "",
csiImageIDKey: "csi.imageid", csiImageIDKey: "csi.imageid",
encryptKMSKey: "csi.volume.encryptKMS", encryptKMSKey: "csi.volume.encryptKMS",
encryptionType: "csi.volume.encryptionType",
ownerKey: "csi.volume.owner", ownerKey: "csi.volume.owner",
backingSnapshotIDKey: "csi.volume.backingsnapshotid", backingSnapshotIDKey: "csi.volume.backingsnapshotid",
commonPrefix: "csi.", commonPrefix: "csi.",
@ -191,6 +195,7 @@ func NewCSISnapshotJournal(suffix string) *Config {
namespace: "", namespace: "",
csiImageIDKey: "csi.imageid", csiImageIDKey: "csi.imageid",
encryptKMSKey: "csi.volume.encryptKMS", encryptKMSKey: "csi.volume.encryptKMS",
encryptionType: "csi.volume.encryptionType",
ownerKey: "csi.volume.owner", ownerKey: "csi.volume.owner",
commonPrefix: "csi.", commonPrefix: "csi.",
} }
@ -280,6 +285,7 @@ Return values:
*/ */
func (conn *Connection) CheckReservation(ctx context.Context, func (conn *Connection) CheckReservation(ctx context.Context,
journalPool, reqName, namePrefix, snapParentName, kmsConfig string, journalPool, reqName, namePrefix, snapParentName, kmsConfig string,
encryptionType util.EncryptionType,
) (*ImageData, error) { ) (*ImageData, error) {
var ( var (
snapSource bool snapSource bool
@ -377,6 +383,16 @@ func (conn *Connection) CheckReservation(ctx context.Context,
} }
} }
if encryptionType != util.EncryptionTypeNone {
if savedImageAttributes.EncryptionType != encryptionType {
return nil, fmt.Errorf("internal state inconsistent, omap encryption type"+
" mismatch, request type %q(%d) volume UUID (%s) volume omap encryption type %q (%d)",
util.EncryptionTypeString(encryptionType), encryptionType,
objUUID, util.EncryptionTypeString(savedImageAttributes.EncryptionType),
savedImageAttributes.EncryptionType)
}
}
// TODO: skipping due to excessive poolID to poolname call, also this should never happen! // TODO: skipping due to excessive poolID to poolname call, also this should never happen!
// check if journal pool points back to the passed in journal pool // check if journal pool points back to the passed in journal pool
// if savedJournalPoolID != journalPoolID { // if savedJournalPoolID != journalPoolID {
@ -530,6 +546,7 @@ Input arguments:
- namePrefix: Prefix to use when generating the image/subvolume name (suffix is an auto-generated UUID) - namePrefix: Prefix to use when generating the image/subvolume name (suffix is an auto-generated UUID)
- parentName: Name of the parent image/subvolume if reservation is for a snapshot (optional) - parentName: Name of the parent image/subvolume if reservation is for a snapshot (optional)
- kmsConf: Name of the key management service used to encrypt the image (optional) - kmsConf: Name of the key management service used to encrypt the image (optional)
- encryptionType: Type of encryption used when kmsConf is set (optional)
- volUUID: UUID need to be reserved instead of auto-generating one (this is useful for mirroring and metro-DR) - volUUID: UUID need to be reserved instead of auto-generating one (this is useful for mirroring and metro-DR)
- owner: the owner of the volume (optional) - owner: the owner of the volume (optional)
- backingSnapshotID: ID of the snapshot on which the CephFS snapshot-backed volume is based (optional) - backingSnapshotID: ID of the snapshot on which the CephFS snapshot-backed volume is based (optional)
@ -544,6 +561,7 @@ func (conn *Connection) ReserveName(ctx context.Context,
imagePool string, imagePoolID int64, imagePool string, imagePoolID int64,
reqName, namePrefix, parentName, kmsConf, volUUID, owner, reqName, namePrefix, parentName, kmsConf, volUUID, owner,
backingSnapshotID string, backingSnapshotID string,
encryptionType util.EncryptionType,
) (string, string, error) { ) (string, string, error) {
// TODO: Take in-arg as ImageAttributes? // TODO: Take in-arg as ImageAttributes?
var ( var (
@ -624,6 +642,7 @@ func (conn *Connection) ReserveName(ctx context.Context,
// Update UUID directory to store encryption values // Update UUID directory to store encryption values
if kmsConf != "" { if kmsConf != "" {
omapValues[cj.encryptKMSKey] = kmsConf omapValues[cj.encryptKMSKey] = kmsConf
omapValues[cj.encryptionType] = util.EncryptionTypeString(encryptionType)
} }
// if owner is passed, set it in the UUID directory too // if owner is passed, set it in the UUID directory too
@ -660,14 +679,15 @@ func (conn *Connection) ReserveName(ctx context.Context,
// ImageAttributes contains all CSI stored image attributes, typically as OMap keys. // ImageAttributes contains all CSI stored image attributes, typically as OMap keys.
type ImageAttributes struct { type ImageAttributes struct {
RequestName string // Contains the request name for the passed in UUID RequestName string // Contains the request name for the passed in UUID
SourceName string // Contains the parent image name for the passed in UUID, if it is a snapshot SourceName string // Contains the parent image name for the passed in UUID, if it is a snapshot
ImageName string // Contains the image or subvolume name for the passed in UUID ImageName string // Contains the image or subvolume name for the passed in UUID
KmsID string // Contains encryption KMS, if it is an encrypted image KmsID string // Contains encryption KMS, if it is an encrypted image
Owner string // Contains the owner to be used in combination with KmsID (for some KMS) EncryptionType util.EncryptionType // Type of encryption used, if image encrypted
ImageID string // Contains the image id Owner string // Contains the owner to be used in combination with KmsID (for some KMS)
JournalPoolID int64 // Pool ID of the CSI journal pool, stored in big endian format (on-disk data) ImageID string // Contains the image id
BackingSnapshotID string // ID of the snapshot on which the CephFS snapshot-backed volume is based JournalPoolID int64 // Pool ID of the CSI journal pool, stored in big endian format (on-disk data)
BackingSnapshotID string // ID of the snapshot on which the CephFS snapshot-backed volume is based
} }
// GetImageAttributes fetches all keys and their values, from a UUID directory, returning ImageAttributes structure. // GetImageAttributes fetches all keys and their values, from a UUID directory, returning ImageAttributes structure.
@ -692,6 +712,7 @@ func (conn *Connection) GetImageAttributes(
cj.csiNameKey, cj.csiNameKey,
cj.csiImageKey, cj.csiImageKey,
cj.encryptKMSKey, cj.encryptKMSKey,
cj.encryptionType,
cj.csiJournalPool, cj.csiJournalPool,
cj.cephSnapSourceKey, cj.cephSnapSourceKey,
cj.csiImageIDKey, cj.csiImageIDKey,
@ -711,6 +732,7 @@ func (conn *Connection) GetImageAttributes(
var found bool var found bool
imageAttributes.RequestName = values[cj.csiNameKey] imageAttributes.RequestName = values[cj.csiNameKey]
imageAttributes.KmsID = values[cj.encryptKMSKey] imageAttributes.KmsID = values[cj.encryptKMSKey]
imageAttributes.EncryptionType = util.ParseEncryptionType(values[cj.encryptionType])
imageAttributes.Owner = values[cj.ownerKey] imageAttributes.Owner = values[cj.ownerKey]
imageAttributes.ImageID = values[cj.csiImageIDKey] imageAttributes.ImageID = values[cj.csiImageIDKey]
imageAttributes.BackingSnapshotID = values[cj.backingSnapshotIDKey] imageAttributes.BackingSnapshotID = values[cj.backingSnapshotIDKey]

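For orientation only, a hedged in-repo sketch of how a caller threads the new encryptionType argument through ReserveName and CheckReservation; the pool names, request name, name prefix and KMS config name below are placeholders, not values from this change:

package example

import (
    "context"

    "github.com/ceph/ceph-csi/internal/journal"
    "github.com/ceph/ceph-csi/internal/util"
)

func reserveBlockEncrypted(ctx context.Context, j *journal.Connection) error {
    // Reserve a name; "block" is persisted under the new
    // csi.volume.encryptionType OMAP key of the UUID directory.
    // Placeholders: pool, request name, prefix, KMS config name, owner.
    _, _, err := j.ReserveName(
        ctx, "replicapool", util.InvalidPoolID, "replicapool", util.InvalidPoolID,
        "pvc-1234", "csi-vol-", "", "vault-kms-config", "", "tenant-a",
        "", util.EncryptionTypeBlock)
    if err != nil {
        return err
    }

    // Re-checking the reservation with a different encryption type would
    // now fail with the "omap encryption type mismatch" error added above.
    _, err = j.CheckReservation(
        ctx, "replicapool", "pvc-1234", "csi-vol-", "", "vault-kms-config",
        util.EncryptionTypeBlock)

    return err
}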
View File

@ -226,3 +226,7 @@ func (kms *awsMetadataKMS) DecryptDEK(volumeID, encryptedDEK string) (string, er
return string(result.Plaintext), nil return string(result.Plaintext), nil
} }
func (kms *awsMetadataKMS) GetSecret(volumeID string) (string, error) {
return "", ErrGetSecretUnsupported
}

71 internal/kms/dummy.go Normal file
View File

@ -0,0 +1,71 @@
/*
Copyright 2022 The Ceph-CSI Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package kms
import "encoding/base64"
type TestDummyFunc func() EncryptionKMS
type ProviderTest struct {
UniqueID string
CreateTestDummy TestDummyFunc
}
type kmsTestProviderList struct {
providers map[string]ProviderTest
}
var kmsTestManager = kmsTestProviderList{providers: map[string]ProviderTest{}}
func RegisterTestProvider(provider ProviderTest) bool {
kmsTestManager.providers[provider.UniqueID] = provider
return true
}
func GetKMSTestDummy(kmsID string) EncryptionKMS {
provider, ok := kmsTestManager.providers[kmsID]
if !ok {
return nil
}
return provider.CreateTestDummy()
}
func GetKMSTestProvider() map[string]ProviderTest {
return kmsTestManager.providers
}
func newDefaultTestDummy() EncryptionKMS {
return secretsKMS{passphrase: base64.URLEncoding.EncodeToString(
[]byte("test dummy passphrase"))}
}
func newSecretsMetadataTestDummy() EncryptionKMS {
smKMS := secretsMetadataKMS{}
smKMS.secretsKMS = secretsKMS{passphrase: base64.URLEncoding.EncodeToString(
[]byte("test dummy passphrase"))}
return smKMS
}
var _ = RegisterTestProvider(ProviderTest{
UniqueID: kmsTypeSecretsMetadata,
CreateTestDummy: newSecretsMetadataTestDummy,
})
var _ = RegisterTestProvider(ProviderTest{
UniqueID: DefaultKMSType,
CreateTestDummy: newDefaultTestDummy,
})

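A hedged sketch of how these registrations might be exercised from a test in the same package; the test name and assertion are illustrative, not part of this change:

package kms

import (
    "errors"
    "testing"
)

// TestDummyGetSecret is illustrative only.
func TestDummyGetSecret(t *testing.T) {
    dummyKMS := GetKMSTestDummy(kmsTypeSecretsMetadata)
    if dummyKMS == nil {
        t.Fatal("no test dummy registered for the secrets-metadata KMS")
    }

    // The secrets-metadata dummy wraps a fixed passphrase, so GetSecret
    // should not report ErrGetSecretUnsupported here.
    if _, err := dummyKMS.GetSecret("volume-id"); errors.Is(err, ErrGetSecretUnsupported) {
        t.Fatalf("secrets-metadata KMS is expected to support GetSecret: %v", err)
    }
}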
View File

@ -242,3 +242,7 @@ func (kms *keyProtectKMS) DecryptDEK(volumeID, encryptedDEK string) (string, err
return string(result), nil return string(result), nil
} }
func (kms *keyProtectKMS) GetSecret(volumeID string) (string, error) {
return "", ErrGetSecretUnsupported
}

View File

@ -500,6 +500,10 @@ func (kms *kmipKMS) verifyResponse(
return &batchItem, nil return &batchItem, nil
} }
func (kms *kmipKMS) GetSecret(volumeID string) (string, error) {
return "", ErrGetSecretUnsupported
}
// TODO: use the following structs from https://github.com/gemalto/kmip-go // TODO: use the following structs from https://github.com/gemalto/kmip-go
// when https://github.com/ThalesGroup/kmip-go/issues/21 is resolved. // when https://github.com/ThalesGroup/kmip-go/issues/21 is resolved.
// refer: https://docs.oasis-open.org/kmip/spec/v1.4/kmip-spec-v1.4.html. // refer: https://docs.oasis-open.org/kmip/spec/v1.4/kmip-spec-v1.4.html.

View File

@ -19,6 +19,7 @@ package kms
import ( import (
"context" "context"
"encoding/json" "encoding/json"
"errors"
"fmt" "fmt"
"os" "os"
@ -53,6 +54,11 @@ const (
DefaultKMSType = "default" DefaultKMSType = "default"
) )
var (
ErrGetSecretUnsupported = errors.New("KMS does not support access to user provided secret")
ErrGetSecretIntegrated = errors.New("integrated DEK stores do not allow GetSecret")
)
// GetKMS returns an instance of Key Management System. // GetKMS returns an instance of Key Management System.
// //
// - tenant is the owner of the Volume, used to fetch the Vault Token from the // - tenant is the owner of the Volume, used to fetch the Vault Token from the
@ -332,6 +338,11 @@ type EncryptionKMS interface {
// function does not need to do anything except return the encyptedDEK // function does not need to do anything except return the encyptedDEK
// as it was received. // as it was received.
DecryptDEK(volumeID, encyptedDEK string) (string, error) DecryptDEK(volumeID, encyptedDEK string) (string, error)
// GetSecret allows external key management systems to
// retrieve keys used in EncryptDEK / DecryptDEK to use them
// directly. Example: fscrypt uses this to unlock raw protectors
GetSecret(volumeID string) (string, error)
} }
// DEKStoreType describes what DEKStore needs to be configured when using a // DEKStoreType describes what DEKStore needs to be configured when using a
@ -377,6 +388,10 @@ func (i integratedDEK) DecryptDEK(volumeID, encyptedDEK string) (string, error)
return encyptedDEK, nil return encyptedDEK, nil
} }
func (i integratedDEK) GetSecret(volumeID string) (string, error) {
return "", ErrGetSecretIntegrated
}
// getKeys takes a map that uses strings for keys and returns a slice with the // getKeys takes a map that uses strings for keys and returns a slice with the
// keys. // keys.
func getKeys(m map[string]interface{}) []string { func getKeys(m map[string]interface{}) []string {

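A minimal sketch, assuming EncryptionKMS also exposes RequiresDEKStore() as used elsewhere in this change, of how a caller can probe whether a KMS is usable for fscrypt; the helper name is made up:

package kms

import "errors"

// canBackFileEncryption is an illustrative helper. It mirrors the probe the
// RBD driver performs before enabling file encryption.
func canBackFileEncryption(encKMS EncryptionKMS) bool {
    // Integrated DEK stores provide the passphrase via GetCryptoPassphrase.
    if encKMS.RequiresDEKStore() == DEKStoreIntegrated {
        return true
    }
    // Otherwise fscrypt needs direct access to the secret; only
    // ErrGetSecretUnsupported rules the KMS out.
    _, err := encKMS.GetSecret("")
    return !errors.Is(err, ErrGetSecretUnsupported)
}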
View File

@ -263,6 +263,11 @@ func (kms secretsMetadataKMS) DecryptDEK(volumeID, encryptedDEK string) (string,
return string(dek), nil return string(dek), nil
} }
func (kms secretsMetadataKMS) GetSecret(volumeID string) (string, error) {
// use the passphrase from the secretKMS
return kms.secretsKMS.FetchDEK(volumeID)
}
// generateCipher returns a AEAD cipher based on a passphrase and salt // generateCipher returns a AEAD cipher based on a passphrase and salt
// (volumeID). The cipher can then be used to encrypt/decrypt the DEK. // (volumeID). The cipher can then be used to encrypt/decrypt the DEK.
func generateCipher(passphrase, salt string) (cipher.AEAD, error) { func generateCipher(passphrase, salt string) (cipher.AEAD, error) {

View File

@ -384,7 +384,11 @@ func (cs *ControllerServer) CreateVolume(
metadata := k8s.GetVolumeMetadata(req.GetParameters()) metadata := k8s.GetVolumeMetadata(req.GetParameters())
err = rbdVol.setAllMetadata(metadata) err = rbdVol.setAllMetadata(metadata)
if err != nil { if err != nil {
return nil, err if deleteErr := rbdVol.deleteImage(ctx); deleteErr != nil {
log.ErrorLog(ctx, "failed to delete rbd image: %s with error: %v", rbdVol, deleteErr)
}
return nil, status.Error(codes.Internal, err.Error())
} }
return buildCreateVolumeResponse(req, rbdVol), nil return buildCreateVolumeResponse(req, rbdVol), nil
@ -1556,7 +1560,7 @@ func (cs *ControllerServer) ControllerExpandVolume(
// 2. Block VolumeMode with Encryption // 2. Block VolumeMode with Encryption
// Hence set nodeExpansion flag based on VolumeMode and Encryption status // Hence set nodeExpansion flag based on VolumeMode and Encryption status
nodeExpansion := true nodeExpansion := true
if req.GetVolumeCapability().GetBlock() != nil && !rbdVol.isEncrypted() { if req.GetVolumeCapability().GetBlock() != nil && !rbdVol.isBlockEncrypted() {
nodeExpansion = false nodeExpansion = false
} }

View File

@ -61,6 +61,13 @@ const (
// DEK is stored. // DEK is stored.
metadataDEK = "rbd.csi.ceph.com/dek" metadataDEK = "rbd.csi.ceph.com/dek"
oldMetadataDEK = ".rbd.csi.ceph.com/dek" oldMetadataDEK = ".rbd.csi.ceph.com/dek"
encryptionPassphraseSize = 20
// rbdDefaultEncryptionType is the default to use when the
// user did not specify an "encryptionType", but set
// "encryption": true.
rbdDefaultEncryptionType = util.EncryptionTypeBlock
) )
// checkRbdImageEncrypted verifies if rbd image was encrypted when created. // checkRbdImageEncrypted verifies if rbd image was encrypted when created.
@ -91,16 +98,30 @@ func (ri *rbdImage) ensureEncryptionMetadataSet(status rbdEncryptionState) error
return nil return nil
} }
// isEncrypted returns `true` if the rbdImage is (or needs to be) encrypted. // isBlockEncrypted returns `true` if the rbdImage is (or needs to be) encrypted.
func (ri *rbdImage) isEncrypted() bool { func (ri *rbdImage) isBlockEncrypted() bool {
return ri.encryption != nil return ri.blockEncryption != nil
} }
// setupEncryption configures the metadata of the RBD image for encryption: // isFileEncrypted returns `true` if the filesystem on the rbdImage is (or needs to be) encrypted.
func (ri *rbdImage) isFileEncrypted() bool {
return ri.fileEncryption != nil
}
func IsFileEncrypted(ctx context.Context, volOptions map[string]string) (bool, error) {
_, encType, err := ParseEncryptionOpts(ctx, volOptions, util.EncryptionTypeInvalid)
if err != nil {
return false, err
}
return encType == util.EncryptionTypeFile, nil
}
// setupBlockEncryption configures the metadata of the RBD image for encryption:
// - the Data-Encryption-Key (DEK) will be generated and stored for use by the KMS; // - the Data-Encryption-Key (DEK) will be generated and stored for use by the KMS;
// - the RBD image will be marked to support encryption in its metadata. // - the RBD image will be marked to support encryption in its metadata.
func (ri *rbdImage) setupEncryption(ctx context.Context) error { func (ri *rbdImage) setupBlockEncryption(ctx context.Context) error {
err := ri.encryption.StoreNewCryptoPassphrase(ri.VolID) err := ri.blockEncryption.StoreNewCryptoPassphrase(ri.VolID, encryptionPassphraseSize)
if err != nil { if err != nil {
log.ErrorLog(ctx, "failed to save encryption passphrase for "+ log.ErrorLog(ctx, "failed to save encryption passphrase for "+
"image %s: %s", ri, err) "image %s: %s", ri, err)
@ -130,7 +151,7 @@ func (ri *rbdImage) setupEncryption(ctx context.Context) error {
// (Usecase: Restoring snapshot into a storageclass with different encryption config). // (Usecase: Restoring snapshot into a storageclass with different encryption config).
func (ri *rbdImage) copyEncryptionConfig(cp *rbdImage, copyOnlyPassphrase bool) error { func (ri *rbdImage) copyEncryptionConfig(cp *rbdImage, copyOnlyPassphrase bool) error {
// nothing to do if parent image is not encrypted. // nothing to do if parent image is not encrypted.
if !ri.isEncrypted() { if !ri.isBlockEncrypted() && !ri.isFileEncrypted() {
return nil return nil
} }
@ -139,25 +160,54 @@ func (ri *rbdImage) copyEncryptionConfig(cp *rbdImage, copyOnlyPassphrase bool)
"set!? Call stack: %s", ri, cp, ri.VolID, util.CallStack()) "set!? Call stack: %s", ri, cp, ri.VolID, util.CallStack())
} }
// get the unencrypted passphrase if ri.isBlockEncrypted() {
passphrase, err := ri.encryption.GetCryptoPassphrase(ri.VolID) // get the unencrypted passphrase
if err != nil { passphrase, err := ri.blockEncryption.GetCryptoPassphrase(ri.VolID)
return fmt.Errorf("failed to fetch passphrase for %q: %w", if err != nil {
ri, err) return fmt.Errorf("failed to fetch passphrase for %q: %w",
} ri, err)
}
if !copyOnlyPassphrase { if !copyOnlyPassphrase {
cp.encryption, err = util.NewVolumeEncryption(ri.encryption.GetID(), ri.encryption.KMS) cp.blockEncryption, err = util.NewVolumeEncryption(ri.blockEncryption.GetID(), ri.blockEncryption.KMS)
if errors.Is(err, util.ErrDEKStoreNeeded) { if errors.Is(err, util.ErrDEKStoreNeeded) {
cp.encryption.SetDEKStore(cp) cp.blockEncryption.SetDEKStore(cp)
}
}
// re-encrypt the plain passphrase for the cloned volume
err = cp.blockEncryption.StoreCryptoPassphrase(cp.VolID, passphrase)
if err != nil {
return fmt.Errorf("failed to store passphrase for %q: %w",
cp, err)
} }
} }
// re-encrypt the plain passphrase for the cloned volume if ri.isFileEncrypted() && !copyOnlyPassphrase {
err = cp.encryption.StoreCryptoPassphrase(cp.VolID, passphrase) var err error
if err != nil { cp.fileEncryption, err = util.NewVolumeEncryption(ri.fileEncryption.GetID(), ri.fileEncryption.KMS)
return fmt.Errorf("failed to store passphrase for %q: %w", if errors.Is(err, util.ErrDEKStoreNeeded) {
cp, err) _, err := ri.fileEncryption.KMS.GetSecret("")
if errors.Is(err, kmsapi.ErrGetSecretUnsupported) {
return err
}
}
}
if ri.isFileEncrypted() && ri.fileEncryption.KMS.RequiresDEKStore() == kmsapi.DEKStoreIntegrated {
// get the unencrypted passphrase
passphrase, err := ri.fileEncryption.GetCryptoPassphrase(ri.VolID)
if err != nil {
return fmt.Errorf("failed to fetch passphrase for %q: %w",
ri, err)
}
// re-encrypt the plain passphrase for the cloned volume
err = cp.fileEncryption.StoreCryptoPassphrase(cp.VolID, passphrase)
if err != nil {
return fmt.Errorf("failed to store passphrase for %q: %w",
cp, err)
}
} }
// copy encryption status for the original volume // copy encryption status for the original volume
@ -166,6 +216,7 @@ func (ri *rbdImage) copyEncryptionConfig(cp *rbdImage, copyOnlyPassphrase bool)
return fmt.Errorf("failed to get encryption status for %q: %w", return fmt.Errorf("failed to get encryption status for %q: %w",
ri, err) ri, err)
} }
err = cp.ensureEncryptionMetadataSet(status) err = cp.ensureEncryptionMetadataSet(status)
if err != nil { if err != nil {
return fmt.Errorf("failed to store encryption status for %q: "+ return fmt.Errorf("failed to store encryption status for %q: "+
@ -178,12 +229,12 @@ func (ri *rbdImage) copyEncryptionConfig(cp *rbdImage, copyOnlyPassphrase bool)
// repairEncryptionConfig checks the encryption state of the current rbdImage, // repairEncryptionConfig checks the encryption state of the current rbdImage,
// and makes sure that the destination rbdImage has the same configuration. // and makes sure that the destination rbdImage has the same configuration.
func (ri *rbdImage) repairEncryptionConfig(dest *rbdImage) error { func (ri *rbdImage) repairEncryptionConfig(dest *rbdImage) error {
if !ri.isEncrypted() { if !ri.isBlockEncrypted() && !ri.isFileEncrypted() {
return nil return nil
} }
// if ri is encrypted, copy its configuration in case it is missing // if ri is encrypted, copy its configuration in case it is missing
if !dest.isEncrypted() { if !dest.isBlockEncrypted() && !dest.isFileEncrypted() {
// dest needs to be connected to the cluster, otherwise it will // dest needs to be connected to the cluster, otherwise it will
// not be possible to write any metadata // not be possible to write any metadata
if dest.conn == nil { if dest.conn == nil {
@ -197,7 +248,7 @@ func (ri *rbdImage) repairEncryptionConfig(dest *rbdImage) error {
} }
func (ri *rbdImage) encryptDevice(ctx context.Context, devicePath string) error { func (ri *rbdImage) encryptDevice(ctx context.Context, devicePath string) error {
passphrase, err := ri.encryption.GetCryptoPassphrase(ri.VolID) passphrase, err := ri.blockEncryption.GetCryptoPassphrase(ri.VolID)
if err != nil { if err != nil {
log.ErrorLog(ctx, "failed to get crypto passphrase for %s: %v", log.ErrorLog(ctx, "failed to get crypto passphrase for %s: %v",
ri, err) ri, err)
@ -223,7 +274,7 @@ func (ri *rbdImage) encryptDevice(ctx context.Context, devicePath string) error
} }
func (rv *rbdVolume) openEncryptedDevice(ctx context.Context, devicePath string) (string, error) { func (rv *rbdVolume) openEncryptedDevice(ctx context.Context, devicePath string) (string, error) {
passphrase, err := rv.encryption.GetCryptoPassphrase(rv.VolID) passphrase, err := rv.blockEncryption.GetCryptoPassphrase(rv.VolID)
if err != nil { if err != nil {
log.ErrorLog(ctx, "failed to get passphrase for encrypted device %s: %v", log.ErrorLog(ctx, "failed to get passphrase for encrypted device %s: %v",
rv, err) rv, err)
@ -255,14 +306,22 @@ func (rv *rbdVolume) openEncryptedDevice(ctx context.Context, devicePath string)
} }
func (ri *rbdImage) initKMS(ctx context.Context, volOptions, credentials map[string]string) error { func (ri *rbdImage) initKMS(ctx context.Context, volOptions, credentials map[string]string) error {
kmsID, err := ri.ParseEncryptionOpts(ctx, volOptions) kmsID, encType, err := ParseEncryptionOpts(ctx, volOptions, rbdDefaultEncryptionType)
if err != nil { if err != nil {
return err return err
} else if kmsID == "" { }
switch encType {
case util.EncryptionTypeBlock:
err = ri.configureBlockEncryption(kmsID, credentials)
case util.EncryptionTypeFile:
err = ri.configureFileEncryption(kmsID, credentials)
case util.EncryptionTypeInvalid:
return fmt.Errorf("invalid encryption type")
case util.EncryptionTypeNone:
return nil return nil
} }
err = ri.configureEncryption(kmsID, credentials)
if err != nil { if err != nil {
return fmt.Errorf("invalid encryption kms configuration: %w", err) return fmt.Errorf("invalid encryption kms configuration: %w", err)
} }
@ -271,10 +330,11 @@ func (ri *rbdImage) initKMS(ctx context.Context, volOptions, credentials map[str
} }
// ParseEncryptionOpts returns kmsID and sets Owner attribute. // ParseEncryptionOpts returns kmsID and sets Owner attribute.
func (ri *rbdImage) ParseEncryptionOpts( func ParseEncryptionOpts(
ctx context.Context, ctx context.Context,
volOptions map[string]string, volOptions map[string]string,
) (string, error) { fallbackEncType util.EncryptionType,
) (string, util.EncryptionType, error) {
var ( var (
err error err error
ok bool ok bool
@ -282,30 +342,57 @@ func (ri *rbdImage) ParseEncryptionOpts(
) )
encrypted, ok = volOptions["encrypted"] encrypted, ok = volOptions["encrypted"]
if !ok { if !ok {
return "", nil return "", util.EncryptionTypeNone, nil
} }
kmsID, err = util.FetchEncryptionKMSID(encrypted, volOptions["encryptionKMSID"]) kmsID, err = util.FetchEncryptionKMSID(encrypted, volOptions["encryptionKMSID"])
if err != nil { if err != nil {
return "", err return "", util.EncryptionTypeInvalid, err
} }
return kmsID, nil encType := util.FetchEncryptionType(volOptions, fallbackEncType)
return kmsID, encType, nil
} }
// configureEncryption sets up the VolumeEncryption for this rbdImage. Once // configureBlockEncryption sets up the VolumeEncryption for this rbdImage. Once
// configured, use isEncrypted() to see if the volume supports encryption. // configured, use isBlockEncrypted() to see if the volume supports block encryption.
func (ri *rbdImage) configureEncryption(kmsID string, credentials map[string]string) error { func (ri *rbdImage) configureBlockEncryption(kmsID string, credentials map[string]string) error {
kms, err := kmsapi.GetKMS(ri.Owner, kmsID, credentials) kms, err := kmsapi.GetKMS(ri.Owner, kmsID, credentials)
if err != nil { if err != nil {
return err return err
} }
ri.encryption, err = util.NewVolumeEncryption(kmsID, kms) ri.blockEncryption, err = util.NewVolumeEncryption(kmsID, kms)
// if the KMS can not store the DEK itself, we'll store it in the // if the KMS can not store the DEK itself, we'll store it in the
// metadata of the RBD image itself // metadata of the RBD image itself
if errors.Is(err, util.ErrDEKStoreNeeded) { if errors.Is(err, util.ErrDEKStoreNeeded) {
ri.encryption.SetDEKStore(ri) ri.blockEncryption.SetDEKStore(ri)
}
return nil
}
// configureFileEncryption sets up the VolumeEncryption for this rbdImage. Once
// configured, use isFileEncrypted() to see if the volume supports file encryption.
func (ri *rbdImage) configureFileEncryption(kmsID string, credentials map[string]string) error {
kms, err := kmsapi.GetKMS(ri.Owner, kmsID, credentials)
if err != nil {
return err
}
ri.fileEncryption, err = util.NewVolumeEncryption(kmsID, kms)
if errors.Is(err, util.ErrDEKStoreNeeded) {
// fscrypt uses secrets directly from the KMS.
// Therefore we do not support an additional DEK
// store. Since not all "metadata" KMS support
// GetSecret, test for support here. Postpone any
// other error handling.
_, err := ri.fileEncryption.KMS.GetSecret("")
if errors.Is(err, kmsapi.ErrGetSecretUnsupported) {
return err
}
} }
return nil return nil

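For context, a hedged sketch of the storage-class options that ParseEncryptionOpts reads; the helper function and the KMS configuration name are illustrative only:

package rbd

import (
    "context"

    "github.com/ceph/ceph-csi/internal/util"
)

// exampleEncryptionOpts is illustrative only; "vault-test" is a placeholder
// for a KMS configuration name.
func exampleEncryptionOpts(ctx context.Context) (string, util.EncryptionType, error) {
    volOptions := map[string]string{
        "encrypted":       "true",
        "encryptionType":  "file",       // omit to fall back to rbdDefaultEncryptionType (block)
        "encryptionKMSID": "vault-test", // placeholder
    }

    // Expected to return ("vault-test", util.EncryptionTypeFile, nil).
    return ParseEncryptionOpts(ctx, volOptions, rbdDefaultEncryptionType)
}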
View File

@ -27,6 +27,7 @@ import (
csicommon "github.com/ceph/ceph-csi/internal/csi-common" csicommon "github.com/ceph/ceph-csi/internal/csi-common"
"github.com/ceph/ceph-csi/internal/journal" "github.com/ceph/ceph-csi/internal/journal"
"github.com/ceph/ceph-csi/internal/util" "github.com/ceph/ceph-csi/internal/util"
"github.com/ceph/ceph-csi/internal/util/fscrypt"
"github.com/ceph/ceph-csi/internal/util/log" "github.com/ceph/ceph-csi/internal/util/log"
librbd "github.com/ceph/go-ceph/rbd" librbd "github.com/ceph/go-ceph/rbd"
@ -55,8 +56,8 @@ type stageTransaction struct {
isStagePathCreated bool isStagePathCreated bool
// isMounted represents if the volume was mounted or not // isMounted represents if the volume was mounted or not
isMounted bool isMounted bool
// isEncrypted represents if the volume was encrypted or not // isBlockEncrypted represents if the volume was encrypted or not
isEncrypted bool isBlockEncrypted bool
// devicePath represents the path where rbd device is mapped // devicePath represents the path where rbd device is mapped
devicePath string devicePath string
} }
@ -425,12 +426,18 @@ func (ns *NodeServer) stageTransaction(
} }
} }
if volOptions.isEncrypted() { if volOptions.isBlockEncrypted() {
devicePath, err = ns.processEncryptedDevice(ctx, volOptions, devicePath) devicePath, err = ns.processEncryptedDevice(ctx, volOptions, devicePath)
if err != nil { if err != nil {
return transaction, err return transaction, err
} }
transaction.isEncrypted = true transaction.isBlockEncrypted = true
}
if volOptions.isFileEncrypted() {
if err = fscrypt.InitializeNode(ctx); err != nil {
return transaction, fmt.Errorf("file encryption setup for %s failed: %w", volOptions.VolID, err)
}
} }
stagingTargetPath := getStagingTargetPath(req) stagingTargetPath := getStagingTargetPath(req)
@ -444,12 +451,21 @@ func (ns *NodeServer) stageTransaction(
transaction.isStagePathCreated = true transaction.isStagePathCreated = true
// nodeStage Path // nodeStage Path
err = ns.mountVolumeToStagePath(ctx, req, staticVol, stagingTargetPath, devicePath) err = ns.mountVolumeToStagePath(ctx, req, staticVol, stagingTargetPath, devicePath, volOptions.isFileEncrypted())
if err != nil { if err != nil {
return transaction, err return transaction, err
} }
transaction.isMounted = true transaction.isMounted = true
if volOptions.isFileEncrypted() {
log.DebugLog(ctx, "rbd fscrypt: trying to unlock filesystem on %s image %s", stagingTargetPath, volOptions.VolID)
err = fscrypt.Unlock(ctx, volOptions.fileEncryption, stagingTargetPath, volOptions.VolID)
if err != nil {
return transaction, fmt.Errorf("file system encryption unlock in %s image %s failed: %w",
stagingTargetPath, volOptions.VolID, err)
}
}
// As we are supporting the restore of a volume to a bigger size and // As we are supporting the restore of a volume to a bigger size and
// creating bigger size clone from a volume, we need to check filesystem // creating bigger size clone from a volume, we need to check filesystem
// resize is required, if required resize filesystem. // resize is required, if required resize filesystem.
@ -475,13 +491,13 @@ func resizeNodeStagePath(ctx context.Context,
var ok bool var ok bool
// if it's a non-encrypted block device we don't need any expansion // if it's a non-encrypted block device we don't need any expansion
if isBlock && !transaction.isEncrypted { if isBlock && !transaction.isBlockEncrypted {
return nil return nil
} }
resizer := mount.NewResizeFs(utilexec.New()) resizer := mount.NewResizeFs(utilexec.New())
if transaction.isEncrypted { if transaction.isBlockEncrypted {
devicePath, err = resizeEncryptedDevice(ctx, volID, stagingTargetPath, devicePath) devicePath, err = resizeEncryptedDevice(ctx, volID, stagingTargetPath, devicePath)
if err != nil { if err != nil {
return status.Error(codes.Internal, err.Error()) return status.Error(codes.Internal, err.Error())
@ -611,7 +627,7 @@ func (ns *NodeServer) undoStagingTransaction(
// Unmapping rbd device // Unmapping rbd device
if transaction.devicePath != "" { if transaction.devicePath != "" {
err = detachRBDDevice(ctx, transaction.devicePath, volID, volOptions.UnmapOptions, transaction.isEncrypted) err = detachRBDDevice(ctx, transaction.devicePath, volID, volOptions.UnmapOptions, transaction.isBlockEncrypted)
if err != nil { if err != nil {
log.ErrorLog( log.ErrorLog(
ctx, ctx,
@ -691,6 +707,17 @@ func (ns *NodeServer) NodePublishVolume(
return &csi.NodePublishVolumeResponse{}, nil return &csi.NodePublishVolumeResponse{}, nil
} }
fileEncrypted, err := IsFileEncrypted(ctx, req.GetVolumeContext())
if err != nil {
return nil, status.Error(codes.Internal, err.Error())
}
if fileEncrypted {
stagingPath = fscrypt.AppendEncyptedSubdirectory(stagingPath)
if err = fscrypt.IsDirectoryUnlocked(stagingPath, req.GetVolumeCapability().GetMount().GetFsType()); err != nil {
return nil, status.Error(codes.Internal, err.Error())
}
}
// Publish Path // Publish Path
err = ns.mountVolume(ctx, stagingPath, req) err = ns.mountVolume(ctx, stagingPath, req)
if err != nil { if err != nil {
@ -707,6 +734,7 @@ func (ns *NodeServer) mountVolumeToStagePath(
req *csi.NodeStageVolumeRequest, req *csi.NodeStageVolumeRequest,
staticVol bool, staticVol bool,
stagingPath, devicePath string, stagingPath, devicePath string,
fileEncryption bool,
) error { ) error {
readOnly := false readOnly := false
fsType := req.GetVolumeCapability().GetMount().GetFsType() fsType := req.GetVolumeCapability().GetMount().GetFsType()
@ -751,7 +779,11 @@ func (ns *NodeServer) mountVolumeToStagePath(
args := []string{} args := []string{}
switch fsType { switch fsType {
case "ext4": case "ext4":
args = []string{"-m0", "-Enodiscard,lazy_itable_init=1,lazy_journal_init=1", devicePath} args = []string{"-m0", "-Enodiscard,lazy_itable_init=1,lazy_journal_init=1"}
if fileEncryption {
args = append(args, "-Oencrypt")
}
args = append(args, devicePath)
case "xfs": case "xfs":
args = []string{"-K", devicePath} args = []string{"-K", devicePath}
// always disable reflink // always disable reflink
@ -1146,7 +1178,7 @@ func (ns *NodeServer) processEncryptedDevice(
// CreateVolume. // CreateVolume.
// Use the same setupEncryption() as CreateVolume does, and // Use the same setupEncryption() as CreateVolume does, and
// continue with the common process to crypt-format the device. // continue with the common process to crypt-format the device.
err = volOptions.setupEncryption(ctx) err = volOptions.setupBlockEncryption(ctx)
if err != nil { if err != nil {
log.ErrorLog(ctx, "failed to setup encryption for rbd"+ log.ErrorLog(ctx, "failed to setup encryption for rbd"+
"image %s: %v", imageSpec, err) "image %s: %v", imageSpec, err)

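A hedged sketch of the publish-side handling introduced above: file-encrypted volumes are served from the unlocked subdirectory of the staging path; the helper name is made up:

package rbd

import "github.com/ceph/ceph-csi/internal/util/fscrypt"

// encryptedPublishPath is an illustrative helper, not part of this change.
func encryptedPublishPath(stagingPath, fsType string) (string, error) {
    // Unlocked data lives in "<stagingPath>/ceph-csi-encrypted".
    dir := fscrypt.AppendEncyptedSubdirectory(stagingPath)
    if err := fscrypt.IsDirectoryUnlocked(dir, fsType); err != nil {
        return "", err
    }

    return dir, nil
}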
View File

@ -473,7 +473,7 @@ func createPath(ctx context.Context, volOpt *rbdVolume, device string, cr *util.
imageOrDeviceSpec: imagePath, imageOrDeviceSpec: imagePath,
isImageSpec: true, isImageSpec: true,
isNbd: isNbd, isNbd: isNbd,
encrypted: volOpt.isEncrypted(), encrypted: volOpt.isBlockEncrypted(),
volumeID: volOpt.VolID, volumeID: volOpt.VolID,
unmapOptions: volOpt.UnmapOptions, unmapOptions: volOpt.UnmapOptions,
logDir: volOpt.LogDir, logDir: volOpt.LogDir,

View File

@ -87,6 +87,17 @@ func validateRbdVol(rbdVol *rbdVolume) error {
return err return err
} }
func getEncryptionConfig(rbdVol *rbdVolume) (string, util.EncryptionType) {
switch {
case rbdVol.isBlockEncrypted():
return rbdVol.blockEncryption.GetID(), util.EncryptionTypeBlock
case rbdVol.isFileEncrypted():
return rbdVol.fileEncryption.GetID(), util.EncryptionTypeFile
default:
return "", util.EncryptionTypeNone
}
}
/* /*
checkSnapCloneExists, and its counterpart checkVolExists, function checks if checkSnapCloneExists, and its counterpart checkVolExists, function checks if
the passed in rbdSnapshot or rbdVolume exists on the backend. the passed in rbdSnapshot or rbdVolume exists on the backend.
@ -130,7 +141,7 @@ func checkSnapCloneExists(
defer j.Destroy() defer j.Destroy()
snapData, err := j.CheckReservation(ctx, rbdSnap.JournalPool, snapData, err := j.CheckReservation(ctx, rbdSnap.JournalPool,
rbdSnap.RequestName, rbdSnap.NamePrefix, rbdSnap.RbdImageName, "") rbdSnap.RequestName, rbdSnap.NamePrefix, rbdSnap.RbdImageName, "", util.EncryptionTypeNone)
if err != nil { if err != nil {
return false, err return false, err
} }
@ -245,10 +256,7 @@ func (rv *rbdVolume) Exists(ctx context.Context, parentVol *rbdVolume) (bool, er
return false, err return false, err
} }
kmsID := "" kmsID, encryptionType := getEncryptionConfig(rv)
if rv.isEncrypted() {
kmsID = rv.encryption.GetID()
}
j, err := volJournal.Connect(rv.Monitors, rv.RadosNamespace, rv.conn.Creds) j, err := volJournal.Connect(rv.Monitors, rv.RadosNamespace, rv.conn.Creds)
if err != nil { if err != nil {
@ -257,7 +265,7 @@ func (rv *rbdVolume) Exists(ctx context.Context, parentVol *rbdVolume) (bool, er
defer j.Destroy() defer j.Destroy()
imageData, err := j.CheckReservation( imageData, err := j.CheckReservation(
ctx, rv.JournalPool, rv.RequestName, rv.NamePrefix, "", kmsID) ctx, rv.JournalPool, rv.RequestName, rv.NamePrefix, "", kmsID, encryptionType)
if err != nil { if err != nil {
return false, err return false, err
} }
@ -386,14 +394,12 @@ func reserveSnap(ctx context.Context, rbdSnap *rbdSnapshot, rbdVol *rbdVolume, c
} }
defer j.Destroy() defer j.Destroy()
kmsID := "" kmsID, encryptionType := getEncryptionConfig(rbdVol)
if rbdVol.isEncrypted() {
kmsID = rbdVol.encryption.GetID()
}
rbdSnap.ReservedID, rbdSnap.RbdSnapName, err = j.ReserveName( rbdSnap.ReservedID, rbdSnap.RbdSnapName, err = j.ReserveName(
ctx, rbdSnap.JournalPool, journalPoolID, rbdSnap.Pool, imagePoolID, ctx, rbdSnap.JournalPool, journalPoolID, rbdSnap.Pool, imagePoolID,
rbdSnap.RequestName, rbdSnap.NamePrefix, rbdVol.RbdImageName, kmsID, rbdSnap.ReservedID, rbdVol.Owner, "") rbdSnap.RequestName, rbdSnap.NamePrefix, rbdVol.RbdImageName, kmsID, rbdSnap.ReservedID, rbdVol.Owner,
"", encryptionType)
if err != nil { if err != nil {
return err return err
} }
@ -460,10 +466,7 @@ func reserveVol(ctx context.Context, rbdVol *rbdVolume, rbdSnap *rbdSnapshot, cr
return err return err
} }
kmsID := "" kmsID, encryptionType := getEncryptionConfig(rbdVol)
if rbdVol.isEncrypted() {
kmsID = rbdVol.encryption.GetID()
}
j, err := volJournal.Connect(rbdVol.Monitors, rbdVol.RadosNamespace, cr) j, err := volJournal.Connect(rbdVol.Monitors, rbdVol.RadosNamespace, cr)
if err != nil { if err != nil {
@ -473,7 +476,7 @@ func reserveVol(ctx context.Context, rbdVol *rbdVolume, rbdSnap *rbdSnapshot, cr
rbdVol.ReservedID, rbdVol.RbdImageName, err = j.ReserveName( rbdVol.ReservedID, rbdVol.RbdImageName, err = j.ReserveName(
ctx, rbdVol.JournalPool, journalPoolID, rbdVol.Pool, imagePoolID, ctx, rbdVol.JournalPool, journalPoolID, rbdVol.Pool, imagePoolID,
rbdVol.RequestName, rbdVol.NamePrefix, "", kmsID, rbdVol.ReservedID, rbdVol.Owner, "") rbdVol.RequestName, rbdVol.NamePrefix, "", kmsID, rbdVol.ReservedID, rbdVol.Owner, "", encryptionType)
if err != nil { if err != nil {
return err return err
} }
@ -548,11 +551,12 @@ func RegenerateJournal(
) (string, error) { ) (string, error) {
ctx := context.Background() ctx := context.Background()
var ( var (
vi util.CSIIdentifier vi util.CSIIdentifier
rbdVol *rbdVolume rbdVol *rbdVolume
kmsID string kmsID string
err error encryptionType util.EncryptionType
ok bool err error
ok bool
) )
rbdVol = &rbdVolume{} rbdVol = &rbdVolume{}
@ -568,7 +572,7 @@ func RegenerateJournal(
rbdVol.Owner = owner rbdVol.Owner = owner
kmsID, err = rbdVol.ParseEncryptionOpts(ctx, volumeAttributes) kmsID, encryptionType, err = ParseEncryptionOpts(ctx, volumeAttributes, util.EncryptionTypeNone)
if err != nil { if err != nil {
return "", err return "", err
} }
@ -605,7 +609,7 @@ func RegenerateJournal(
rbdVol.NamePrefix = volumeAttributes["volumeNamePrefix"] rbdVol.NamePrefix = volumeAttributes["volumeNamePrefix"]
imageData, err := j.CheckReservation( imageData, err := j.CheckReservation(
ctx, rbdVol.JournalPool, rbdVol.RequestName, rbdVol.NamePrefix, "", kmsID) ctx, rbdVol.JournalPool, rbdVol.RequestName, rbdVol.NamePrefix, "", kmsID, encryptionType)
if err != nil { if err != nil {
return "", err return "", err
} }
@ -639,7 +643,7 @@ func RegenerateJournal(
rbdVol.ReservedID, rbdVol.RbdImageName, err = j.ReserveName( rbdVol.ReservedID, rbdVol.RbdImageName, err = j.ReserveName(
ctx, rbdVol.JournalPool, journalPoolID, rbdVol.Pool, imagePoolID, ctx, rbdVol.JournalPool, journalPoolID, rbdVol.Pool, imagePoolID,
rbdVol.RequestName, rbdVol.NamePrefix, "", kmsID, vi.ObjectUUID, rbdVol.Owner, "") rbdVol.RequestName, rbdVol.NamePrefix, "", kmsID, vi.ObjectUUID, rbdVol.Owner, "", encryptionType)
if err != nil { if err != nil {
return "", err return "", err
} }

View File

@ -118,6 +118,7 @@ type rbdImage struct {
ParentPool string ParentPool string
// Cluster name // Cluster name
ClusterName string ClusterName string
// Owner is the creator (tenant, Kubernetes Namespace) of the volume // Owner is the creator (tenant, Kubernetes Namespace) of the volume
Owner string Owner string
@ -130,9 +131,14 @@ type rbdImage struct {
ObjectSize uint64 ObjectSize uint64
ImageFeatureSet librbd.FeatureSet ImageFeatureSet librbd.FeatureSet
// encryption provides access to optional VolumeEncryption functions
encryption *util.VolumeEncryption // blockEncryption provides access to optional VolumeEncryption functions (e.g. LUKS)
CreatedAt *timestamp.Timestamp blockEncryption *util.VolumeEncryption
// fileEncryption provides access to optional VolumeEncryption functions (e.g. fscrypt)
fileEncryption *util.VolumeEncryption
CreatedAt *timestamp.Timestamp
// conn is a connection to the Ceph cluster obtained from a ConnPool // conn is a connection to the Ceph cluster obtained from a ConnPool
conn *util.ClusterConnection conn *util.ClusterConnection
// an opened IOContext, call .openIoctx() before using // an opened IOContext, call .openIoctx() before using
@ -384,8 +390,11 @@ func (ri *rbdImage) Destroy() {
if ri.conn != nil { if ri.conn != nil {
ri.conn.Destroy() ri.conn.Destroy()
} }
if ri.isEncrypted() { if ri.isBlockEncrypted() {
ri.encryption.Destroy() ri.blockEncryption.Destroy()
}
if ri.isFileEncrypted() {
ri.fileEncryption.Destroy()
} }
} }
@ -438,8 +447,8 @@ func createImage(ctx context.Context, pOpts *rbdVolume, cr *util.Credentials) er
return fmt.Errorf("failed to create rbd image: %w", err) return fmt.Errorf("failed to create rbd image: %w", err)
} }
if pOpts.isEncrypted() { if pOpts.isBlockEncrypted() {
err = pOpts.setupEncryption(ctx) err = pOpts.setupBlockEncryption(ctx)
if err != nil { if err != nil {
return fmt.Errorf("failed to setup encryption for image %s: %w", pOpts, err) return fmt.Errorf("failed to setup encryption for image %s: %w", pOpts, err)
} }
@ -624,10 +633,17 @@ func (ri *rbdImage) deleteImage(ctx context.Context) error {
return err return err
} }
if ri.isEncrypted() { if ri.isBlockEncrypted() {
log.DebugLog(ctx, "rbd: going to remove DEK for %q", ri) log.DebugLog(ctx, "rbd: going to remove DEK for %q (block encryption)", ri)
if err = ri.encryption.RemoveDEK(ri.VolID); err != nil { if err = ri.blockEncryption.RemoveDEK(ri.VolID); err != nil {
log.WarningLog(ctx, "failed to clean the passphrase for volume %s: %s", ri.VolID, err) log.WarningLog(ctx, "failed to clean the passphrase for volume %s (block encryption): %s", ri.VolID, err)
}
}
if ri.isFileEncrypted() {
log.DebugLog(ctx, "rbd: going to remove DEK for %q (file encryption)", ri)
if err = ri.fileEncryption.RemoveDEK(ri.VolID); err != nil {
log.WarningLog(ctx, "failed to clean the passphrase for volume %s (file encryption): %s", ri.VolID, err)
} }
} }
@ -1008,10 +1024,17 @@ func genSnapFromSnapID(
rbdSnap, err) rbdSnap, err)
} }
if imageAttributes.KmsID != "" { if imageAttributes.KmsID != "" && imageAttributes.EncryptionType == util.EncryptionTypeBlock {
err = rbdSnap.configureEncryption(imageAttributes.KmsID, secrets) err = rbdSnap.configureBlockEncryption(imageAttributes.KmsID, secrets)
if err != nil { if err != nil {
return fmt.Errorf("failed to configure encryption for "+ return fmt.Errorf("failed to configure block encryption for "+
"%q: %w", rbdSnap, err)
}
}
if imageAttributes.KmsID != "" && imageAttributes.EncryptionType == util.EncryptionTypeFile {
err = rbdSnap.configureFileEncryption(imageAttributes.KmsID, secrets)
if err != nil {
return fmt.Errorf("failed to configure file encryption for "+
"%q: %w", rbdSnap, err) "%q: %w", rbdSnap, err)
} }
} }
@ -1103,8 +1126,14 @@ func generateVolumeFromVolumeID(
rbdVol.ImageID = imageAttributes.ImageID rbdVol.ImageID = imageAttributes.ImageID
rbdVol.Owner = imageAttributes.Owner rbdVol.Owner = imageAttributes.Owner
if imageAttributes.KmsID != "" { if imageAttributes.KmsID != "" && imageAttributes.EncryptionType == util.EncryptionTypeBlock {
err = rbdVol.configureEncryption(imageAttributes.KmsID, secrets) err = rbdVol.configureBlockEncryption(imageAttributes.KmsID, secrets)
if err != nil {
return rbdVol, err
}
}
if imageAttributes.KmsID != "" && imageAttributes.EncryptionType == util.EncryptionTypeFile {
err = rbdVol.configureFileEncryption(imageAttributes.KmsID, secrets)
if err != nil { if err != nil {
return rbdVol, err return rbdVol, err
} }
@ -1681,7 +1710,7 @@ func stashRBDImageMetadata(volOptions *rbdVolume, metaDataPath string) error {
Pool: volOptions.Pool, Pool: volOptions.Pool,
RadosNamespace: volOptions.RadosNamespace, RadosNamespace: volOptions.RadosNamespace,
ImageName: volOptions.RbdImageName, ImageName: volOptions.RbdImageName,
Encrypted: volOptions.isEncrypted(), Encrypted: volOptions.isBlockEncrypted(),
UnmapOptions: volOptions.UnmapOptions, UnmapOptions: volOptions.UnmapOptions,
} }
@ -1961,11 +1990,13 @@ func (ri *rbdImage) getOrigSnapName(snapID uint64) (string, error) {
} }
func (ri *rbdImage) isCompatibleEncryption(dst *rbdImage) error { func (ri *rbdImage) isCompatibleEncryption(dst *rbdImage) error {
riEncrypted := ri.isBlockEncrypted() || ri.isFileEncrypted()
dstEncrypted := dst.isBlockEncrypted() || dst.isFileEncrypted()
switch { switch {
case ri.isEncrypted() && !dst.isEncrypted(): case riEncrypted && !dstEncrypted:
return fmt.Errorf("cannot create unencrypted volume from encrypted volume %q", ri) return fmt.Errorf("cannot create unencrypted volume from encrypted volume %q", ri)
case !ri.isEncrypted() && dst.isEncrypted(): case !riEncrypted && dstEncrypted:
return fmt.Errorf("cannot create encrypted volume from unencrypted volume %q", ri) return fmt.Errorf("cannot create encrypted volume from unencrypted volume %q", ri)
} }

View File

@ -111,7 +111,8 @@ func generateVolFromSnap(rbdSnap *rbdSnapshot) *rbdVolume {
// copyEncryptionConfig cannot be used here because the volume and the // copyEncryptionConfig cannot be used here because the volume and the
// snapshot will have the same volumeID which causes the panic in // snapshot will have the same volumeID which causes the panic in
// copyEncryptionConfig function. // copyEncryptionConfig function.
vol.encryption = rbdSnap.encryption vol.blockEncryption = rbdSnap.blockEncryption
vol.fileEncryption = rbdSnap.fileEncryption
return vol return vol
} }

View File

@ -36,7 +36,7 @@ const (
// Passphrase size - 20 bytes is 160 bits to satisfy: // Passphrase size - 20 bytes is 160 bits to satisfy:
// https://tools.ietf.org/html/rfc6749#section-10.10 // https://tools.ietf.org/html/rfc6749#section-10.10
encryptionPassphraseSize = 20 defaultEncryptionPassphraseSize = 20
) )
var ( var (
@ -80,6 +80,68 @@ func FetchEncryptionKMSID(encrypted, kmsID string) (string, error) {
return kmsID, nil return kmsID, nil
} }
type EncryptionType int
const (
// EncryptionTypeInvalid signals invalid or unsupported configuration.
EncryptionTypeInvalid EncryptionType = iota
// EncryptionTypeNone disables encryption.
EncryptionTypeNone
// EncryptionTypeBlock enables block encryption.
EncryptionTypeBlock
// EncryptionTypeFile enables file encryption (fscrypt).
EncryptionTypeFile
)
const (
encryptionTypeBlockString = "block"
encryptionTypeFileString = "file"
)
func ParseEncryptionType(typeStr string) EncryptionType {
switch typeStr {
case encryptionTypeBlockString:
return EncryptionTypeBlock
case encryptionTypeFileString:
return EncryptionTypeFile
case "":
return EncryptionTypeNone
default:
return EncryptionTypeInvalid
}
}
func EncryptionTypeString(encType EncryptionType) string {
switch encType {
case EncryptionTypeBlock:
return encryptionTypeBlockString
case EncryptionTypeFile:
return encryptionTypeFileString
case EncryptionTypeNone:
return ""
case EncryptionTypeInvalid:
return "INVALID"
default:
return "UNKNOWN"
}
}
// FetchEncryptionType returns encryptionType specified in volOptions.
// If not specified, use fallback. If specified but invalid, return
// invalid.
func FetchEncryptionType(volOptions map[string]string, fallback EncryptionType) EncryptionType {
encType, ok := volOptions["encryptionType"]
if !ok {
return fallback
}
if encType == "" {
return EncryptionTypeInvalid
}
return ParseEncryptionType(encType)
}
// NewVolumeEncryption creates a new instance of VolumeEncryption and // NewVolumeEncryption creates a new instance of VolumeEncryption and
// configures the DEKStore. If the KMS does not provide a DEKStore interface, // configures the DEKStore. If the KMS does not provide a DEKStore interface,
// the VolumeEncryption will be created *and* a ErrDEKStoreNeeded is returned. // the VolumeEncryption will be created *and* a ErrDEKStoreNeeded is returned.
@ -156,8 +218,8 @@ func (ve *VolumeEncryption) StoreCryptoPassphrase(volumeID, passphrase string) e
} }
// StoreNewCryptoPassphrase generates a new passphrase and saves it in the KMS. // StoreNewCryptoPassphrase generates a new passphrase and saves it in the KMS.
func (ve *VolumeEncryption) StoreNewCryptoPassphrase(volumeID string) error { func (ve *VolumeEncryption) StoreNewCryptoPassphrase(volumeID string, length int) error {
passphrase, err := generateNewEncryptionPassphrase() passphrase, err := generateNewEncryptionPassphrase(length)
if err != nil { if err != nil {
return fmt.Errorf("failed to generate passphrase for %s: %w", volumeID, err) return fmt.Errorf("failed to generate passphrase for %s: %w", volumeID, err)
} }
@ -176,8 +238,8 @@ func (ve *VolumeEncryption) GetCryptoPassphrase(volumeID string) (string, error)
} }
// generateNewEncryptionPassphrase generates a random passphrase for encryption. // generateNewEncryptionPassphrase generates a random passphrase for encryption.
func generateNewEncryptionPassphrase() (string, error) { func generateNewEncryptionPassphrase(length int) (string, error) {
bytesPassphrase := make([]byte, encryptionPassphraseSize) bytesPassphrase := make([]byte, length)
_, err := rand.Read(bytesPassphrase) _, err := rand.Read(bytesPassphrase)
if err != nil { if err != nil {
return "", err return "", err

View File

@ -28,14 +28,14 @@ import (
func TestGenerateNewEncryptionPassphrase(t *testing.T) { func TestGenerateNewEncryptionPassphrase(t *testing.T) {
t.Parallel() t.Parallel()
b64Passphrase, err := generateNewEncryptionPassphrase() b64Passphrase, err := generateNewEncryptionPassphrase(defaultEncryptionPassphraseSize)
require.NoError(t, err) require.NoError(t, err)
// b64Passphrase is URL-encoded, decode to verify the length of the // b64Passphrase is URL-encoded, decode to verify the length of the
// passphrase // passphrase
passphrase, err := base64.URLEncoding.DecodeString(b64Passphrase) passphrase, err := base64.URLEncoding.DecodeString(b64Passphrase)
assert.NoError(t, err) assert.NoError(t, err)
assert.Equal(t, encryptionPassphraseSize, len(passphrase)) assert.Equal(t, defaultEncryptionPassphraseSize, len(passphrase))
} }
func TestKMSWorkflow(t *testing.T) { func TestKMSWorkflow(t *testing.T) {
@ -56,10 +56,41 @@ func TestKMSWorkflow(t *testing.T) {
volumeID := "volume-id" volumeID := "volume-id"
err = ve.StoreNewCryptoPassphrase(volumeID) err = ve.StoreNewCryptoPassphrase(volumeID, defaultEncryptionPassphraseSize)
assert.NoError(t, err) assert.NoError(t, err)
passphrase, err := ve.GetCryptoPassphrase(volumeID) passphrase, err := ve.GetCryptoPassphrase(volumeID)
assert.NoError(t, err) assert.NoError(t, err)
assert.Equal(t, secrets["encryptionPassphrase"], passphrase) assert.Equal(t, secrets["encryptionPassphrase"], passphrase)
} }
func TestEncryptionType(t *testing.T) {
t.Parallel()
assert.EqualValues(t, EncryptionTypeInvalid, ParseEncryptionType("wat?"))
assert.EqualValues(t, EncryptionTypeInvalid, ParseEncryptionType("both"))
assert.EqualValues(t, EncryptionTypeInvalid, ParseEncryptionType("file,block"))
assert.EqualValues(t, EncryptionTypeInvalid, ParseEncryptionType("block,file"))
assert.EqualValues(t, EncryptionTypeBlock, ParseEncryptionType("block"))
assert.EqualValues(t, EncryptionTypeFile, ParseEncryptionType("file"))
assert.EqualValues(t, EncryptionTypeNone, ParseEncryptionType(""))
for _, s := range []string{"file", "block", ""} {
assert.EqualValues(t, s, EncryptionTypeString(ParseEncryptionType(s)))
}
}
func TestFetchEncryptionType(t *testing.T) {
t.Parallel()
volOpts := map[string]string{}
assert.EqualValues(t, EncryptionTypeBlock, FetchEncryptionType(volOpts, EncryptionTypeBlock))
assert.EqualValues(t, EncryptionTypeFile, FetchEncryptionType(volOpts, EncryptionTypeFile))
assert.EqualValues(t, EncryptionTypeNone, FetchEncryptionType(volOpts, EncryptionTypeNone))
volOpts["encryptionType"] = ""
assert.EqualValues(t, EncryptionTypeInvalid, FetchEncryptionType(volOpts, EncryptionTypeNone))
volOpts["encryptionType"] = "block"
assert.EqualValues(t, EncryptionTypeBlock, FetchEncryptionType(volOpts, EncryptionTypeNone))
volOpts["encryptionType"] = "file"
assert.EqualValues(t, EncryptionTypeFile, FetchEncryptionType(volOpts, EncryptionTypeNone))
volOpts["encryptionType"] = "INVALID"
assert.EqualValues(t, EncryptionTypeInvalid, FetchEncryptionType(volOpts, EncryptionTypeNone))
}

View File

@ -0,0 +1,439 @@
/*
Copyright 2022 The Ceph-CSI Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package fscrypt
/*
#include <linux/fs.h>
*/
import "C"
import (
"context"
"errors"
"fmt"
"os"
"os/user"
"path"
"time"
"unsafe"
fscryptactions "github.com/google/fscrypt/actions"
fscryptcrypto "github.com/google/fscrypt/crypto"
fscryptfilesystem "github.com/google/fscrypt/filesystem"
fscryptmetadata "github.com/google/fscrypt/metadata"
"github.com/pkg/xattr"
"golang.org/x/sys/unix"
"github.com/ceph/ceph-csi/internal/kms"
"github.com/ceph/ceph-csi/internal/util"
"github.com/ceph/ceph-csi/internal/util/log"
)
const (
FscryptHashingTimeTarget = 1 * time.Second
FscryptProtectorPrefix = "ceph-csi"
FscryptSubdir = "ceph-csi-encrypted"
encryptionPassphraseSize = 64
)
var policyV2Support = []util.KernelVersion{
{
Version: 5,
PatchLevel: 4,
SubLevel: 0,
ExtraVersion: 0,
Distribution: "",
Backport: false,
},
}
// AppendEncyptedSubdirectory returns the directory, below dir, that holds
// the fscrypt-protected data of a volume.
func AppendEncyptedSubdirectory(dir string) string {
return path.Join(dir, FscryptSubdir)
}
// getPassphrase returns the passphrase from the configured Ceph CSI KMS to be used as a protector key in fscrypt.
func getPassphrase(ctx context.Context, encryption util.VolumeEncryption, volID string) (string, error) {
var (
passphrase string
err error
)
switch encryption.KMS.RequiresDEKStore() {
case kms.DEKStoreIntegrated:
passphrase, err = encryption.GetCryptoPassphrase(volID)
if err != nil {
log.ErrorLog(ctx, "fscrypt: failed to get passphrase from KMS: %v", err)
return "", err
}
case kms.DEKStoreMetadata:
passphrase, err = encryption.KMS.GetSecret(volID)
if err != nil {
log.ErrorLog(ctx, "fscrypt: failed to GetSecret: %v", err)
return "", err
}
}
return passphrase, nil
}
// createKeyFuncFromVolumeEncryption returns an fscrypt key function returning
// encryption keys from a VolumeEncryption struct.
func createKeyFuncFromVolumeEncryption(
ctx context.Context,
encryption util.VolumeEncryption,
volID string,
) (func(fscryptactions.ProtectorInfo, bool) (*fscryptcrypto.Key, error), error) {
keyFunc := func(info fscryptactions.ProtectorInfo, retry bool) (*fscryptcrypto.Key, error) {
passphrase, err := getPassphrase(ctx, encryption, volID)
if err != nil {
return nil, err
}
key, err := fscryptcrypto.NewBlankKey(encryptionPassphraseSize / 2)
copy(key.Data(), passphrase)
return key, err
}
return keyFunc, nil
}
// fsyncEncryptedDirectory calls sync on dirPath. It is intended to
// work around the fscrypt library not syncing the directory it sets a
// policy on.
// TODO Remove when the fscrypt dependency has https://github.com/google/fscrypt/pull/359
func fsyncEncryptedDirectory(dirPath string) error {
dir, err := os.Open(dirPath)
if err != nil {
return err
}
defer dir.Close()
return dir.Sync()
}
// unlockExisting tries to unlock an already set up fscrypt directory using keys from Ceph CSI.
func unlockExisting(
ctx context.Context,
fscryptContext *fscryptactions.Context,
encryptedPath string, protectorName string,
keyFn func(fscryptactions.ProtectorInfo, bool) (*fscryptcrypto.Key, error),
) error {
var err error
policy, err := fscryptactions.GetPolicyFromPath(fscryptContext, encryptedPath)
if err != nil {
log.ErrorLog(ctx, "fscrypt: policy get failed %v", err)
return err
}
optionFn := func(policyDescriptor string, options []*fscryptactions.ProtectorOption) (int, error) {
for idx, option := range options {
if option.Name() == protectorName {
return idx, nil
}
}
return 0, &fscryptactions.ErrNotProtected{PolicyDescriptor: policyDescriptor, ProtectorDescriptor: protectorName}
}
if err = policy.Unlock(optionFn, keyFn); err != nil {
log.ErrorLog(ctx, "fscrypt: unlock with protector error: %v", err)
return err
}
defer func() {
err = policy.Lock()
if err != nil {
log.ErrorLog(ctx, "fscrypt: failed to lock policy after use: %v", err)
}
}()
if err = policy.Provision(); err != nil {
log.ErrorLog(ctx, "fscrypt: provision fail %v", err)
return err
}
log.DebugLog(ctx, "fscrypt protector unlock: %s %+v", protectorName, policy)
return nil
}
func initializeAndUnlock(
ctx context.Context,
fscryptContext *fscryptactions.Context,
encryptedPath string, protectorName string,
keyFn func(fscryptactions.ProtectorInfo, bool) (*fscryptcrypto.Key, error),
) error {
var owner *user.User
var err error
if err = os.Mkdir(encryptedPath, 0o755); err != nil {
return err
}
protector, err := fscryptactions.CreateProtector(fscryptContext, protectorName, keyFn, owner)
if err != nil {
log.ErrorLog(ctx, "fscrypt: protector name=%s create failed: %v. reverting.", protectorName, err)
if revertErr := protector.Revert(); revertErr != nil {
return revertErr
}
return err
}
if err = protector.Unlock(keyFn); err != nil {
return err
}
log.DebugLog(ctx, "fscrypt protector unlock: %+v", protector)
var policy *fscryptactions.Policy
if policy, err = fscryptactions.CreatePolicy(fscryptContext, protector); err != nil {
return err
}
defer func() {
err = policy.Lock()
if err != nil {
log.ErrorLog(ctx, "fscrypt: failed to lock policy after init: %w")
err = policy.Revert()
if err != nil {
log.ErrorLog(ctx, "fscrypt: failed to revert policy after failed lock: %w")
}
}
}()
if err = policy.UnlockWithProtector(protector); err != nil {
log.ErrorLog(ctx, "fscrypt: Failed to unlock policy: %v", err)
return err
}
if err = policy.Provision(); err != nil {
log.ErrorLog(ctx, "fscrypt: Failed to provision policy: %v", err)
return err
}
if err = policy.Apply(encryptedPath); err != nil {
log.ErrorLog(ctx, "fscrypt: Failed to apply protector (see also kernel log): %w", err)
if err = policy.Deprovision(false); err != nil {
log.ErrorLog(ctx, "fscrypt: Policy cleanup response to failing apply failed: %w", err)
}
return err
}
if err = fsyncEncryptedDirectory(encryptedPath); err != nil {
log.ErrorLog(ctx, "fscrypt: fsync encrypted dir - to flush kernel policy to disk failed %v", err)
return err
}
return nil
}
// getInodeEncryptedAttribute returns whether the inode at path p has the encrypt attribute set, similar to lsattr(1).
func getInodeEncryptedAttribute(p string) (bool, error) {
file, err := os.Open(p)
if err != nil {
return false, err
}
defer file.Close()
var attr int
_, _, errno := unix.Syscall(unix.SYS_IOCTL, file.Fd(), unix.FS_IOC_GETFLAGS,
uintptr(unsafe.Pointer(&attr)))
if errno != 0 {
return false, fmt.Errorf("error calling ioctl_iflags: %w", errno)
}
if attr&C.FS_ENCRYPT_FL != 0 {
return true, nil
}
return false, nil
}
// IsDirectoryUnlocked checks whether a directory is an unlocked fscrypt-encrypted directory.
func IsDirectoryUnlocked(directoryPath, filesystem string) error {
if _, err := fscryptmetadata.GetPolicy(directoryPath); err != nil {
return fmt.Errorf("no fscrypt policy set on directory %q: %w", directoryPath, err)
}
switch filesystem {
case "ceph":
_, err := xattr.Get(directoryPath, "ceph.fscrypt.auth")
if err != nil {
return fmt.Errorf("error reading ceph.fscrypt.auth xattr on %q: %w", directoryPath, err)
}
default:
encrypted, err := getInodeEncryptedAttribute(directoryPath)
if err != nil {
return err
}
if !encrypted {
return fmt.Errorf("path %s does not have the encrypted inode flag set. Encryption init must have failed",
directoryPath)
}
}
return nil
}
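As a usage sketch (the helper name and the assumption of an ext4-backed mount are illustrative), a caller can verify after staging that the per-volume subdirectory really is unlocked:

// verifyUnlocked is a hypothetical helper: it checks that the fscrypt
// subdirectory below stagingTargetPath carries a policy and is unlocked.
func verifyUnlocked(ctx context.Context, stagingTargetPath string) error {
	encryptedDir := path.Join(stagingTargetPath, FscryptSubdir)
	if err := IsDirectoryUnlocked(encryptedDir, "ext4"); err != nil {
		log.ErrorLog(ctx, "fscrypt: %q is not an unlocked fscrypt directory: %v", encryptedDir, err)
		return err
	}
	return nil
}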
func getBestPolicyVersion() (int64, error) {
// fetch the current running kernel info
release, err := util.GetKernelVersion()
if err != nil {
return 0, fmt.Errorf("fetching current kernel version failed: %w", err)
}
switch {
case util.CheckKernelSupport(release, policyV2Support):
return 2, nil
default:
return 1, nil
}
}
// InitializeNode performs the once-per-node-server initialization required by
// the fscrypt library. It creates /etc/fscrypt.conf.
func InitializeNode(ctx context.Context) error {
policyVersion, err := getBestPolicyVersion()
if err != nil {
return fmt.Errorf("fscrypt node init failed to determine best policy version: %w", err)
}
err = fscryptactions.CreateConfigFile(FscryptHashingTimeTarget, policyVersion)
if err != nil {
existsError := &fscryptactions.ErrConfigFileExists{}
if errors.As(err, &existsError) {
log.ErrorLog(ctx, "fscrypt: config file %q already exists. Skipping fscrypt node setup",
existsError.Path)
return nil
}
return fmt.Errorf("fscrypt node init failed to create node configuration (/etc/fscrypt.conf): %w",
err)
}
return nil
}
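A rough sketch of the intended call site (the wiring below is an assumption for illustration, not the actual driver code): InitializeNode runs once when the node server starts, before any encrypted volume is staged.

// startNodeService is a hypothetical entry point showing when InitializeNode
// is expected to run relative to volume operations.
func startNodeService(ctx context.Context) error {
	// Create /etc/fscrypt.conf (or accept an existing one) up front, so that
	// later Unlock calls find a valid configuration.
	if err := InitializeNode(ctx); err != nil {
		return fmt.Errorf("fscrypt node setup failed: %w", err)
	}
	// ... start serving NodeStageVolume / NodePublishVolume requests ...
	return nil
}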
// Unlock unlocks a volume, creating fresh fscrypt metadata if necessary, iff
// the volume is encrypted; otherwise it returns immediately. Calling this
// function requires that InitializeNode ran once on this node.
func Unlock(
ctx context.Context,
volEncryption *util.VolumeEncryption,
stagingTargetPath string, volID string,
) error {
// Fetches keys from KMS. Do this first to catch KMS errors before setting up anything.
keyFn, err := createKeyFuncFromVolumeEncryption(ctx, *volEncryption, volID)
if err != nil {
log.ErrorLog(ctx, "fscrypt: could not create key function: %v", err)
return err
}
err = fscryptfilesystem.UpdateMountInfo()
if err != nil {
return err
}
fscryptContext, err := fscryptactions.NewContextFromMountpoint(stagingTargetPath, nil)
if err != nil {
log.ErrorLog(ctx, "fscrypt: failed to create context from mountpoint %v: %w", stagingTargetPath, err)
return err
}
fscryptContext.Config.UseFsKeyringForV1Policies = true
log.DebugLog(ctx, "fscrypt context: %+v", fscryptContext)
if err = fscryptContext.Mount.CheckSupport(); err != nil {
log.ErrorLog(ctx, "fscrypt: filesystem mount %s does not support fscrypt", fscryptContext.Mount)
return err
}
// A properly set up fscrypt directory requires metadata and a kernel policy:
// 1. Do we have a metadata directory (.fscrypt) set up?
metadataDirExists := false
if err = fscryptContext.Mount.Setup(fscryptfilesystem.SingleUserWritable); err != nil {
alreadySetupErr := &fscryptfilesystem.ErrAlreadySetup{}
if errors.As(err, &alreadySetupErr) {
log.DebugLog(ctx, "fscrypt: metadata directory in %q already set up", alreadySetupErr.Mount.Path)
metadataDirExists = true
} else {
log.ErrorLog(ctx, "fscrypt: mount setup failed: %v", err)
return err
}
}
encryptedPath := path.Join(stagingTargetPath, FscryptSubdir)
kernelPolicyExists := false
// 2. Ask the kernel if the directory has an fscrypt policy in place.
if _, err = fscryptmetadata.GetPolicy(encryptedPath); err == nil { // encrypted directory already set up
kernelPolicyExists = true
}
if metadataDirExists != kernelPolicyExists {
return fmt.Errorf("fscrypt: unsupported state metadata=%t kernel_policy=%t",
metadataDirExists, kernelPolicyExists)
}
protectorName := FscryptProtectorPrefix
switch volEncryption.KMS.RequiresDEKStore() {
case kms.DEKStoreMetadata:
// Metadata-style KMS providers use the KMS secret as a custom
// passphrase directly in fscrypt, circumventing key derivation
// on the CSI side so that users can easily fall back on the
// fscrypt command-line tool.
fscryptContext.Config.Source = fscryptmetadata.SourceType_custom_passphrase
case kms.DEKStoreIntegrated:
fscryptContext.Config.Source = fscryptmetadata.SourceType_raw_key
}
if kernelPolicyExists && metadataDirExists {
log.DebugLog(ctx, "fscrypt: Encrypted directory already set up, policy exists")
return unlockExisting(ctx, fscryptContext, encryptedPath, protectorName, keyFn)
}
if !kernelPolicyExists && !metadataDirExists {
log.DebugLog(ctx, "fscrypt: Creating new protector and policy")
if volEncryption.KMS.RequiresDEKStore() == kms.DEKStoreIntegrated {
if err := volEncryption.StoreNewCryptoPassphrase(volID, encryptionPassphraseSize); err != nil {
log.ErrorLog(ctx, "fscrypt: store new crypto passphrase failed: %v", err)
return err
}
}
return initializeAndUnlock(ctx, fscryptContext, encryptedPath, protectorName, keyFn)
}
return fmt.Errorf("unsupported")
}
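Putting it together, a hedged sketch of how a node plugin could drive Unlock during staging (the function name and control flow are illustrative assumptions):

// stageEncryptedVolume is a hypothetical flow: the filesystem is already
// mounted at stagingTargetPath, then Unlock creates or unlocks the
// fscrypt-protected subdirectory used for application data.
func stageEncryptedVolume(
	ctx context.Context,
	volEncryption *util.VolumeEncryption,
	stagingTargetPath, volID string,
) error {
	if err := Unlock(ctx, volEncryption, stagingTargetPath, volID); err != nil {
		return fmt.Errorf("fscrypt: unlocking volume %s failed: %w", volID, err)
	}
	// The data to expose to the workload then lives below
	// path.Join(stagingTargetPath, FscryptSubdir).
	return nil
}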

View File

@ -0,0 +1,52 @@
/*
Copyright 2022 The Ceph-CSI Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package util
import (
"errors"
"testing"
kmsapi "github.com/ceph/ceph-csi/internal/kms"
"github.com/stretchr/testify/assert"
)
func TestGetPassphraseFromKMS(t *testing.T) {
t.Parallel()
for _, provider := range kmsapi.GetKMSTestProvider() {
if provider.CreateTestDummy == nil {
continue
}
kms := kmsapi.GetKMSTestDummy(provider.UniqueID)
assert.NotNil(t, kms)
volEnc, err := NewVolumeEncryption(provider.UniqueID, kms)
if errors.Is(err, ErrDEKStoreNeeded) {
_, err = volEnc.KMS.GetSecret("")
if errors.Is(err, kmsapi.ErrGetSecretUnsupported) {
continue // currently unsupported by fscrypt integration
}
}
assert.NotNil(t, volEnc)
if kms.RequiresDEKStore() == kmsapi.DEKStoreIntegrated {
continue
}
secret, err := kms.GetSecret("")
assert.NoError(t, err, provider.UniqueID)
assert.NotEmpty(t, secret, provider.UniqueID)
}
}

View File

@ -3,8 +3,13 @@ all
#Refer below url for more information about the markdown rules. #Refer below url for more information about the markdown rules.
#https://github.com/markdownlint/markdownlint/blob/master/docs/RULES.md #https://github.com/markdownlint/markdownlint/blob/master/docs/RULES.md
rule 'MD013', :code_blocks => false, :tables => false, :line_length => 80 rule 'MD013', :ignore_code_blocks => false, :tables => false, :line_length => 80
exclude_rule 'MD033' # In-line HTML: GitHub style markdown adds HTML tags exclude_rule 'MD033' # In-line HTML: GitHub style markdown adds HTML tags
exclude_rule 'MD040' # Fenced code blocks should have a language specified exclude_rule 'MD040' # Fenced code blocks should have a language specified
exclude_rule 'MD041' # First line in file should be a top level header exclude_rule 'MD041' # First line in file should be a top level header
# TODO: Enable the rules after making required changes.
exclude_rule 'MD007' # Unordered list indentation
exclude_rule 'MD012' # Multiple consecutive blank lines
exclude_rule 'MD013' # Line length
exclude_rule 'MD047' # File should end with a single newline character

View File

@ -180,6 +180,7 @@ function disable_storage_addons() {
# configure minikube # configure minikube
MINIKUBE_ARCH=${MINIKUBE_ARCH:-"amd64"} MINIKUBE_ARCH=${MINIKUBE_ARCH:-"amd64"}
MINIKUBE_VERSION=${MINIKUBE_VERSION:-"latest"} MINIKUBE_VERSION=${MINIKUBE_VERSION:-"latest"}
MINIKUBE_ISO_URL=${MINIKUBE_ISO_URL:-""}
KUBE_VERSION=${KUBE_VERSION:-"latest"} KUBE_VERSION=${KUBE_VERSION:-"latest"}
CONTAINER_CMD=${CONTAINER_CMD:-"docker"} CONTAINER_CMD=${CONTAINER_CMD:-"docker"}
MEMORY=${MEMORY:-"4096"} MEMORY=${MEMORY:-"4096"}
@ -206,6 +207,10 @@ else
DISK_CONFIG="" DISK_CONFIG=""
fi fi
if [[ -n "${MINIKUBE_ISO_URL}" ]]; then
EXTRA_CONFIG="${EXTRA_CONFIG} --iso-url ${MINIKUBE_ISO_URL}"
fi
# configure csi image version # configure csi image version
CSI_IMAGE_VERSION=${CSI_IMAGE_VERSION:-"canary"} CSI_IMAGE_VERSION=${CSI_IMAGE_VERSION:-"canary"}

202
vendor/github.com/google/fscrypt/LICENSE generated vendored Normal file
View File

@ -0,0 +1,202 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright [yyyy] [name of copyright owner]
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

132
vendor/github.com/google/fscrypt/actions/callback.go generated vendored Normal file
View File

@ -0,0 +1,132 @@
/*
* callback.go - defines how the caller of an action function passes along a key
* to be used in this package.
*
* Copyright 2017 Google Inc.
* Author: Joe Richey (joerichey@google.com)
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not
* use this file except in compliance with the License. You may obtain a copy of
* the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
*/
package actions
import (
"log"
"github.com/pkg/errors"
"github.com/google/fscrypt/crypto"
"github.com/google/fscrypt/filesystem"
"github.com/google/fscrypt/metadata"
)
// ProtectorInfo is the information a caller will receive about a Protector
// before they have to return the corresponding key. This is currently a
// read-only view of metadata.ProtectorData.
type ProtectorInfo struct {
data *metadata.ProtectorData
}
// Descriptor is the Protector's descriptor used to uniquely identify it.
func (pi *ProtectorInfo) Descriptor() string { return pi.data.GetProtectorDescriptor() }
// Source indicates the type of the descriptor (how it should be unlocked).
func (pi *ProtectorInfo) Source() metadata.SourceType { return pi.data.GetSource() }
// Name is used to describe custom passphrase and raw key descriptors.
func (pi *ProtectorInfo) Name() string { return pi.data.GetName() }
// UID is used to identify the user for login passphrases.
func (pi *ProtectorInfo) UID() int64 { return pi.data.GetUid() }
// KeyFunc is passed to a function that will require some type of key.
// The info parameter is provided so the callback knows which key to provide.
// The retry parameter indicates that a previous key provided by this callback
// was incorrect (this allows for user feedback like "incorrect passphrase").
//
// For passphrase sources, the returned key should be a passphrase. For raw
// sources, the returned key should be a 256-bit cryptographic key. Consumers
// of the callback will wipe the returned key. An error returned by the callback
// will be propagated back to the caller.
type KeyFunc func(info ProtectorInfo, retry bool) (*crypto.Key, error)
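For orientation (illustrative only; not part of the vendored file), a minimal KeyFunc for a raw-key source might look like the sketch below; raw sources expect a 256-bit key, and a retry simply means the key was wrong.

// exampleRawKeyFunc is an illustrative KeyFunc for a raw_key protector. A real
// implementation would fetch the secret from external storage; this one just
// returns a blank key of the expected length.
func exampleRawKeyFunc(info ProtectorInfo, retry bool) (*crypto.Key, error) {
	if retry {
		// A fixed raw key cannot change between attempts, so give up.
		return nil, crypto.ErrBadAuth
	}
	return crypto.NewBlankKey(metadata.PolicyKeyLen)
}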
// getWrappingKey uses the provided callback to get the wrapping key
// corresponding to the ProtectorInfo. This runs the passphrase hash for
// passphrase sources or just relays the callback for raw sources.
func getWrappingKey(info ProtectorInfo, keyFn KeyFunc, retry bool) (*crypto.Key, error) {
// For raw key sources, we can just use the key directly.
if info.Source() == metadata.SourceType_raw_key {
return keyFn(info, retry)
}
// Run the passphrase hash for other sources.
passphrase, err := keyFn(info, retry)
if err != nil {
return nil, err
}
defer passphrase.Wipe()
log.Printf("running passphrase hash for protector %s", info.Descriptor())
return crypto.PassphraseHash(passphrase, info.data.Salt, info.data.Costs)
}
// unwrapProtectorKey uses the provided callback and ProtectorInfo to return
// the unwrapped protector key. This will repeatedly call keyFn to get the
// wrapping key until the correct key is returned by the callback or the
// callback returns an error.
func unwrapProtectorKey(info ProtectorInfo, keyFn KeyFunc) (*crypto.Key, error) {
retry := false
for {
wrappingKey, err := getWrappingKey(info, keyFn, retry)
if err != nil {
return nil, err
}
protectorKey, err := crypto.Unwrap(wrappingKey, info.data.WrappedKey)
wrappingKey.Wipe()
switch errors.Cause(err) {
case nil:
log.Printf("valid wrapping key for protector %s", info.Descriptor())
return protectorKey, nil
case crypto.ErrBadAuth:
// After the first failure, we let the callback know we are retrying.
log.Printf("invalid wrapping key for protector %s", info.Descriptor())
retry = true
continue
default:
return nil, err
}
}
}
// ProtectorOption is information about a protector relative to a Policy.
type ProtectorOption struct {
ProtectorInfo
// LinkedMount is the mountpoint for a linked protector. It is nil if
// the protector is not a linked protector (or there is a LoadError).
LinkedMount *filesystem.Mount
// LoadError is non-nil if there was an error in getting the data for
// the protector.
LoadError error
}
// OptionFunc is passed to a function that needs to unlock a Policy.
// The callback is used to specify which protector should be used to unlock a
// Policy. The descriptor indicates which Policy we are using, while the options
// correspond to the valid Protectors protecting the Policy.
//
// The OptionFunc should either return a valid index into options, which
// corresponds to the desired protector, or an error (which will be propagated
// back to the caller).
type OptionFunc func(policyDescriptor string, options []*ProtectorOption) (int, error)

293
vendor/github.com/google/fscrypt/actions/config.go generated vendored Normal file
View File

@ -0,0 +1,293 @@
/*
* config.go - Actions for creating a new config file, which includes new
* hashing costs and the config file's location.
*
* Copyright 2017 Google Inc.
* Author: Joe Richey (joerichey@google.com)
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not
* use this file except in compliance with the License. You may obtain a copy of
* the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
*/
package actions
import (
"bytes"
"fmt"
"log"
"os"
"runtime"
"time"
"golang.org/x/sys/unix"
"github.com/google/fscrypt/crypto"
"github.com/google/fscrypt/filesystem"
"github.com/google/fscrypt/metadata"
"github.com/google/fscrypt/util"
)
// ConfigFileLocation is the location of fscrypt's global settings. This can be
// overridden by the user of this package.
var ConfigFileLocation = "/etc/fscrypt.conf"
// ErrBadConfig is an internal error that indicates that the config struct is invalid.
type ErrBadConfig struct {
Config *metadata.Config
UnderlyingError error
}
func (err *ErrBadConfig) Error() string {
return fmt.Sprintf(`internal error: config is invalid: %s
The invalid config is %s`, err.UnderlyingError, err.Config)
}
// ErrBadConfigFile indicates that the config file is invalid.
type ErrBadConfigFile struct {
Path string
UnderlyingError error
}
func (err *ErrBadConfigFile) Error() string {
return fmt.Sprintf("%q is invalid: %s", err.Path, err.UnderlyingError)
}
// ErrConfigFileExists indicates that the config file already exists.
type ErrConfigFileExists struct {
Path string
}
func (err *ErrConfigFileExists) Error() string {
return fmt.Sprintf("%q already exists", err.Path)
}
// ErrNoConfigFile indicates that the config file doesn't exist.
type ErrNoConfigFile struct {
Path string
}
func (err *ErrNoConfigFile) Error() string {
return fmt.Sprintf("%q doesn't exist", err.Path)
}
const (
// Permissions of the config file (global readable)
configPermissions = 0644
// Config file should be created for writing and not already exist
createFlags = os.O_CREATE | os.O_WRONLY | os.O_EXCL
// 128 MiB is a large enough amount of memory to make the password hash
// very difficult to brute force on specialized hardware, but small
// enough to work on most GNU/Linux systems.
maxMemoryBytes = 128 * 1024 * 1024
)
var (
timingPassphrase = []byte("I am a fake passphrase")
timingSalt = bytes.Repeat([]byte{42}, metadata.SaltLen)
)
// CreateConfigFile creates a new config file at the appropriate location with
// the appropriate hashing costs and encryption parameters. The hashing will be
// configured to take as long as the specified time target. In addition, the
// version of encryption policy to use may be overridden from the default of v1.
func CreateConfigFile(target time.Duration, policyVersion int64) error {
// Create the config file before computing the hashing costs, so we fail
// immediately if the program has insufficient permissions.
configFile, err := filesystem.OpenFileOverridingUmask(ConfigFileLocation,
createFlags, configPermissions)
switch {
case os.IsExist(err):
return &ErrConfigFileExists{ConfigFileLocation}
case err != nil:
return err
}
defer configFile.Close()
config := &metadata.Config{
Source: metadata.DefaultSource,
Options: metadata.DefaultOptions,
}
if policyVersion != 0 {
config.Options.PolicyVersion = policyVersion
}
if config.HashCosts, err = getHashingCosts(target); err != nil {
return err
}
log.Printf("Creating config at %q with %v\n", ConfigFileLocation, config)
return metadata.WriteConfig(config, configFile)
}
// getConfig returns the current configuration struct. Any fields not specified
// in the config file use the system defaults. An error is returned if the
// config file hasn't been setup with CreateConfigFile yet or the config
// contains invalid data.
func getConfig() (*metadata.Config, error) {
configFile, err := os.Open(ConfigFileLocation)
switch {
case os.IsNotExist(err):
return nil, &ErrNoConfigFile{ConfigFileLocation}
case err != nil:
return nil, err
}
defer configFile.Close()
log.Printf("Reading config from %q\n", ConfigFileLocation)
config, err := metadata.ReadConfig(configFile)
if err != nil {
return nil, &ErrBadConfigFile{ConfigFileLocation, err}
}
// Use system defaults if not specified
if config.Source == metadata.SourceType_default {
config.Source = metadata.DefaultSource
log.Printf("Falling back to source of %q", config.Source.String())
}
if config.Options.Padding == 0 {
config.Options.Padding = metadata.DefaultOptions.Padding
log.Printf("Falling back to padding of %d", config.Options.Padding)
}
if config.Options.Contents == metadata.EncryptionOptions_default {
config.Options.Contents = metadata.DefaultOptions.Contents
log.Printf("Falling back to contents mode of %q", config.Options.Contents)
}
if config.Options.Filenames == metadata.EncryptionOptions_default {
config.Options.Filenames = metadata.DefaultOptions.Filenames
log.Printf("Falling back to filenames mode of %q", config.Options.Filenames)
}
if config.Options.PolicyVersion == 0 {
config.Options.PolicyVersion = metadata.DefaultOptions.PolicyVersion
log.Printf("Falling back to policy version of %d", config.Options.PolicyVersion)
}
if err := config.CheckValidity(); err != nil {
return nil, &ErrBadConfigFile{ConfigFileLocation, err}
}
return config, nil
}
// getHashingCosts returns hashing costs so that hashing a password will take
// approximately the target time. This is done using the total amount of RAM,
// the number of CPUs present, and by running the passphrase hash many times.
func getHashingCosts(target time.Duration) (*metadata.HashingCosts, error) {
log.Printf("Finding hashing costs that take %v\n", target)
// Start out with the minimal possible costs that use all the CPUs.
nCPUs := int64(runtime.NumCPU())
costs := &metadata.HashingCosts{
Time: 1,
Memory: 8 * nCPUs,
Parallelism: nCPUs,
}
// If even the minimal costs are not fast enough, just return the
// minimal costs and log a warning.
t, err := timeHashingCosts(costs)
if err != nil {
return nil, err
}
log.Printf("Min Costs={%v}\t-> %v\n", costs, t)
if t > target {
log.Printf("time exceeded the target of %v.\n", target)
return costs, nil
}
// Now we start doubling the costs until we reach the target.
memoryKiBLimit := memoryBytesLimit() / 1024
for {
// Store a copy of the previous costs
costsPrev := *costs
tPrev := t
// Double the memory up to the max, then double the time.
if costs.Memory < memoryKiBLimit {
costs.Memory = util.MinInt64(2*costs.Memory, memoryKiBLimit)
} else {
costs.Time *= 2
}
// If our hashing failed, return the last good set of costs.
if t, err = timeHashingCosts(costs); err != nil {
log.Printf("Hashing with costs={%v} failed: %v\n", costs, err)
return &costsPrev, nil
}
log.Printf("Costs={%v}\t-> %v\n", costs, t)
// If we have reached the target time, we return a set of costs
// based on the linear interpolation between the last two times.
if t >= target {
f := float64(target-tPrev) / float64(t-tPrev)
return &metadata.HashingCosts{
Time: betweenCosts(costsPrev.Time, costs.Time, f),
Memory: betweenCosts(costsPrev.Memory, costs.Memory, f),
Parallelism: costs.Parallelism,
}, nil
}
}
}
// memoryBytesLimit returns the maximum amount of memory we will use for
// passphrase hashing. This will never be more than a reasonable maximum (for
// compatibility) or an 8th the available system RAM.
func memoryBytesLimit() int64 {
// The sysinfo syscall only fails if given a bad address
var info unix.Sysinfo_t
err := unix.Sysinfo(&info)
util.NeverError(err)
totalRAMBytes := int64(info.Totalram)
return util.MinInt64(totalRAMBytes/8, maxMemoryBytes)
}
// betweenCosts returns a cost between a and b. Specifically, it returns the
// floor of a + f*(b-a). This way, f=0 returns a and f=1 returns b.
func betweenCosts(a, b int64, f float64) int64 {
return a + int64(f*float64(b-a))
}
// timeHashingCosts runs the passphrase hash with the specified costs and
// returns the time it takes to hash the passphrase.
func timeHashingCosts(costs *metadata.HashingCosts) (time.Duration, error) {
passphrase, err := crypto.NewKeyFromReader(bytes.NewReader(timingPassphrase))
if err != nil {
return 0, err
}
defer passphrase.Wipe()
// Be sure to measure CPU time, not wall time (time.Now)
begin := cpuTimeInNanoseconds()
hash, err := crypto.PassphraseHash(passphrase, timingSalt, costs)
if err == nil {
hash.Wipe()
}
end := cpuTimeInNanoseconds()
// This uses a lot of memory, run the garbage collector
runtime.GC()
return time.Duration((end - begin) / costs.Parallelism), nil
}
// cpuTimeInNanoseconds returns the nanosecond count based on the process's CPU usage.
// This number has no absolute meaning, only relative meaning to other calls.
func cpuTimeInNanoseconds() int64 {
var ts unix.Timespec
err := unix.ClockGettime(unix.CLOCK_PROCESS_CPUTIME_ID, &ts)
// ClockGettime fails if given a bad address or on a VERY old system.
util.NeverError(err)
return unix.TimespecToNsec(ts)
}

184
vendor/github.com/google/fscrypt/actions/context.go generated vendored Normal file
View File

@ -0,0 +1,184 @@
/*
* context.go - top-level interface to fscrypt packages
*
* Copyright 2017 Google Inc.
* Author: Joe Richey (joerichey@google.com)
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not
* use this file except in compliance with the License. You may obtain a copy of
* the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
*/
// Package actions is the high-level interface to the fscrypt packages. The
// functions here roughly correspond with commands for the tool in cmd/fscrypt.
// All of the actions include a significant amount of logging, so that good
// output can be provided for cmd/fscrypt's verbose mode.
// The top-level actions currently include:
// - Creating a new config file
// - Creating a context on which to perform actions
// - Creating, unlocking, and modifying Protectors
// - Creating, unlocking, and modifying Policies
package actions
import (
"log"
"os/user"
"github.com/pkg/errors"
"github.com/google/fscrypt/filesystem"
"github.com/google/fscrypt/keyring"
"github.com/google/fscrypt/metadata"
"github.com/google/fscrypt/util"
)
// ErrLocked indicates that the key hasn't been unwrapped yet.
var ErrLocked = errors.New("key needs to be unlocked first")
// Context contains the necessary global state to perform most of fscrypt's
// actions.
type Context struct {
// Config is the struct loaded from the global config file. It can be
// modified after being loaded to customise parameters.
Config *metadata.Config
// Mount is the filesystem relative to which all Protectors and Policies
// are added, edited, removed, and applied, and to which policies using
// the filesystem keyring are provisioned.
Mount *filesystem.Mount
// TargetUser is the user for whom protectors are created, and to whose
// keyring policies using the user keyring are provisioned. It's also
// the user for whom the keys are claimed in the filesystem keyring when
// v2 policies are provisioned.
TargetUser *user.User
// TrustedUser is the user for whom policies and protectors are allowed
// to be read. Specifically, if TrustedUser is set, then only
// policies and protectors owned by TrustedUser or by root will be
// allowed to be read. If it's nil, then all policies and protectors
// the process has filesystem-level read access to will be allowed.
TrustedUser *user.User
}
// NewContextFromPath makes a context for the filesystem containing the
// specified path and whose Config is loaded from the global config file. On
// success, the Context contains a valid Config and Mount. The target user
// defaults to the current effective user if none is specified.
func NewContextFromPath(path string, targetUser *user.User) (*Context, error) {
ctx, err := newContextFromUser(targetUser)
if err != nil {
return nil, err
}
if ctx.Mount, err = filesystem.FindMount(path); err != nil {
return nil, err
}
log.Printf("%s is on %s filesystem %q (%s)", path,
ctx.Mount.FilesystemType, ctx.Mount.Path, ctx.Mount.Device)
return ctx, nil
}
// NewContextFromMountpoint makes a context for the filesystem at the specified
// mountpoint and whose Config is loaded from the global config file. On
// success, the Context contains a valid Config and Mount. The target user
// defaults to the current effective user if none is specified.
func NewContextFromMountpoint(mountpoint string, targetUser *user.User) (*Context, error) {
ctx, err := newContextFromUser(targetUser)
if err != nil {
return nil, err
}
if ctx.Mount, err = filesystem.GetMount(mountpoint); err != nil {
return nil, err
}
log.Printf("found %s filesystem %q (%s)", ctx.Mount.FilesystemType,
ctx.Mount.Path, ctx.Mount.Device)
return ctx, nil
}
// newContextFromUser makes a context with the corresponding target user, and
// whose Config is loaded from the global config file. If the target user is
// nil, the effective user is used.
func newContextFromUser(targetUser *user.User) (*Context, error) {
var err error
if targetUser == nil {
if targetUser, err = util.EffectiveUser(); err != nil {
return nil, err
}
}
ctx := &Context{TargetUser: targetUser}
if ctx.Config, err = getConfig(); err != nil {
return nil, err
}
// By default, when running as a non-root user we only read policies and
// protectors owned by the user or root. When running as root, we allow
// reading all policies and protectors.
if !ctx.Config.GetAllowCrossUserMetadata() && !util.IsUserRoot() {
ctx.TrustedUser, err = util.EffectiveUser()
if err != nil {
return nil, err
}
}
log.Printf("creating context for user %q", targetUser.Username)
return ctx, nil
}
// checkContext verifies that the context contains a valid config and a mount
// which is being used with fscrypt.
func (ctx *Context) checkContext() error {
if err := ctx.Config.CheckValidity(); err != nil {
return &ErrBadConfig{ctx.Config, err}
}
return ctx.Mount.CheckSetup(ctx.TrustedUser)
}
func (ctx *Context) getKeyringOptions() *keyring.Options {
return &keyring.Options{
Mount: ctx.Mount,
User: ctx.TargetUser,
UseFsKeyringForV1Policies: ctx.Config.GetUseFsKeyringForV1Policies(),
}
}
// getProtectorOption returns the ProtectorOption for the protector on the
// context's mountpoint with the specified descriptor.
func (ctx *Context) getProtectorOption(protectorDescriptor string) *ProtectorOption {
mnt, data, err := ctx.Mount.GetProtector(protectorDescriptor, ctx.TrustedUser)
if err != nil {
return &ProtectorOption{ProtectorInfo{}, nil, err}
}
info := ProtectorInfo{data}
// No linked path if on the same mountpoint
if mnt == ctx.Mount {
return &ProtectorOption{info, nil, nil}
}
return &ProtectorOption{info, mnt, nil}
}
// ProtectorOptions creates a slice of all the options for all of the Protectors
// on the Context's mountpoint.
func (ctx *Context) ProtectorOptions() ([]*ProtectorOption, error) {
if err := ctx.checkContext(); err != nil {
return nil, err
}
descriptors, err := ctx.Mount.ListProtectors(ctx.TrustedUser)
if err != nil {
return nil, err
}
options := make([]*ProtectorOption, len(descriptors))
for i, descriptor := range descriptors {
options[i] = ctx.getProtectorOption(descriptor)
}
return options, nil
}
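Taken together, a hedged sketch of the typical call sequence against this actions API (the flow and the assumption that suitable OptionFunc/KeyFunc callbacks exist are illustrative), mirroring how the CSI code above uses it:

// exampleUnlockFlow is an illustrative (non-vendored) walk-through: build a
// Context for a mountpoint, look up the policy protecting a directory, unlock
// it via the caller-supplied callbacks and provision the key to the kernel.
func exampleUnlockFlow(mountpoint, dirPath string, optionFn OptionFunc, keyFn KeyFunc) error {
	ctx, err := NewContextFromMountpoint(mountpoint, nil)
	if err != nil {
		return err
	}
	policy, err := GetPolicyFromPath(ctx, dirPath)
	if err != nil {
		return err
	}
	// Wipe the in-memory policy key when done; the provisioned kernel key stays.
	defer policy.Lock()
	if err := policy.Unlock(optionFn, keyFn); err != nil {
		return err
	}
	return policy.Provision()
}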

622
vendor/github.com/google/fscrypt/actions/policy.go generated vendored Normal file
View File

@ -0,0 +1,622 @@
/*
* policy.go - functions for dealing with policies
*
* Copyright 2017 Google Inc.
* Author: Joe Richey (joerichey@google.com)
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not
* use this file except in compliance with the License. You may obtain a copy of
* the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
*/
package actions
import (
"fmt"
"log"
"os"
"os/user"
"github.com/golang/protobuf/proto"
"github.com/pkg/errors"
"github.com/google/fscrypt/crypto"
"github.com/google/fscrypt/filesystem"
"github.com/google/fscrypt/keyring"
"github.com/google/fscrypt/metadata"
"github.com/google/fscrypt/util"
)
// ErrAccessDeniedPossiblyV2 indicates that a directory's encryption policy
// couldn't be retrieved due to "permission denied", but it looks like it's due
// to the directory using a v2 policy but the kernel not supporting it.
type ErrAccessDeniedPossiblyV2 struct {
DirPath string
}
func (err *ErrAccessDeniedPossiblyV2) Error() string {
return fmt.Sprintf(`
failed to get encryption policy of %s: permission denied
This may be caused by the directory using a v2 encryption policy and the
current kernel not supporting it. If indeed the case, then this
directory can only be used on kernel v5.4 and later. You can create
directories accessible on older kernels by changing policy_version to 1
in %s.`,
err.DirPath, ConfigFileLocation)
}
// ErrAlreadyProtected indicates that a policy is already protected by the given
// protector.
type ErrAlreadyProtected struct {
Policy *Policy
Protector *Protector
}
func (err *ErrAlreadyProtected) Error() string {
return fmt.Sprintf("policy %s is already protected by protector %s",
err.Policy.Descriptor(), err.Protector.Descriptor())
}
// ErrDifferentFilesystem indicates that a policy can't be applied to a
// directory on a different filesystem.
type ErrDifferentFilesystem struct {
PolicyMount *filesystem.Mount
PathMount *filesystem.Mount
}
func (err *ErrDifferentFilesystem) Error() string {
return fmt.Sprintf(`cannot apply policy from filesystem %q to a
directory on filesystem %q. Policies may only protect files on the same
filesystem.`, err.PolicyMount.Path, err.PathMount.Path)
}
// ErrMissingPolicyMetadata indicates that a directory is encrypted but its
// policy metadata cannot be found.
type ErrMissingPolicyMetadata struct {
Mount *filesystem.Mount
DirPath string
Descriptor string
}
func (err *ErrMissingPolicyMetadata) Error() string {
return fmt.Sprintf(`filesystem %q does not contain the policy metadata
for %q. This directory has either been encrypted with another tool (such
as e4crypt), or the file %q has been deleted.`,
err.Mount.Path, err.DirPath,
err.Mount.PolicyPath(err.Descriptor))
}
// ErrNotProtected indicates that the given policy is not protected by the given
// protector.
type ErrNotProtected struct {
PolicyDescriptor string
ProtectorDescriptor string
}
func (err *ErrNotProtected) Error() string {
return fmt.Sprintf(`policy %s is not protected by protector %s`,
err.PolicyDescriptor, err.ProtectorDescriptor)
}
// ErrOnlyProtector indicates that the last protector can't be removed from a
// policy.
type ErrOnlyProtector struct {
Policy *Policy
}
func (err *ErrOnlyProtector) Error() string {
return fmt.Sprintf(`cannot remove the only protector from policy %s. A
policy must have at least one protector.`, err.Policy.Descriptor())
}
// ErrPolicyMetadataMismatch indicates that the policy metadata for an encrypted
// directory is inconsistent with that directory.
type ErrPolicyMetadataMismatch struct {
DirPath string
Mount *filesystem.Mount
PathData *metadata.PolicyData
MountData *metadata.PolicyData
}
func (err *ErrPolicyMetadataMismatch) Error() string {
return fmt.Sprintf(`inconsistent metadata between encrypted directory %q
and its corresponding metadata file %q.
Directory has descriptor:%s %s
Metadata file has descriptor:%s %s`,
err.DirPath, err.Mount.PolicyPath(err.PathData.KeyDescriptor),
err.PathData.KeyDescriptor, err.PathData.Options,
err.MountData.KeyDescriptor, err.MountData.Options)
}
// PurgeAllPolicies removes all policy keys on the filesystem from the kernel
// keyring. In order for this to fully take effect, the filesystem may also need
// to be unmounted or caches dropped.
func PurgeAllPolicies(ctx *Context) error {
if err := ctx.checkContext(); err != nil {
return err
}
policies, err := ctx.Mount.ListPolicies(nil)
if err != nil {
return err
}
for _, policyDescriptor := range policies {
err = keyring.RemoveEncryptionKey(policyDescriptor, ctx.getKeyringOptions(), false)
switch errors.Cause(err) {
case nil, keyring.ErrKeyNotPresent:
// We don't care if the key has already been removed
case keyring.ErrKeyFilesOpen:
log.Printf("Key for policy %s couldn't be fully removed because some files are still in-use",
policyDescriptor)
case keyring.ErrKeyAddedByOtherUsers:
log.Printf("Key for policy %s couldn't be fully removed because other user(s) have added it too",
policyDescriptor)
default:
return err
}
}
return nil
}
// Policy represents an unlocked policy, so it contains the PolicyData as well
// as the actual protector key. These unlocked Polices can then be applied to a
// directory, or have their key material inserted into the keyring (which will
// allow encrypted files to be accessed). As with the key struct, a Policy
// should be wiped after use.
type Policy struct {
Context *Context
data *metadata.PolicyData
key *crypto.Key
created bool
ownerIfCreating *user.User
newLinkedProtectors []string
}
// CreatePolicy creates a Policy protected by given Protector and stores the
// appropriate data on the filesystem. On error, no data is changed on the
// filesystem.
func CreatePolicy(ctx *Context, protector *Protector) (*Policy, error) {
if err := ctx.checkContext(); err != nil {
return nil, err
}
// Randomly create the underlying policy key (and wipe if we fail)
key, err := crypto.NewRandomKey(metadata.PolicyKeyLen)
if err != nil {
return nil, err
}
keyDescriptor, err := crypto.ComputeKeyDescriptor(key, ctx.Config.Options.PolicyVersion)
if err != nil {
key.Wipe()
return nil, err
}
policy := &Policy{
Context: ctx,
data: &metadata.PolicyData{
Options: ctx.Config.Options,
KeyDescriptor: keyDescriptor,
},
key: key,
created: true,
}
policy.ownerIfCreating, err = getOwnerOfMetadataForProtector(protector)
if err != nil {
policy.Lock()
return nil, err
}
if err = policy.AddProtector(protector); err != nil {
policy.Lock()
return nil, err
}
return policy, nil
}
// GetPolicy retrieves a locked policy with a specific descriptor. The Policy is
// still locked in this case, so it must be unlocked before using certain
// methods.
func GetPolicy(ctx *Context, descriptor string) (*Policy, error) {
if err := ctx.checkContext(); err != nil {
return nil, err
}
data, err := ctx.Mount.GetPolicy(descriptor, ctx.TrustedUser)
if err != nil {
return nil, err
}
log.Printf("got data for %s from %q", descriptor, ctx.Mount.Path)
return &Policy{Context: ctx, data: data}, nil
}
// GetPolicyFromPath returns the locked policy descriptor for a file on the
// filesystem. The Policy is still locked in this case, so it must be unlocked
// before using certain methods. An error is returned if the metadata is
// inconsistent or the path is not encrypted.
func GetPolicyFromPath(ctx *Context, path string) (*Policy, error) {
if err := ctx.checkContext(); err != nil {
return nil, err
}
// We double check that the options agree for both the data we get from
// the path, and the data we get from the mountpoint.
pathData, err := metadata.GetPolicy(path)
err = ctx.Mount.EncryptionSupportError(err)
if err != nil {
// On kernels that don't support v2 encryption policies, trying
// to open a directory with a v2 policy simply gave EACCES. This
// is ambiguous with other errors, but try to detect this case
// and show a better error message.
if os.IsPermission(err) &&
filesystem.HaveReadAccessTo(path) &&
!keyring.IsFsKeyringSupported(ctx.Mount) {
return nil, &ErrAccessDeniedPossiblyV2{path}
}
return nil, err
}
descriptor := pathData.KeyDescriptor
log.Printf("found policy %s for %q", descriptor, path)
mountData, err := ctx.Mount.GetPolicy(descriptor, ctx.TrustedUser)
if err != nil {
log.Printf("getting policy metadata: %v", err)
if _, ok := err.(*filesystem.ErrPolicyNotFound); ok {
return nil, &ErrMissingPolicyMetadata{ctx.Mount, path, descriptor}
}
return nil, err
}
log.Printf("found data for policy %s on %q", descriptor, ctx.Mount.Path)
if !proto.Equal(pathData.Options, mountData.Options) ||
pathData.KeyDescriptor != mountData.KeyDescriptor {
return nil, &ErrPolicyMetadataMismatch{path, ctx.Mount, pathData, mountData}
}
log.Print("data from filesystem and path agree")
return &Policy{Context: ctx, data: mountData}, nil
}
// ProtectorOptions creates a slice of ProtectorOptions for the protectors
// protecting this policy.
func (policy *Policy) ProtectorOptions() []*ProtectorOption {
options := make([]*ProtectorOption, len(policy.data.WrappedPolicyKeys))
for i, wrappedPolicyKey := range policy.data.WrappedPolicyKeys {
options[i] = policy.Context.getProtectorOption(wrappedPolicyKey.ProtectorDescriptor)
}
return options
}
// ProtectorDescriptors creates a slice of the Protector descriptors for the
// protectors protecting this policy.
func (policy *Policy) ProtectorDescriptors() []string {
descriptors := make([]string, len(policy.data.WrappedPolicyKeys))
for i, wrappedPolicyKey := range policy.data.WrappedPolicyKeys {
descriptors[i] = wrappedPolicyKey.ProtectorDescriptor
}
return descriptors
}
// Descriptor returns the key descriptor for this policy.
func (policy *Policy) Descriptor() string {
return policy.data.KeyDescriptor
}
// Options returns the encryption options of this policy.
func (policy *Policy) Options() *metadata.EncryptionOptions {
return policy.data.Options
}
// Version returns the version of this policy.
func (policy *Policy) Version() int64 {
return policy.data.Options.PolicyVersion
}
// Destroy removes a policy from the filesystem. It also removes any new
// protector links that were created for the policy. This does *not* wipe the
// policy's internal key from memory; use Lock() to do that.
func (policy *Policy) Destroy() error {
for _, protectorDescriptor := range policy.newLinkedProtectors {
policy.Context.Mount.RemoveProtector(protectorDescriptor)
}
return policy.Context.Mount.RemovePolicy(policy.Descriptor())
}
// Revert destroys a policy if it was created, but does nothing if it was just
// queried from the filesystem.
func (policy *Policy) Revert() error {
if !policy.created {
return nil
}
return policy.Destroy()
}
func (policy *Policy) String() string {
return fmt.Sprintf("Policy: %s\nMountpoint: %s\nOptions: %v\nProtectors:%+v",
policy.Descriptor(), policy.Context.Mount, policy.data.Options,
policy.ProtectorDescriptors())
}
// Unlock unwraps the Policy's internal key. As a Protector is needed to unlock
// the Policy, callbacks to select the Policy and get the key are needed. This
// method will retry the keyFn as necessary to get the correct key for the
// selected protector. Does nothing if policy is already unlocked.
func (policy *Policy) Unlock(optionFn OptionFunc, keyFn KeyFunc) error {
if policy.key != nil {
return nil
}
options := policy.ProtectorOptions()
// The OptionFunc indicates which option and wrapped key we should use.
idx, err := optionFn(policy.Descriptor(), options)
if err != nil {
return err
}
option := options[idx]
if option.LoadError != nil {
return option.LoadError
}
log.Printf("protector %s selected in callback", option.Descriptor())
protectorKey, err := unwrapProtectorKey(option.ProtectorInfo, keyFn)
if err != nil {
return err
}
defer protectorKey.Wipe()
log.Printf("unwrapping policy %s with protector", policy.Descriptor())
wrappedPolicyKey := policy.data.WrappedPolicyKeys[idx].WrappedKey
policy.key, err = crypto.Unwrap(protectorKey, wrappedPolicyKey)
return err
}
// UnlockWithProtector uses an unlocked Protector to unlock a policy. An error
// is returned if the Protector is not yet unlocked or does not protect the
// policy. Does nothing if policy is already unlocked.
func (policy *Policy) UnlockWithProtector(protector *Protector) error {
if policy.key != nil {
return nil
}
if protector.key == nil {
return ErrLocked
}
idx, ok := policy.findWrappedKeyIndex(protector.Descriptor())
if !ok {
return &ErrNotProtected{policy.Descriptor(), protector.Descriptor()}
}
var err error
wrappedPolicyKey := policy.data.WrappedPolicyKeys[idx].WrappedKey
policy.key, err = crypto.Unwrap(protector.key, wrappedPolicyKey)
return err
}
// Lock wipes a Policy's internal Key. It should always be called after using a
// Policy. This is often done with a defer statement. There is no effect if
// called multiple times.
func (policy *Policy) Lock() error {
err := policy.key.Wipe()
policy.key = nil
return err
}
// UsesProtector returns if the policy is protected with the protector
func (policy *Policy) UsesProtector(protector *Protector) bool {
_, ok := policy.findWrappedKeyIndex(protector.Descriptor())
return ok
}
// getOwnerOfMetadataForProtector returns the User to whom the owner of any new
// policies or protector links for the given protector should be set.
//
// This will return a non-nil value only when the protector is a login protector
// and the process is running as root. In this scenario, root is setting up
// encryption on the user's behalf, so we need to make new policies and
// protector links owned by the user (rather than root) to allow them to be read
// by the user, just like the login protector itself which is handled elsewhere.
func getOwnerOfMetadataForProtector(protector *Protector) (*user.User, error) {
if protector.data.Source == metadata.SourceType_pam_passphrase && util.IsUserRoot() {
owner, err := util.UserFromUID(protector.data.Uid)
if err != nil {
return nil, err
}
return owner, nil
}
return nil, nil
}
// AddProtector updates the data that is wrapping the Policy Key so that the
// provided Protector is now protecting the specified Policy. If an error is
// returned, no data has been changed. If the policy and protector are on
// different filesystems, a link will be created between them. The policy and
// protector must both be unlocked.
func (policy *Policy) AddProtector(protector *Protector) error {
if policy.UsesProtector(protector) {
return &ErrAlreadyProtected{policy, protector}
}
if policy.key == nil || protector.key == nil {
return ErrLocked
}
// If the protector is on a different filesystem, we need to add a link
// to it on the policy's filesystem.
if policy.Context.Mount != protector.Context.Mount {
log.Printf("policy on %s\n protector on %s\n", policy.Context.Mount, protector.Context.Mount)
ownerIfCreating, err := getOwnerOfMetadataForProtector(protector)
if err != nil {
return err
}
isNewLink, err := policy.Context.Mount.AddLinkedProtector(
protector.Descriptor(), protector.Context.Mount,
protector.Context.TrustedUser, ownerIfCreating)
if err != nil {
return err
}
if isNewLink {
policy.newLinkedProtectors = append(policy.newLinkedProtectors,
protector.Descriptor())
}
} else {
log.Printf("policy and protector both on %q", policy.Context.Mount)
}
// Create the wrapped policy key
wrappedKey, err := crypto.Wrap(protector.key, policy.key)
if err != nil {
return err
}
// Append the wrapped key to the data
policy.addKey(&metadata.WrappedPolicyKey{
ProtectorDescriptor: protector.Descriptor(),
WrappedKey: wrappedKey,
})
if err := policy.commitData(); err != nil {
// revert the addition on failure
policy.removeKey(len(policy.data.WrappedPolicyKeys) - 1)
return err
}
return nil
}
// RemoveProtector updates the data that is wrapping the Policy Key so that the
// protector with the given descriptor is no longer protecting the specified
// Policy. If an error is returned, no data has been changed. Note that the
// protector itself won't be removed, nor will a link to the protector be
// removed (in the case where the protector and policy are on different
// filesystems). The policy can be locked or unlocked.
func (policy *Policy) RemoveProtector(protectorDescriptor string) error {
idx, ok := policy.findWrappedKeyIndex(protectorDescriptor)
if !ok {
return &ErrNotProtected{policy.Descriptor(), protectorDescriptor}
}
if len(policy.data.WrappedPolicyKeys) == 1 {
return &ErrOnlyProtector{policy}
}
// Remove the wrapped key from the data
toRemove := policy.removeKey(idx)
if err := policy.commitData(); err != nil {
// revert the removal on failure (order is irrelevant)
policy.addKey(toRemove)
return err
}
return nil
}
// Apply sets the Policy on a specified directory. Currently we impose the
// additional constraint that policies and the directories they are applied to
// must reside on the same filesystem.
func (policy *Policy) Apply(path string) error {
if pathMount, err := filesystem.FindMount(path); err != nil {
return err
} else if pathMount != policy.Context.Mount {
return &ErrDifferentFilesystem{policy.Context.Mount, pathMount}
}
err := metadata.SetPolicy(path, policy.data)
return policy.Context.Mount.EncryptionSupportError(err)
}
// GetProvisioningStatus returns the status of this policy's key in the keyring.
func (policy *Policy) GetProvisioningStatus() keyring.KeyStatus {
status, _ := keyring.GetEncryptionKeyStatus(policy.Descriptor(),
policy.Context.getKeyringOptions())
return status
}
// IsProvisionedByTargetUser returns true if the policy's key is present in the
// target kernel keyring, but not if that keyring is a filesystem keyring and
the key has only been added by users other than Context.TargetUser.
func (policy *Policy) IsProvisionedByTargetUser() bool {
return policy.GetProvisioningStatus() == keyring.KeyPresent
}
// Provision inserts the Policy key into the kernel keyring. This allows reading
// and writing of files encrypted with this directory. Requires unlocked Policy.
func (policy *Policy) Provision() error {
if policy.key == nil {
return ErrLocked
}
return keyring.AddEncryptionKey(policy.key, policy.Descriptor(),
policy.Context.getKeyringOptions())
}
// Deprovision removes the Policy key from the kernel keyring. This prevents
// reading and writing to the directory --- unless the target keyring is a user
// keyring, in which case caches must be dropped too. If the Policy key was
// already removed, returns keyring.ErrKeyNotPresent.
func (policy *Policy) Deprovision(allUsers bool) error {
return keyring.RemoveEncryptionKey(policy.Descriptor(),
policy.Context.getKeyringOptions(), allUsers)
}
// NeedsUserKeyring returns true if Provision and Deprovision for this policy
// will use a user keyring (deprecated), not a filesystem keyring.
func (policy *Policy) NeedsUserKeyring() bool {
return policy.Version() == 1 && !policy.Context.Config.GetUseFsKeyringForV1Policies()
}
// NeedsRootToProvision returns true if Provision and Deprovision will require
// root for this policy in the current configuration.
func (policy *Policy) NeedsRootToProvision() bool {
return policy.Version() == 1 && policy.Context.Config.GetUseFsKeyringForV1Policies()
}
// CanBeAppliedWithoutProvisioning returns true if this process can apply this
// policy to a directory without first calling Provision.
func (policy *Policy) CanBeAppliedWithoutProvisioning() bool {
return policy.Version() == 1 || util.IsUserRoot()
}
// commitData writes the Policy's current data to the filesystem.
func (policy *Policy) commitData() error {
return policy.Context.Mount.AddPolicy(policy.data, policy.ownerIfCreating)
}
// findWrappedKeyIndex returns the index of the wrapped policy key
// corresponding to this policy and protector. The returned bool is false if no
// wrapped policy key corresponds to the specified protector, true otherwise.
func (policy *Policy) findWrappedKeyIndex(protectorDescriptor string) (int, bool) {
for idx, wrappedPolicyKey := range policy.data.WrappedPolicyKeys {
if wrappedPolicyKey.ProtectorDescriptor == protectorDescriptor {
return idx, true
}
}
return 0, false
}
// addKey adds the wrapped policy key to end of the wrapped key data.
func (policy *Policy) addKey(toAdd *metadata.WrappedPolicyKey) {
policy.data.WrappedPolicyKeys = append(policy.data.WrappedPolicyKeys, toAdd)
}
// removeKey removes the wrapped policy key at the specified index. This
// does not preserve the order of the wrapped policy key array. If no index is
// specified the last key is removed.
func (policy *Policy) removeKey(index int) *metadata.WrappedPolicyKey {
lastIdx := len(policy.data.WrappedPolicyKeys) - 1
toRemove := policy.data.WrappedPolicyKeys[index]
// See https://github.com/golang/go/wiki/SliceTricks
policy.data.WrappedPolicyKeys[index] = policy.data.WrappedPolicyKeys[lastIdx]
policy.data.WrappedPolicyKeys[lastIdx] = nil
policy.data.WrappedPolicyKeys = policy.data.WrappedPolicyKeys[:lastIdx]
return toRemove
}
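
The Policy methods above compose into a short workflow: unlock the policy with a protector, provision its key into the kernel keyring, apply it to a directory, and wipe the in-memory key afterwards. The following is a minimal sketch (not part of the vendored file), assuming the caller already holds an unlocked *actions.Policy obtained elsewhere in this package:

package example

import "github.com/google/fscrypt/actions"

// provisionAndApply provisions an already-unlocked Policy and applies it to
// dir, which must be on the same filesystem as the policy's metadata.
func provisionAndApply(policy *actions.Policy, dir string) error {
	// Wipe the in-memory policy key when we are done, as the docs advise.
	defer policy.Lock()

	// Insert the key into the kernel keyring so the directory's files
	// can be read and written once the policy is applied.
	if err := policy.Provision(); err != nil {
		return err
	}
	return policy.Apply(dir)
}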

300
vendor/github.com/google/fscrypt/actions/protector.go generated vendored Normal file
View File

@ -0,0 +1,300 @@
/*
* protector.go - functions for dealing with protectors
*
* Copyright 2017 Google Inc.
* Author: Joe Richey (joerichey@google.com)
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not
* use this file except in compliance with the License. You may obtain a copy of
* the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
*/
package actions
import (
"fmt"
"log"
"os/user"
"github.com/google/fscrypt/crypto"
"github.com/google/fscrypt/metadata"
"github.com/google/fscrypt/util"
)
// LoginProtectorMountpoint is the mountpoint where login protectors are stored.
// This can be overridden by the user of this package.
var LoginProtectorMountpoint = "/"
// ErrLoginProtectorExists indicates that a user already has a login protector.
type ErrLoginProtectorExists struct {
User *user.User
}
func (err *ErrLoginProtectorExists) Error() string {
return fmt.Sprintf("user %q already has a login protector", err.User.Username)
}
// ErrLoginProtectorName indicates that a name was given for a login protector.
type ErrLoginProtectorName struct {
Name string
User *user.User
}
func (err *ErrLoginProtectorName) Error() string {
return fmt.Sprintf(`cannot assign name %q to new login protector for
user %q because login protectors are identified by user, not by name.`,
err.Name, err.User.Username)
}
// ErrMissingProtectorName indicates that a protector name is needed.
type ErrMissingProtectorName struct {
Source metadata.SourceType
}
func (err *ErrMissingProtectorName) Error() string {
return fmt.Sprintf("%s protectors must be named", err.Source)
}
// ErrProtectorNameExists indicates that a protector name already exists.
type ErrProtectorNameExists struct {
Name string
}
func (err *ErrProtectorNameExists) Error() string {
return fmt.Sprintf("there is already a protector named %q", err.Name)
}
// checkForProtectorWithName returns an error if there is already a protector
// on the filesystem with a specific name (or if we cannot read the necessary
// data).
func checkForProtectorWithName(ctx *Context, name string) error {
options, err := ctx.ProtectorOptions()
if err != nil {
return err
}
for _, option := range options {
if option.Name() == name {
return &ErrProtectorNameExists{name}
}
}
return nil
}
// checkIfUserHasLoginProtector returns an error if there is already a login
// protector on the filesystem for a specific user (or if we cannot read the
// necessary data).
func checkIfUserHasLoginProtector(ctx *Context, uid int64) error {
options, err := ctx.ProtectorOptions()
if err != nil {
return err
}
for _, option := range options {
if option.Source() == metadata.SourceType_pam_passphrase && option.UID() == uid {
return &ErrLoginProtectorExists{ctx.TargetUser}
}
}
return nil
}
// Protector represents an unlocked protector, so it contains the ProtectorData
// as well as the actual protector key. These unlocked Protectors are necessary
to unlock policies and create new policies. As with the key struct, a
// Protector should be wiped after use.
type Protector struct {
Context *Context
data *metadata.ProtectorData
key *crypto.Key
created bool
ownerIfCreating *user.User
}
// CreateProtector creates an unlocked protector with a given name (name only
// needed for custom and raw protector types). The keyFn provided to create the
// Protector key will only be called once. If an error is returned, no data has
// been changed on the filesystem.
func CreateProtector(ctx *Context, name string, keyFn KeyFunc, owner *user.User) (*Protector, error) {
if err := ctx.checkContext(); err != nil {
return nil, err
}
// Sanity checks for names
if ctx.Config.Source == metadata.SourceType_pam_passphrase {
// login protectors don't need a name (we use the username instead)
if name != "" {
return nil, &ErrLoginProtectorName{name, ctx.TargetUser}
}
} else {
// non-login protectors need a name (so we can distinguish between them)
if name == "" {
return nil, &ErrMissingProtectorName{ctx.Config.Source}
}
// we don't want to duplicate naming
if err := checkForProtectorWithName(ctx, name); err != nil {
return nil, err
}
}
var err error
protector := &Protector{
Context: ctx,
data: &metadata.ProtectorData{
Name: name,
Source: ctx.Config.Source,
},
created: true,
ownerIfCreating: owner,
}
// Extra data is needed for some SourceTypes
switch protector.data.Source {
case metadata.SourceType_pam_passphrase:
// As the pam passphrases are user specific, we also store the
// UID for this kind of source.
protector.data.Uid = int64(util.AtoiOrPanic(ctx.TargetUser.Uid))
// Make sure we aren't duplicating protectors
if err = checkIfUserHasLoginProtector(ctx, protector.data.Uid); err != nil {
return nil, err
}
fallthrough
case metadata.SourceType_custom_passphrase:
// Our passphrase sources need costs and a random salt.
if protector.data.Salt, err = crypto.NewRandomBuffer(metadata.SaltLen); err != nil {
return nil, err
}
protector.data.Costs = ctx.Config.HashCosts
}
// Randomly create the underlying protector key (and wipe if we fail)
if protector.key, err = crypto.NewRandomKey(metadata.InternalKeyLen); err != nil {
return nil, err
}
protector.data.ProtectorDescriptor, err = crypto.ComputeKeyDescriptor(protector.key, 1)
if err != nil {
protector.Lock()
return nil, err
}
if err = protector.Rewrap(keyFn); err != nil {
protector.Lock()
return nil, err
}
return protector, nil
}
// GetProtector retrieves a Protector with a specific descriptor. The Protector
// is still locked in this case, so it must be unlocked before using certain
// methods.
func GetProtector(ctx *Context, descriptor string) (*Protector, error) {
log.Printf("Getting protector %s", descriptor)
err := ctx.checkContext()
if err != nil {
return nil, err
}
protector := &Protector{Context: ctx}
protector.data, err = ctx.Mount.GetRegularProtector(descriptor, ctx.TrustedUser)
return protector, err
}
// GetProtectorFromOption retrieves a protector based on a protector option.
// If the option had a load error, this function returns that error. The
// Protector is still locked in this case, so it must be unlocked before using
// certain methods.
func GetProtectorFromOption(ctx *Context, option *ProtectorOption) (*Protector, error) {
log.Printf("Getting protector %s from option", option.Descriptor())
if err := ctx.checkContext(); err != nil {
return nil, err
}
if option.LoadError != nil {
return nil, option.LoadError
}
// Replace the context if this is a linked protector
if option.LinkedMount != nil {
ctx = &Context{ctx.Config, option.LinkedMount, ctx.TargetUser, ctx.TrustedUser}
}
return &Protector{Context: ctx, data: option.data}, nil
}
// Descriptor returns the protector descriptor.
func (protector *Protector) Descriptor() string {
return protector.data.ProtectorDescriptor
}
// Destroy removes a protector from the filesystem. The internal key should
// still be wiped with Lock().
func (protector *Protector) Destroy() error {
return protector.Context.Mount.RemoveProtector(protector.Descriptor())
}
// Revert destroys a protector if it was created, but does nothing if it was
// just queried from the filesystem.
func (protector *Protector) Revert() error {
if !protector.created {
return nil
}
return protector.Destroy()
}
func (protector *Protector) String() string {
return fmt.Sprintf("Protector: %s\nMountpoint: %s\nSource: %s\nName: %s\nCosts: %v\nUID: %d",
protector.Descriptor(), protector.Context.Mount, protector.data.Source,
protector.data.Name, protector.data.Costs, protector.data.Uid)
}
// Unlock unwraps the Protector's internal key. The keyFn provided to unwrap the
// Protector key will be retried as necessary to get the correct key. Lock()
// should be called after use. Does nothing if protector is already unlocked.
func (protector *Protector) Unlock(keyFn KeyFunc) (err error) {
if protector.key != nil {
return
}
protector.key, err = unwrapProtectorKey(ProtectorInfo{protector.data}, keyFn)
return
}
// Lock wipes a Protector's internal Key. It should always be called after using
// an unlocked Protector. This is often done with a defer statement. There is
// no effect if called multiple times.
func (protector *Protector) Lock() error {
err := protector.key.Wipe()
protector.key = nil
return err
}
// Rewrap updates the data that is wrapping the Protector Key. This is useful if
// a user's password has changed, for example. The keyFn provided to rewrap
// the Protector key will only be called once. Requires unlocked Protector.
func (protector *Protector) Rewrap(keyFn KeyFunc) error {
if protector.key == nil {
return ErrLocked
}
wrappingKey, err := getWrappingKey(ProtectorInfo{protector.data}, keyFn, false)
if err != nil {
return err
}
// Revert change to wrapped key on failure
oldWrappedKey := protector.data.WrappedKey
defer func() {
wrappingKey.Wipe()
if err != nil {
protector.data.WrappedKey = oldWrappedKey
}
}()
if protector.data.WrappedKey, err = crypto.Wrap(wrappingKey, protector.key); err != nil {
return err
}
return protector.Context.Mount.AddProtector(protector.data, protector.ownerIfCreating)
}
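
As a usage note (a sketch under assumptions, not upstream code): CreateProtector returns an unlocked Protector whose key function is called exactly once, and Policy.AddProtector then needs both the policy and the protector unlocked. A hypothetical helper tying the two together, assuming the context's config source is custom_passphrase and the passphrase arrives on stdin, could look like:

package example

import (
	"os"

	"github.com/google/fscrypt/actions"
	"github.com/google/fscrypt/crypto"
)

// addNamedProtector is a hypothetical helper: it creates a named
// custom-passphrase protector and wires it up to an already-unlocked policy,
// reverting the protector if that fails.
func addNamedProtector(ctx *actions.Context, policy *actions.Policy, name string) error {
	keyFn := func(info actions.ProtectorInfo, retry bool) (*crypto.Key, error) {
		// Assumption for the sketch: the passphrase is read from stdin.
		return crypto.NewKeyFromReader(os.Stdin)
	}
	protector, err := actions.CreateProtector(ctx, name, keyFn, nil)
	if err != nil {
		return err
	}
	defer protector.Lock()
	if err := policy.AddProtector(protector); err != nil {
		protector.Revert() // undo the protector we just created
		return err
	}
	return nil
}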

131
vendor/github.com/google/fscrypt/actions/recovery.go generated vendored Normal file
View File

@ -0,0 +1,131 @@
/*
* recovery.go - support for generating recovery passphrases
*
* Copyright 2019 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not
* use this file except in compliance with the License. You may obtain a copy of
* the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
*/
package actions
import (
"fmt"
"os"
"strconv"
"github.com/google/fscrypt/crypto"
"github.com/google/fscrypt/metadata"
"github.com/google/fscrypt/util"
)
// modifiedContextWithSource returns a copy of ctx with the protector source
// replaced by source.
func modifiedContextWithSource(ctx *Context, source metadata.SourceType) *Context {
modifiedConfig := *ctx.Config
modifiedConfig.Source = source
modifiedCtx := *ctx
modifiedCtx.Config = &modifiedConfig
return &modifiedCtx
}
// AddRecoveryPassphrase randomly generates a recovery passphrase and adds it as
// a custom_passphrase protector for the given Policy.
func AddRecoveryPassphrase(policy *Policy, dirname string) (*crypto.Key, *Protector, error) {
	// 20 random characters in a-z give about 94 bits of entropy, which is
	// far more than enough for a passphrase that additionally goes through
	// the usual passphrase hashing, making it extremely costly to brute force.
passphrase, err := crypto.NewRandomPassphrase(20)
if err != nil {
return nil, nil, err
}
defer func() {
if err != nil {
passphrase.Wipe()
}
}()
getPassphraseFn := func(info ProtectorInfo, retry bool) (*crypto.Key, error) {
// CreateProtector() wipes the passphrase, but in this case we
// still need it for later, so make a copy.
return passphrase.Clone()
}
var recoveryProtector *Protector
customCtx := modifiedContextWithSource(policy.Context, metadata.SourceType_custom_passphrase)
seq := 1
for {
// Automatically generate a name for the recovery protector.
name := "Recovery passphrase for " + dirname
if seq != 1 {
name += " (" + strconv.Itoa(seq) + ")"
}
recoveryProtector, err = CreateProtector(customCtx, name, getPassphraseFn, policy.ownerIfCreating)
if err == nil {
break
}
if _, ok := err.(*ErrProtectorNameExists); !ok {
return nil, nil, err
}
seq++
}
if err := policy.AddProtector(recoveryProtector); err != nil {
recoveryProtector.Revert()
return nil, nil, err
}
return passphrase, recoveryProtector, nil
}
// WriteRecoveryInstructions writes a recovery passphrase and instructions to a
// file. This file should initially be located in the encrypted directory
// protected by the passphrase itself. It's up to the user to store the
// passphrase in a different location if they actually need it.
func WriteRecoveryInstructions(recoveryPassphrase *crypto.Key, recoveryProtector *Protector,
policy *Policy, path string) error {
file, err := os.OpenFile(path, os.O_WRONLY|os.O_CREATE, 0600)
if err != nil {
return err
}
defer file.Close()
str := fmt.Sprintf(
`fscrypt automatically generated a recovery passphrase for this directory:
%s
It did this because you chose to protect this directory with your login
passphrase, but this directory is not on the root filesystem.
Copy this passphrase to a safe place if you want to still be able to unlock this
directory if you re-install the operating system or connect this storage media
to a different system (which would result in your login protector being lost).
To unlock this directory using this recovery passphrase, run 'fscrypt unlock'
and select the protector named %q.
If you want to disable recovery passphrase generation (not recommended),
re-create this directory and pass the --no-recovery option to 'fscrypt encrypt'.
Alternatively, you can remove this recovery passphrase protector using:
fscrypt metadata remove-protector-from-policy --force --protector=%s:%s --policy=%s:%s
It is safe to keep it around though, as the recovery passphrase is high-entropy.
`, recoveryPassphrase.Data(), recoveryProtector.data.Name,
recoveryProtector.Context.Mount.Path, recoveryProtector.data.ProtectorDescriptor,
policy.Context.Mount.Path, policy.data.KeyDescriptor)
if _, err = file.WriteString(str); err != nil {
return err
}
if recoveryProtector.ownerIfCreating != nil {
if err = util.Chown(file, recoveryProtector.ownerIfCreating); err != nil {
return err
}
}
return file.Sync()
}
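
To illustrate the intended call sequence (a sketch; the instructions file name below is made up, not mandated by this package): generate the recovery passphrase, attach its protector to the policy, write the instructions inside the protected directory, and wipe the passphrase afterwards.

package example

import (
	"path/filepath"

	"github.com/google/fscrypt/actions"
)

// addRecovery attaches a recovery passphrase protector to policy and writes
// the recovery instructions into dir.
func addRecovery(policy *actions.Policy, dir string) error {
	passphrase, protector, err := actions.AddRecoveryPassphrase(policy, filepath.Base(dir))
	if err != nil {
		return err
	}
	defer passphrase.Wipe()

	// Written inside the directory it protects; the user is expected to
	// copy the passphrase somewhere safe.
	readme := filepath.Join(dir, "fscrypt_recovery_readme.txt")
	return actions.WriteRecoveryInstructions(passphrase, protector, policy, readme)
}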

228
vendor/github.com/google/fscrypt/crypto/crypto.go generated vendored Normal file
View File

@ -0,0 +1,228 @@
/*
* crypto.go - Cryptographic algorithms used by the rest of fscrypt.
*
* Copyright 2017 Google Inc.
* Author: Joe Richey (joerichey@google.com)
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not
* use this file except in compliance with the License. You may obtain a copy of
* the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
*/
// Package crypto manages all the cryptography for fscrypt. This includes:
// - Key management (key.go)
// - Securely holding keys in memory
// - Making recovery keys
// - Randomness (rand.go)
// - Cryptographic algorithms (crypto.go)
// - encryption (AES256-CTR)
// - authentication (SHA256-based HMAC)
// - key stretching (SHA256-based HKDF)
// - key wrapping/unwrapping (Encrypt then MAC)
// - passphrase-based key derivation (Argon2id)
// - key descriptor computation (double SHA512, or HKDF-SHA512)
package crypto
import (
"crypto/aes"
"crypto/cipher"
"crypto/hmac"
"crypto/sha256"
"crypto/sha512"
"encoding/hex"
"io"
"github.com/pkg/errors"
"golang.org/x/crypto/argon2"
"golang.org/x/crypto/hkdf"
"github.com/google/fscrypt/metadata"
"github.com/google/fscrypt/util"
)
// Crypto error values
var (
ErrBadAuth = errors.New("key authentication check failed")
ErrRecoveryCode = errors.New("invalid recovery code")
ErrMlockUlimit = errors.New("could not lock key in memory")
)
// panicInputLength panics if "name" has invalid length (expected != actual)
func panicInputLength(name string, expected, actual int) {
if err := util.CheckValidLength(expected, actual); err != nil {
panic(errors.Wrap(err, name))
}
}
// checkWrappingKey returns an error if the wrapping key has the wrong length
func checkWrappingKey(wrappingKey *Key) error {
err := util.CheckValidLength(metadata.InternalKeyLen, wrappingKey.Len())
return errors.Wrap(err, "wrapping key")
}
// stretchKey stretches a key of length InternalKeyLen using unsalted HKDF to
// make two keys of length InternalKeyLen.
func stretchKey(key *Key) (encKey, authKey *Key) {
panicInputLength("hkdf key", metadata.InternalKeyLen, key.Len())
// The new hkdf function uses the hash and key to create a reader that
// can be used to securely initialize multiple keys. This means that
// reads on the hkdf give independent cryptographic keys. The hkdf will
// also always have enough entropy to read two keys.
hkdf := hkdf.New(sha256.New, key.data, nil, nil)
encKey, err := NewFixedLengthKeyFromReader(hkdf, metadata.InternalKeyLen)
util.NeverError(err)
authKey, err = NewFixedLengthKeyFromReader(hkdf, metadata.InternalKeyLen)
util.NeverError(err)
return
}
// aesCTR runs AES256-CTR on the input using the provided key and iv. This
// function can be used to either encrypt or decrypt input of any size. Note
// that input and output must be the same size.
func aesCTR(key *Key, iv, input, output []byte) {
panicInputLength("aesCTR key", metadata.InternalKeyLen, key.Len())
panicInputLength("aesCTR iv", metadata.IVLen, len(iv))
panicInputLength("aesCTR output", len(input), len(output))
blockCipher, err := aes.NewCipher(key.data)
util.NeverError(err) // Key is checked to have correct length
stream := cipher.NewCTR(blockCipher, iv)
stream.XORKeyStream(output, input)
}
// getHMAC returns the SHA256-based HMAC of some data using the provided key.
func getHMAC(key *Key, data ...[]byte) []byte {
panicInputLength("hmac key", metadata.InternalKeyLen, key.Len())
mac := hmac.New(sha256.New, key.data)
for _, buffer := range data {
// SHA256 HMAC should never be unable to write the data
_, err := mac.Write(buffer)
util.NeverError(err)
}
return mac.Sum(nil)
}
// Wrap takes a wrapping Key of length InternalKeyLen, and uses it to wrap a
// secret Key of any length. This wrapping uses a random IV, the encrypted data,
// and an HMAC to verify the wrapping key was correct. All of this is included
// in the returned WrappedKeyData structure.
func Wrap(wrappingKey, secretKey *Key) (*metadata.WrappedKeyData, error) {
if err := checkWrappingKey(wrappingKey); err != nil {
return nil, err
}
data := &metadata.WrappedKeyData{EncryptedKey: make([]byte, secretKey.Len())}
// Get random IV
var err error
if data.IV, err = NewRandomBuffer(metadata.IVLen); err != nil {
return nil, err
}
// Stretch key for encryption and authentication (unsalted).
encKey, authKey := stretchKey(wrappingKey)
defer encKey.Wipe()
defer authKey.Wipe()
// Encrypt the secret and include the HMAC of the output ("Encrypt-then-MAC").
aesCTR(encKey, data.IV, secretKey.data, data.EncryptedKey)
data.Hmac = getHMAC(authKey, data.IV, data.EncryptedKey)
return data, nil
}
// Unwrap takes a wrapping Key of length InternalKeyLen, and uses it to unwrap
// the WrappedKeyData to get the unwrapped secret Key. The Wrapped Key data
// includes an authentication check, so an error will be returned if that check
// fails.
func Unwrap(wrappingKey *Key, data *metadata.WrappedKeyData) (*Key, error) {
if err := checkWrappingKey(wrappingKey); err != nil {
return nil, err
}
// Stretch key for encryption and authentication (unsalted).
encKey, authKey := stretchKey(wrappingKey)
defer encKey.Wipe()
defer authKey.Wipe()
// Check validity of the HMAC
if !hmac.Equal(getHMAC(authKey, data.IV, data.EncryptedKey), data.Hmac) {
return nil, ErrBadAuth
}
secretKey, err := NewBlankKey(len(data.EncryptedKey))
if err != nil {
return nil, err
}
aesCTR(encKey, data.IV, data.EncryptedKey, secretKey.data)
return secretKey, nil
}
func computeKeyDescriptorV1(key *Key) string {
h1 := sha512.Sum512(key.data)
h2 := sha512.Sum512(h1[:])
length := hex.DecodedLen(metadata.PolicyDescriptorLenV1)
return hex.EncodeToString(h2[:length])
}
func computeKeyDescriptorV2(key *Key) (string, error) {
// This algorithm is specified by the kernel. It uses unsalted
// HKDF-SHA512, where the application-information string is the prefix
// "fscrypt\0" followed by the HKDF_CONTEXT_KEY_IDENTIFIER byte.
hkdf := hkdf.New(sha512.New, key.data, nil, []byte("fscrypt\x00\x01"))
h := make([]byte, hex.DecodedLen(metadata.PolicyDescriptorLenV2))
if _, err := io.ReadFull(hkdf, h); err != nil {
return "", err
}
return hex.EncodeToString(h), nil
}
// ComputeKeyDescriptor computes the descriptor for a given cryptographic key.
// If policyVersion=1, it uses the first 8 bytes of the double application of
// SHA512 on the key. Use this for protectors and v1 policy keys.
// If policyVersion=2, it uses HKDF-SHA512 to compute a key identifier that's
// compatible with the kernel's key identifiers for v2 policy keys.
// In both cases, the resulting bytes are formatted as hex.
func ComputeKeyDescriptor(key *Key, policyVersion int64) (string, error) {
switch policyVersion {
case 1:
return computeKeyDescriptorV1(key), nil
case 2:
return computeKeyDescriptorV2(key)
default:
return "", errors.Errorf("policy version of %d is invalid", policyVersion)
}
}
// PassphraseHash uses Argon2id to produce a Key given the passphrase, salt, and
// hashing costs. This method is designed to take a long time and consume
// considerable memory. For more information, see the documentation at
// https://godoc.org/golang.org/x/crypto/argon2.
func PassphraseHash(passphrase *Key, salt []byte, costs *metadata.HashingCosts) (*Key, error) {
t := uint32(costs.Time)
m := uint32(costs.Memory)
p := uint8(costs.Parallelism)
key := argon2.IDKey(passphrase.data, salt, t, m, p, metadata.InternalKeyLen)
hash, err := NewBlankKey(metadata.InternalKeyLen)
if err != nil {
return nil, err
}
copy(hash.data, key)
return hash, nil
}
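
A quick sanity sketch of the wrap/unwrap pair above, assuming nothing beyond this file and the metadata constants it already references:

package example

import (
	"errors"

	"github.com/google/fscrypt/crypto"
	"github.com/google/fscrypt/metadata"
)

// wrapRoundTrip wraps a random policy-sized key with a random wrapping key and
// checks that unwrapping returns the same bytes. Unwrap would instead return
// ErrBadAuth if the wrapping key (and hence the HMAC) did not match.
func wrapRoundTrip() error {
	wrappingKey, err := crypto.NewRandomKey(metadata.InternalKeyLen)
	if err != nil {
		return err
	}
	defer wrappingKey.Wipe()

	secret, err := crypto.NewRandomKey(metadata.PolicyKeyLen)
	if err != nil {
		return err
	}
	defer secret.Wipe()

	wrapped, err := crypto.Wrap(wrappingKey, secret)
	if err != nil {
		return err
	}
	unwrapped, err := crypto.Unwrap(wrappingKey, wrapped)
	if err != nil {
		return err
	}
	defer unwrapped.Wipe()

	if !secret.Equals(unwrapped) {
		return errors.New("wrap/unwrap round trip mismatch")
	}
	return nil
}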

354
vendor/github.com/google/fscrypt/crypto/key.go generated vendored Normal file
View File

@ -0,0 +1,354 @@
/*
* key.go - Cryptographic key management for fscrypt. Ensures that sensitive
* material is properly handled throughout the program.
*
* Copyright 2017 Google Inc.
* Author: Joe Richey (joerichey@google.com)
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not
* use this file except in compliance with the License. You may obtain a copy of
* the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
*/
package crypto
/*
#include <stdlib.h>
#include <string.h>
*/
import "C"
import (
"bytes"
"crypto/subtle"
"encoding/base32"
"io"
"log"
"os"
"runtime"
"unsafe"
"github.com/pkg/errors"
"golang.org/x/sys/unix"
"github.com/google/fscrypt/metadata"
"github.com/google/fscrypt/util"
)
const (
	// Keys need to be readable and writable, but hidden from other processes.
keyProtection = unix.PROT_READ | unix.PROT_WRITE
keyMmapFlags = unix.MAP_PRIVATE | unix.MAP_ANONYMOUS
)
/*
UseMlock determines whether we should use the mlock/munlock syscalls to
prevent sensitive data like keys and passphrases from being paged to disk.
UseMlock defaults to true, but can be set to false if the application calling
into this library has insufficient privileges to lock memory. Code using this
package could also bind this setting to a flag by using:
flag.BoolVar(&crypto.UseMlock, "lock-memory", true, "lock keys in memory")
*/
var UseMlock = true
/*
Key protects some arbitrary buffer of cryptographic material. Its methods
ensure that the Key's data is locked in memory before being used (if
UseMlock is set to true), and is wiped and unlocked after use (via the Wipe()
method). This data is never accessed outside of the fscrypt/crypto package
(except for the Data method). If a key is successfully created, the
Wipe() method should be called after its use. For example:
func UseKeyFromStdin() error {
key, err := NewKeyFromReader(os.Stdin)
if err != nil {
return err
}
defer key.Wipe()
// Do stuff with key
return nil
}
The Wipe() method will also be called when a key is garbage collected; however,
it is best practice to clear the key as soon as possible, so it spends a minimal
amount of time in memory.
Note that Key is not thread safe, as a key could be wiped while another thread
is using it. Also, calling Wipe() from two threads could cause an error as
memory could be freed twice.
*/
type Key struct {
data []byte
}
// NewBlankKey constructs a blank key of a specified length and returns an error
// if we are unable to allocate or lock the necessary memory.
func NewBlankKey(length int) (*Key, error) {
if length == 0 {
return &Key{data: nil}, nil
} else if length < 0 {
return nil, errors.Errorf("requested key length %d is negative", length)
}
flags := keyMmapFlags
if UseMlock {
flags |= unix.MAP_LOCKED
}
// See MAP_ANONYMOUS in http://man7.org/linux/man-pages/man2/mmap.2.html
data, err := unix.Mmap(-1, 0, length, keyProtection, flags)
if err == unix.EAGAIN {
return nil, ErrMlockUlimit
}
if err != nil {
return nil, errors.Wrapf(err,
"failed to allocate (mmap) key buffer of length %d", length)
}
key := &Key{data: data}
// Backup finalizer in case user forgets to "defer key.Wipe()"
runtime.SetFinalizer(key, (*Key).Wipe)
return key, nil
}
// Wipe destroys a Key by zeroing and freeing the memory. The data is zeroed
// even if Wipe returns an error, which occurs if we are unable to unlock or
// free the key memory. Wipe does nothing if the key is already wiped or is nil.
func (key *Key) Wipe() error {
// We do nothing if key or key.data is nil so that Wipe() is idempotent
// and so Wipe() can be called on keys which have already been cleared.
if key != nil && key.data != nil {
data := key.data
key.data = nil
for i := range data {
data[i] = 0
}
if err := unix.Munmap(data); err != nil {
log.Printf("unix.Munmap() failed: %v", err)
return errors.Wrapf(err, "failed to free (munmap) key buffer")
}
}
return nil
}
// Len is the underlying data buffer's length.
func (key *Key) Len() int {
return len(key.data)
}
// Equals compares the contents of two keys, returning true if they have the same
// key data. This function runs in constant time.
func (key *Key) Equals(key2 *Key) bool {
return subtle.ConstantTimeCompare(key.data, key2.data) == 1
}
// resize returns a new key with size requestedSize and the appropriate data
// copied over. The original data is wiped. This method does nothing and returns
// itself if the key's length equals requestedSize.
func (key *Key) resize(requestedSize int) (*Key, error) {
if key.Len() == requestedSize {
return key, nil
}
defer key.Wipe()
resizedKey, err := NewBlankKey(requestedSize)
if err != nil {
return nil, err
}
copy(resizedKey.data, key.data)
return resizedKey, nil
}
// Data returns a slice of the key's underlying data. Note that this may become
// outdated if the key is resized.
func (key *Key) Data() []byte {
return key.data
}
// UnsafePtr returns an unsafe pointer to the key's underlying data. Note that
// this will only be valid as long as the key is not resized.
func (key *Key) UnsafePtr() unsafe.Pointer {
return util.Ptr(key.data)
}
// UnsafeToCString makes a copy of the string's data into a null-terminated C
// string allocated by C. Note that this method is unsafe as this C copy has no
// locking or wiping functionality. The key shouldn't contain any `\0` bytes.
func (key *Key) UnsafeToCString() unsafe.Pointer {
size := C.size_t(key.Len())
data := C.calloc(size+1, 1)
C.memcpy(data, util.Ptr(key.data), size)
return data
}
// Clone creates a key as a copy of another one.
func (key *Key) Clone() (*Key, error) {
newKey, err := NewBlankKey(key.Len())
if err != nil {
return nil, err
}
copy(newKey.data, key.data)
return newKey, nil
}
// NewKeyFromCString creates a copy of some C string's data in a key. Note
// that the original C string is not modified at all, so steps must be taken to
// ensure that this original copy is secured.
func NewKeyFromCString(str unsafe.Pointer) (*Key, error) {
size := C.strlen((*C.char)(str))
key, err := NewBlankKey(int(size))
if err != nil {
return nil, err
}
C.memcpy(util.Ptr(key.data), str, size)
return key, nil
}
// NewKeyFromReader constructs a key of arbitrary length by reading from reader
// until hitting EOF.
func NewKeyFromReader(reader io.Reader) (*Key, error) {
// Use an initial key size of a page. As Mmap allocates a page anyway,
// there isn't much additional overhead from starting with a whole page.
key, err := NewBlankKey(os.Getpagesize())
if err != nil {
return nil, err
}
totalBytesRead := 0
for {
bytesRead, err := reader.Read(key.data[totalBytesRead:])
totalBytesRead += bytesRead
switch err {
case nil:
// Need to continue reading. Grow key if necessary
if key.Len() == totalBytesRead {
if key, err = key.resize(2 * key.Len()); err != nil {
return nil, err
}
}
case io.EOF:
// Getting the EOF error means we are done
return key.resize(totalBytesRead)
default:
// Fail if Read() has a failure
key.Wipe()
return nil, err
}
}
}
// NewFixedLengthKeyFromReader constructs a key with a specified length by
// reading exactly length bytes from reader.
func NewFixedLengthKeyFromReader(reader io.Reader, length int) (*Key, error) {
key, err := NewBlankKey(length)
if err != nil {
return nil, err
}
if _, err := io.ReadFull(reader, key.data); err != nil {
key.Wipe()
return nil, err
}
return key, nil
}
var (
// The recovery code is base32 with a dash between each block of 8 characters.
encoding = base32.StdEncoding
blockSize = 8
separator = []byte("-")
encodedLength = encoding.EncodedLen(metadata.PolicyKeyLen)
decodedLength = encoding.DecodedLen(encodedLength)
// RecoveryCodeLength is the number of bytes in every recovery code
RecoveryCodeLength = (encodedLength/blockSize)*(blockSize+len(separator)) - len(separator)
)
// WriteRecoveryCode outputs key's recovery code to the provided writer.
// WARNING: This recovery key is enough to derive the original key, so it must
// be given the same level of protection as a raw cryptographic key.
func WriteRecoveryCode(key *Key, writer io.Writer) error {
if err := util.CheckValidLength(metadata.PolicyKeyLen, key.Len()); err != nil {
return errors.Wrap(err, "recovery key")
}
// We store the base32 encoded data (without separators) in a temp key
encodedKey, err := NewBlankKey(encodedLength)
if err != nil {
return err
}
defer encodedKey.Wipe()
encoding.Encode(encodedKey.data, key.data)
w := util.NewErrWriter(writer)
// Write the blocks with separators between them
w.Write(encodedKey.data[:blockSize])
for blockStart := blockSize; blockStart < encodedLength; blockStart += blockSize {
w.Write(separator)
blockEnd := util.MinInt(blockStart+blockSize, encodedLength)
w.Write(encodedKey.data[blockStart:blockEnd])
}
// If any writes have failed, return the error
return w.Err()
}
// ReadRecoveryCode gets the recovery code from the provided reader and returns
// the corresponding cryptographic key.
// WARNING: This recovery key is enough to derive the original key, so it must
// be given the same level of protection as a raw cryptographic key.
func ReadRecoveryCode(reader io.Reader) (*Key, error) {
// We store the base32 encoded data (without separators) in a temp key
encodedKey, err := NewBlankKey(encodedLength)
if err != nil {
return nil, err
}
defer encodedKey.Wipe()
r := util.NewErrReader(reader)
	// Read the first block, then the remaining blocks, checking the separators between them
r.Read(encodedKey.data[:blockSize])
inputSeparator := make([]byte, len(separator))
for blockStart := blockSize; blockStart < encodedLength; blockStart += blockSize {
r.Read(inputSeparator)
if r.Err() == nil && !bytes.Equal(separator, inputSeparator) {
err = errors.Wrapf(ErrRecoveryCode, "invalid separator %q", inputSeparator)
return nil, err
}
blockEnd := util.MinInt(blockStart+blockSize, encodedLength)
r.Read(encodedKey.data[blockStart:blockEnd])
}
// If any reads have failed, return the error
if r.Err() != nil {
return nil, errors.Wrapf(ErrRecoveryCode, "read error %v", r.Err())
}
// Now we decode the key, resizing if necessary
decodedKey, err := NewBlankKey(decodedLength)
if err != nil {
return nil, err
}
if _, err = encoding.Decode(decodedKey.data, encodedKey.data); err != nil {
return nil, errors.Wrap(ErrRecoveryCode, err.Error())
}
return decodedKey.resize(metadata.PolicyKeyLen)
}
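
As a usage sketch for the recovery-code helpers above: writing a PolicyKeyLen key to a writer and reading it back reproduces the same key. In real use the code would be shown to, and later retyped by, a person rather than kept in a buffer.

package example

import (
	"bytes"

	"github.com/google/fscrypt/crypto"
)

// recoveryCodeRoundTrip encodes key (which must be metadata.PolicyKeyLen bytes
// long) as a recovery code and decodes it again. The intermediate buffer holds
// key material, so a real caller would treat it as sensitive.
func recoveryCodeRoundTrip(key *crypto.Key) (*crypto.Key, error) {
	var buf bytes.Buffer
	if err := crypto.WriteRecoveryCode(key, &buf); err != nil {
		return nil, err
	}
	return crypto.ReadRecoveryCode(&buf)
}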

98
vendor/github.com/google/fscrypt/crypto/rand.go generated vendored Normal file
View File

@ -0,0 +1,98 @@
/*
* rand.go - Reader used to generate secure random data for fscrypt.
*
* Copyright 2017 Google Inc.
* Author: Joe Richey (joerichey@google.com)
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not
* use this file except in compliance with the License. You may obtain a copy of
* the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
*/
package crypto
import (
"io"
"github.com/pkg/errors"
"golang.org/x/sys/unix"
)
// NewRandomBuffer uses the Linux Getrandom() syscall to create random bytes. If
// the operating system has insufficient randomness, the buffer creation will
// fail. This is an improvement over Go's built-in crypto/rand which will still
// return bytes if the system has insufficient entropy.
// See: https://github.com/golang/go/issues/19274
//
// While this syscall was only introduced in Kernel v3.17, it predates the
// introduction of filesystem encryption, so it introduces no additional
// compatibility issues.
func NewRandomBuffer(length int) ([]byte, error) {
buffer := make([]byte, length)
if _, err := io.ReadFull(randReader{}, buffer); err != nil {
return nil, err
}
return buffer, nil
}
// NewRandomKey creates a random key of the specified length. This function uses
// the same random number generation process as NewRandomBuffer.
func NewRandomKey(length int) (*Key, error) {
return NewFixedLengthKeyFromReader(randReader{}, length)
}
// NewRandomPassphrase creates a random passphrase of the specified length
// containing random alphabetic characters.
func NewRandomPassphrase(length int) (*Key, error) {
chars := []byte("abcdefghijklmnopqrstuvwxyz")
passphrase, err := NewBlankKey(length)
if err != nil {
return nil, err
}
for i := 0; i < length; {
// Get some random bytes.
raw, err := NewRandomKey((length - i) * 2)
if err != nil {
return nil, err
}
// Translate the random bytes into random characters.
for _, b := range raw.data {
if int(b) >= 256-(256%len(chars)) {
// Avoid bias towards the first characters in the list.
continue
}
c := chars[int(b)%len(chars)]
passphrase.data[i] = c
i++
if i == length {
break
}
}
raw.Wipe()
}
return passphrase, nil
}
// randReader just calls into Getrandom, so no internal data is needed.
type randReader struct{}
func (r randReader) Read(buffer []byte) (int, error) {
n, err := unix.Getrandom(buffer, unix.GRND_NONBLOCK)
switch err {
case nil:
return n, nil
case unix.EAGAIN:
err = errors.New("insufficient entropy in pool")
case unix.ENOSYS:
err = errors.New("kernel must be v3.17 or later")
}
return 0, errors.Wrap(err, "getrandom() failed")
}
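
Tying rand.go back to crypto.go (a sketch; the cost numbers are placeholders, not recommendations): a fresh salt from NewRandomBuffer feeds PassphraseHash together with Argon2id costs.

package example

import (
	"github.com/google/fscrypt/crypto"
	"github.com/google/fscrypt/metadata"
)

// hashPassphrase derives a key from a passphrase with a freshly generated
// salt. The HashingCosts values here are illustrative placeholders; real
// callers take them from their fscrypt config.
func hashPassphrase(passphrase *crypto.Key) (*crypto.Key, error) {
	salt, err := crypto.NewRandomBuffer(metadata.SaltLen)
	if err != nil {
		return nil, err
	}
	costs := &metadata.HashingCosts{Time: 1, Memory: 1 << 17, Parallelism: 4}
	return crypto.PassphraseHash(passphrase, salt, costs)
}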

1088
vendor/github.com/google/fscrypt/filesystem/filesystem.go generated vendored Normal file

File diff suppressed because it is too large Load Diff

578
vendor/github.com/google/fscrypt/filesystem/mountpoint.go generated vendored Normal file
View File

@ -0,0 +1,578 @@
/*
* mountpoint.go - Contains all the functionality for finding mountpoints and
* using UUIDs to refer to them. Specifically, we can find the mountpoint of a
* path, get info about a mountpoint, and find mountpoints with a specific UUID.
*
* Copyright 2017 Google Inc.
* Author: Joe Richey (joerichey@google.com)
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not
* use this file except in compliance with the License. You may obtain a copy of
* the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
*/
package filesystem
import (
"bufio"
"fmt"
"io"
"io/ioutil"
"log"
"os"
"path/filepath"
"sort"
"strconv"
"strings"
"sync"
"github.com/pkg/errors"
)
var (
// These maps hold data about the state of the system's filesystems.
//
// They only contain one Mount per filesystem, even if there are
// additional bind mounts, since we want to store fscrypt metadata in
// only one place per filesystem. When it is ambiguous which Mount
// should be used for a filesystem, mountsByDevice will contain an
// explicit nil entry, and mountsByPath won't contain an entry.
mountsByDevice map[DeviceNumber]*Mount
mountsByPath map[string]*Mount
// Used to make the mount functions thread safe
mountMutex sync.Mutex
// True if the maps have been successfully initialized.
mountsInitialized bool
// Supported tokens for filesystem links
uuidToken = "UUID"
pathToken = "PATH"
// Location to perform UUID lookup
uuidDirectory = "/dev/disk/by-uuid"
)
// Unescape octal-encoded escape sequences in a string from the mountinfo file.
// The kernel encodes the ' ', '\t', '\n', and '\\' bytes this way. This
// function exactly inverts what the kernel does, including by preserving
// invalid UTF-8.
func unescapeString(str string) string {
var sb strings.Builder
for i := 0; i < len(str); i++ {
b := str[i]
if b == '\\' && i+3 < len(str) {
if parsed, err := strconv.ParseInt(str[i+1:i+4], 8, 8); err == nil {
b = uint8(parsed)
i += 3
}
}
sb.WriteByte(b)
}
return sb.String()
}
// EscapeString is the reverse of unescapeString. Use this to avoid injecting
// spaces or newlines into output that uses these characters as separators.
func EscapeString(str string) string {
var sb strings.Builder
for _, b := range []byte(str) {
switch b {
case ' ', '\t', '\n', '\\':
sb.WriteString(fmt.Sprintf("\\%03o", b))
default:
sb.WriteByte(b)
}
}
return sb.String()
}
// We get the device name via the device number rather than use the mount source
// field directly. This is necessary to handle a rootfs that was mounted via
// the kernel command line, since mountinfo always shows /dev/root for that.
// This assumes that the device nodes are in the standard location.
func getDeviceName(num DeviceNumber) string {
linkPath := fmt.Sprintf("/sys/dev/block/%v", num)
if target, err := os.Readlink(linkPath); err == nil {
return fmt.Sprintf("/dev/%s", filepath.Base(target))
}
return ""
}
// Parse one line of /proc/self/mountinfo.
//
// The line contains the following space-separated fields:
// [0] mount ID
// [1] parent ID
// [2] major:minor
// [3] root
// [4] mount point
// [5] mount options
// [6...n-1] optional field(s)
// [n] separator
// [n+1] filesystem type
// [n+2] mount source
// [n+3] super options
//
// For more details, see https://www.kernel.org/doc/Documentation/filesystems/proc.txt
func parseMountInfoLine(line string) *Mount {
fields := strings.Split(line, " ")
if len(fields) < 10 {
return nil
}
// Count the optional fields. In case new fields are appended later,
// don't simply assume that n == len(fields) - 4.
n := 6
for fields[n] != "-" {
n++
if n >= len(fields) {
return nil
}
}
if n+3 >= len(fields) {
return nil
}
	mnt := &Mount{}
var err error
mnt.DeviceNumber, err = newDeviceNumberFromString(fields[2])
if err != nil {
return nil
}
mnt.Subtree = unescapeString(fields[3])
mnt.Path = unescapeString(fields[4])
for _, opt := range strings.Split(fields[5], ",") {
if opt == "ro" {
mnt.ReadOnly = true
}
}
mnt.FilesystemType = unescapeString(fields[n+1])
mnt.Device = getDeviceName(mnt.DeviceNumber)
return mnt
}
type mountpointTreeNode struct {
mount *Mount
parent *mountpointTreeNode
children []*mountpointTreeNode
}
func addUncontainedSubtreesRecursive(dst map[string]bool,
node *mountpointTreeNode, allUncontainedSubtrees map[string]bool) {
if allUncontainedSubtrees[node.mount.Subtree] {
dst[node.mount.Subtree] = true
}
for _, child := range node.children {
addUncontainedSubtreesRecursive(dst, child, allUncontainedSubtrees)
}
}
// findMainMount finds the "main" Mount of a filesystem. The "main" Mount is
// where the filesystem's fscrypt metadata is stored.
//
// Normally, there is just one Mount and it's of the entire filesystem
// (mnt.Subtree == "/"). But in general, the filesystem might be mounted in
// multiple places, including "bind mounts" where mnt.Subtree != "/". Also, the
// filesystem might have a combination of read-write and read-only mounts.
//
// To handle most cases, we could just choose a mount with mnt.Subtree == "/",
// preferably a read-write mount. However, that doesn't work in containers
// where the "/" subtree might not be mounted. Here's a real-world example:
//
// mnt.Subtree mnt.Path
// ----------- --------
// /var/lib/lxc/base/rootfs /
// /var/cache/pacman/pkg /var/cache/pacman/pkg
// /srv/repo/x86_64 /srv/http/x86_64
//
// In this case, all mnt.Subtree are independent. To handle this case, we must
// choose the Mount whose mnt.Path contains the others, i.e. the first one.
// Note: the fscrypt metadata won't be usable from outside the container since
// it won't be at the real root of the filesystem, but that may be acceptable.
//
// However, we can't look *only* at mnt.Path, since in some cases mnt.Subtree is
// needed to correctly handle bind mounts. For example, in the following case,
// the first Mount should be chosen:
//
// mnt.Subtree mnt.Path
// ----------- --------
// /foo /foo
// /foo/dir /dir
//
// To solve this, we divide the mounts into non-overlapping trees of mnt.Path.
// Then, we choose one of these trees which contains (exactly or via path
// prefix) *all* mnt.Subtree. We then return the root of this tree. In both
// the above examples, this algorithm returns the first Mount.
func findMainMount(filesystemMounts []*Mount) *Mount {
// Index this filesystem's mounts by path. Note: paths are unique here,
// since non-last mounts were already excluded earlier.
//
// Also build the set of all mounted subtrees.
filesystemMountsByPath := make(map[string]*mountpointTreeNode)
allSubtrees := make(map[string]bool)
for _, mnt := range filesystemMounts {
filesystemMountsByPath[mnt.Path] = &mountpointTreeNode{mount: mnt}
allSubtrees[mnt.Subtree] = true
}
// Divide the mounts into non-overlapping trees of mountpoints.
for path, mntNode := range filesystemMountsByPath {
for path != "/" && mntNode.parent == nil {
path = filepath.Dir(path)
if parent := filesystemMountsByPath[path]; parent != nil {
mntNode.parent = parent
parent.children = append(parent.children, mntNode)
}
}
}
// Build the set of mounted subtrees that aren't contained in any other
// mounted subtree.
allUncontainedSubtrees := make(map[string]bool)
for subtree := range allSubtrees {
contained := false
for t := subtree; t != "/" && !contained; {
t = filepath.Dir(t)
contained = allSubtrees[t]
}
if !contained {
allUncontainedSubtrees[subtree] = true
}
}
// Select the root of a mountpoint tree whose mounted subtrees contain
// *all* mounted subtrees. Equivalently, select a mountpoint tree in
// which every uncontained subtree is mounted.
var mainMount *Mount
for _, mntNode := range filesystemMountsByPath {
mnt := mntNode.mount
if mntNode.parent != nil {
continue
}
uncontainedSubtrees := make(map[string]bool)
addUncontainedSubtreesRecursive(uncontainedSubtrees, mntNode, allUncontainedSubtrees)
if len(uncontainedSubtrees) != len(allUncontainedSubtrees) {
continue
}
// If there's more than one eligible mount, they should have the
// same Subtree. Otherwise it's ambiguous which one to use.
if mainMount != nil && mainMount.Subtree != mnt.Subtree {
log.Printf("Unsupported case: %q (%v) has multiple non-overlapping mounts. This filesystem will be ignored!",
mnt.Device, mnt.DeviceNumber)
return nil
}
// Prefer a read-write mount to a read-only one.
if mainMount == nil || mainMount.ReadOnly {
mainMount = mnt
}
}
return mainMount
}
// This is separate from loadMountInfo() only for unit testing.
func readMountInfo(r io.Reader) error {
mountsByDevice = make(map[DeviceNumber]*Mount)
mountsByPath = make(map[string]*Mount)
allMountsByDevice := make(map[DeviceNumber][]*Mount)
allMountsByPath := make(map[string]*Mount)
scanner := bufio.NewScanner(r)
for scanner.Scan() {
line := scanner.Text()
mnt := parseMountInfoLine(line)
if mnt == nil {
log.Printf("ignoring invalid mountinfo line %q", line)
continue
}
// We can only use mountpoints that are directories for fscrypt.
if !isDir(mnt.Path) {
log.Printf("ignoring mountpoint %q because it is not a directory", mnt.Path)
continue
}
// Note this overrides the info if we have seen the mountpoint
// earlier in the file. This is correct behavior because the
// mountpoints are listed in mount order.
allMountsByPath[mnt.Path] = mnt
}
// For each filesystem, choose a "main" Mount and discard any additional
// bind mounts. fscrypt only cares about the main Mount, since it's
// where the fscrypt metadata is stored. Store all the main Mounts in
// mountsByDevice and mountsByPath so that they can be found later.
for _, mnt := range allMountsByPath {
allMountsByDevice[mnt.DeviceNumber] =
append(allMountsByDevice[mnt.DeviceNumber], mnt)
}
for deviceNumber, filesystemMounts := range allMountsByDevice {
mnt := findMainMount(filesystemMounts)
mountsByDevice[deviceNumber] = mnt // may store an explicit nil entry
if mnt != nil {
mountsByPath[mnt.Path] = mnt
}
}
return nil
}
// loadMountInfo populates the Mount mappings by parsing /proc/self/mountinfo.
// It returns an error if the Mount mappings cannot be populated.
func loadMountInfo() error {
if !mountsInitialized {
file, err := os.Open("/proc/self/mountinfo")
if err != nil {
return err
}
defer file.Close()
if err := readMountInfo(file); err != nil {
return err
}
mountsInitialized = true
}
return nil
}
func filesystemLacksMainMountError(deviceNumber DeviceNumber) error {
return errors.Errorf("Device %q (%v) lacks a \"main\" mountpoint in the current mount namespace, so it's ambiguous where to store the fscrypt metadata.",
getDeviceName(deviceNumber), deviceNumber)
}
// AllFilesystems lists all mounted filesystems ordered by path to their "main"
// Mount. Use CheckSetup() to see if they are set up for use with fscrypt.
func AllFilesystems() ([]*Mount, error) {
mountMutex.Lock()
defer mountMutex.Unlock()
if err := loadMountInfo(); err != nil {
return nil, err
}
mounts := make([]*Mount, 0, len(mountsByPath))
for _, mount := range mountsByPath {
mounts = append(mounts, mount)
}
sort.Sort(PathSorter(mounts))
return mounts, nil
}
// UpdateMountInfo updates the filesystem mountpoint maps with the current state
// of the filesystem mountpoints. Returns error if the initialization fails.
func UpdateMountInfo() error {
mountMutex.Lock()
defer mountMutex.Unlock()
mountsInitialized = false
return loadMountInfo()
}
// FindMount returns the main Mount object for the filesystem which contains the
// file at the specified path. An error is returned if the path is invalid or if
// we cannot load the required mount data. If a mount has been updated since the
// last call to one of the mount functions, run UpdateMountInfo to see changes.
func FindMount(path string) (*Mount, error) {
mountMutex.Lock()
defer mountMutex.Unlock()
if err := loadMountInfo(); err != nil {
return nil, err
}
// First try to find the mount by the number of the containing device.
deviceNumber, err := getNumberOfContainingDevice(path)
if err != nil {
return nil, err
}
mnt, ok := mountsByDevice[deviceNumber]
if ok {
if mnt == nil {
return nil, filesystemLacksMainMountError(deviceNumber)
}
return mnt, nil
}
// The mount couldn't be found by the number of the containing device.
// Fall back to walking up the directory hierarchy and checking for a
// mount at each directory path. This is necessary for btrfs, where
// files report a different st_dev from the /proc/self/mountinfo entry.
curPath, err := canonicalizePath(path)
if err != nil {
return nil, err
}
for {
mnt := mountsByPath[curPath]
if mnt != nil {
return mnt, nil
}
// Move to the parent directory unless we have reached the root.
parent := filepath.Dir(curPath)
if parent == curPath {
return nil, errors.Errorf("couldn't find mountpoint containing %q", path)
}
curPath = parent
}
}
// GetMount is like FindMount, except GetMount also returns an error if the path
// doesn't name the same file as the filesystem's "main" Mount. For example, if
// a filesystem is fully mounted at "/mnt" and if "/mnt/a" exists, then
// FindMount("/mnt/a") will succeed whereas GetMount("/mnt/a") will fail. This
// is true even if "/mnt/a" is a bind mount of part of the same filesystem.
func GetMount(mountpoint string) (*Mount, error) {
mnt, err := FindMount(mountpoint)
if err != nil {
return nil, &ErrNotAMountpoint{mountpoint}
}
// Check whether 'mountpoint' names the same directory as 'mnt.Path'.
// Use os.SameFile() (i.e., compare inode numbers) rather than compare
// canonical paths, since filesystems may be mounted in multiple places.
fi1, err := os.Stat(mountpoint)
if err != nil {
return nil, err
}
fi2, err := os.Stat(mnt.Path)
if err != nil {
return nil, err
}
if !os.SameFile(fi1, fi2) {
return nil, &ErrNotAMountpoint{mountpoint}
}
return mnt, nil
}
func uuidToDeviceNumber(uuid string) (DeviceNumber, error) {
uuidSymlinkPath := filepath.Join(uuidDirectory, uuid)
return getDeviceNumber(uuidSymlinkPath)
}
func deviceNumberToMount(deviceNumber DeviceNumber) (*Mount, bool) {
mountMutex.Lock()
defer mountMutex.Unlock()
if err := loadMountInfo(); err != nil {
log.Print(err)
return nil, false
}
mnt, ok := mountsByDevice[deviceNumber]
return mnt, ok
}
// getMountFromLink returns the main Mount, if any, for the filesystem which the
// given link points to. The link should contain a series of token-value pairs
// (<token>=<value>), one per line. The supported tokens are "UUID" and "PATH".
// If the UUID is present and it works, then it is used; otherwise, PATH is used
// if it is present. (The fallback from UUID to PATH will keep the link working
// if the UUID of the target filesystem changes but its mountpoint doesn't.)
//
// If a mount has been updated since the last call to one of the mount
// functions, make sure to run UpdateMountInfo first.
func getMountFromLink(link string) (*Mount, error) {
// Parse the link.
uuid := ""
path := ""
lines := strings.Split(link, "\n")
for _, line := range lines {
line := strings.TrimSpace(line)
if line == "" {
continue
}
pair := strings.Split(line, "=")
if len(pair) != 2 {
log.Printf("ignoring invalid line in filesystem link file: %q", line)
continue
}
token := pair[0]
value := pair[1]
switch token {
case uuidToken:
uuid = value
case pathToken:
path = value
default:
log.Printf("ignoring unknown link token %q", token)
}
}
// At least one of UUID and PATH must be present.
if uuid == "" && path == "" {
return nil, &ErrFollowLink{link, errors.Errorf("invalid filesystem link file")}
}
// Try following the UUID.
errMsg := ""
if uuid != "" {
deviceNumber, err := uuidToDeviceNumber(uuid)
if err == nil {
mnt, ok := deviceNumberToMount(deviceNumber)
if mnt != nil {
log.Printf("resolved filesystem link using UUID %q", uuid)
return mnt, nil
}
if ok {
return nil, &ErrFollowLink{link, filesystemLacksMainMountError(deviceNumber)}
}
log.Printf("cannot find filesystem with UUID %q", uuid)
} else {
log.Printf("cannot find filesystem with UUID %q: %v", uuid, err)
}
errMsg += fmt.Sprintf("cannot find filesystem with UUID %q", uuid)
if path != "" {
log.Printf("falling back to using mountpoint path instead of UUID")
}
}
// UUID didn't work. As a fallback, try the mountpoint path.
if path != "" {
mnt, err := GetMount(path)
if mnt != nil {
log.Printf("resolved filesystem link using mountpoint path %q", path)
return mnt, nil
}
log.Print(err)
if errMsg == "" {
errMsg = fmt.Sprintf("cannot find filesystem with main mountpoint %q", path)
} else {
errMsg += fmt.Sprintf(" or main mountpoint %q", path)
}
}
// No method worked; return an error.
return nil, &ErrFollowLink{link, errors.New(errMsg)}
}
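// Illustrative sketch (not part of the upstream file): a link file that
// makeLink below would produce, and that getMountFromLink can resolve,
// contains one token-value pair per line, e.g.
//
//	UUID=b1bf4fa0-c032-478a-8e85-8ab011ed79a7
//	PATH=/mnt/data
//
// Resolution first tries the UUID symlink under /dev/disk/by-uuid; if that
// filesystem cannot be found, it falls back to GetMount("/mnt/data").
// The UUID and path shown here are hypothetical.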
func (mnt *Mount) getFilesystemUUID() (string, error) {
dirContents, err := ioutil.ReadDir(uuidDirectory)
if err != nil {
return "", err
}
for _, fileInfo := range dirContents {
if fileInfo.Mode()&os.ModeSymlink == 0 {
continue // Only interested in UUID symlinks
}
uuid := fileInfo.Name()
deviceNumber, err := uuidToDeviceNumber(uuid)
if err != nil {
log.Print(err)
continue
}
if mnt.DeviceNumber == deviceNumber {
return uuid, nil
}
}
return "", errors.Errorf("cannot determine UUID of device %q (%v)",
mnt.Device, mnt.DeviceNumber)
}
// makeLink creates the contents of a link file which will point to the given
// filesystem. This will normally be a string of the form
// "UUID=<uuid>\nPATH=<path>\n". If the UUID cannot be determined, the UUID
// portion will be omitted.
func makeLink(mnt *Mount) (string, error) {
uuid, err := mnt.getFilesystemUUID()
if err != nil {
// The UUID could not be determined. This happens for btrfs
// filesystems, as the device number found via
// /dev/disk/by-uuid/* for btrfs filesystems differs from the
// actual device number of the mounted filesystem. Just rely
// entirely on the fallback to mountpoint path.
log.Print(err)
return fmt.Sprintf("%s=%s\n", pathToken, mnt.Path), nil
}
return fmt.Sprintf("%s=%s\n%s=%s\n", uuidToken, uuid, pathToken, mnt.Path), nil
}

128
vendor/github.com/google/fscrypt/filesystem/path.go generated vendored Normal file
View File

@ -0,0 +1,128 @@
/*
* path.go - Utility functions for dealing with filesystem paths
*
* Copyright 2017 Google Inc.
* Author: Joe Richey (joerichey@google.com)
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not
* use this file except in compliance with the License. You may obtain a copy of
* the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
*/
package filesystem
import (
"fmt"
"log"
"os"
"path/filepath"
"golang.org/x/sys/unix"
"github.com/pkg/errors"
)
// OpenFileOverridingUmask calls os.OpenFile but with the umask overridden so
// that no permission bits are masked out if the file is created.
func OpenFileOverridingUmask(name string, flag int, perm os.FileMode) (*os.File, error) {
oldMask := unix.Umask(0)
defer unix.Umask(oldMask)
return os.OpenFile(name, flag, perm)
}
// canonicalizePath turns path into an absolute path without symlinks.
func canonicalizePath(path string) (string, error) {
path, err := filepath.Abs(path)
if err != nil {
return "", err
}
path, err = filepath.EvalSymlinks(path)
// Get a better error if we have an invalid path
if pathErr, ok := err.(*os.PathError); ok {
err = errors.Wrap(pathErr.Err, pathErr.Path)
}
return path, err
}
// loggedStat runs os.Stat, but it logs the error if stat returns any error
// other than nil or IsNotExist.
func loggedStat(name string) (os.FileInfo, error) {
info, err := os.Stat(name)
if err != nil && !os.IsNotExist(err) {
log.Print(err)
}
return info, err
}
// loggedLstat runs os.Lstat (doesn't dereference trailing symlink), but it logs
// the error if lstat returns any error other than nil or IsNotExist.
func loggedLstat(name string) (os.FileInfo, error) {
info, err := os.Lstat(name)
if err != nil && !os.IsNotExist(err) {
log.Print(err)
}
return info, err
}
// isDir returns true if the path exists and is that of a directory.
func isDir(path string) bool {
info, err := loggedStat(path)
return err == nil && info.IsDir()
}
// isRegularFile returns true if the path exists and is that of a regular file.
func isRegularFile(path string) bool {
info, err := loggedStat(path)
return err == nil && info.Mode().IsRegular()
}
// HaveReadAccessTo returns true if the process has read access to a file or
// directory, without actually opening it.
func HaveReadAccessTo(path string) bool {
return unix.Access(path, unix.R_OK) == nil
}
// DeviceNumber represents a combined major:minor device number.
type DeviceNumber uint64
func (num DeviceNumber) String() string {
return fmt.Sprintf("%d:%d", unix.Major(uint64(num)), unix.Minor(uint64(num)))
}
func newDeviceNumberFromString(str string) (DeviceNumber, error) {
var major, minor uint32
if count, _ := fmt.Sscanf(str, "%d:%d", &major, &minor); count != 2 {
return 0, errors.Errorf("invalid device number string %q", str)
}
return DeviceNumber(unix.Mkdev(major, minor)), nil
}
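// Illustrative sketch (not part of the upstream file): DeviceNumber round-trips
// through its "major:minor" string form, e.g.
//
//	num, err := newDeviceNumberFromString("253:3")
//	if err == nil {
//		fmt.Println(num.String()) // prints "253:3"
//	}
//
// The concrete numbers are hypothetical.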
// getDeviceNumber returns the device number of the device node at the given
// path. If there is a symlink at the path, it is dereferenced.
func getDeviceNumber(path string) (DeviceNumber, error) {
var stat unix.Stat_t
if err := unix.Stat(path, &stat); err != nil {
return 0, err
}
return DeviceNumber(stat.Rdev), nil
}
// getNumberOfContainingDevice returns the device number of the filesystem which
// contains the given file. If the file is a symlink, it is not dereferenced.
func getNumberOfContainingDevice(path string) (DeviceNumber, error) {
var stat unix.Stat_t
if err := unix.Lstat(path, &stat); err != nil {
return 0, err
}
return DeviceNumber(stat.Dev), nil
}

326
vendor/github.com/google/fscrypt/keyring/fs_keyring.go generated vendored Normal file
View File

@ -0,0 +1,326 @@
/*
* fs_keyring.go - Add/remove encryption policy keys to/from filesystem
*
* Copyright 2019 Google LLC
* Author: Eric Biggers (ebiggers@google.com)
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not
* use this file except in compliance with the License. You may obtain a copy of
* the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
*/
package keyring
/*
#include <string.h>
*/
import "C"
import (
"encoding/hex"
"log"
"os"
"os/user"
"sync"
"unsafe"
"github.com/pkg/errors"
"golang.org/x/sys/unix"
"github.com/google/fscrypt/crypto"
"github.com/google/fscrypt/filesystem"
"github.com/google/fscrypt/security"
"github.com/google/fscrypt/util"
)
var (
fsKeyringSupported bool
fsKeyringSupportedKnown bool
fsKeyringSupportedLock sync.Mutex
)
func checkForFsKeyringSupport(mount *filesystem.Mount) bool {
dir, err := os.Open(mount.Path)
if err != nil {
log.Printf("Unexpected error opening %q. Assuming filesystem keyring is unsupported.",
mount.Path)
return false
}
defer dir.Close()
// FS_IOC_ADD_ENCRYPTION_KEY with a NULL argument will fail with ENOTTY
// if the ioctl isn't supported. Otherwise it should fail with EFAULT.
//
// Note that there's no need to check for FS_IOC_REMOVE_ENCRYPTION_KEY
// support separately, since it's guaranteed to be available if
// FS_IOC_ADD_ENCRYPTION_KEY is. There's also no need to check for
// support on every filesystem separately, since either the kernel
// supports the ioctls on all fscrypt-capable filesystems or it doesn't.
_, _, errno := unix.Syscall(unix.SYS_IOCTL, dir.Fd(), unix.FS_IOC_ADD_ENCRYPTION_KEY, 0)
if errno == unix.ENOTTY {
log.Printf("Kernel doesn't support filesystem keyring. Falling back to user keyring.")
return false
}
if errno == unix.EFAULT {
log.Printf("Detected support for filesystem keyring")
} else {
// EFAULT is expected, but as long as we didn't get ENOTTY the
// ioctl should be available.
log.Printf("Unexpected error from FS_IOC_ADD_ENCRYPTION_KEY(%q, NULL): %v", mount.Path, errno)
}
return true
}
// IsFsKeyringSupported returns true if the kernel supports the ioctls to
// add/remove fscrypt keys directly to/from the filesystem. For support to be
// detected, the given Mount must be for a filesystem that supports fscrypt.
func IsFsKeyringSupported(mount *filesystem.Mount) bool {
fsKeyringSupportedLock.Lock()
defer fsKeyringSupportedLock.Unlock()
if !fsKeyringSupportedKnown {
fsKeyringSupported = checkForFsKeyringSupport(mount)
fsKeyringSupportedKnown = true
}
return fsKeyringSupported
}
// buildKeySpecifier converts the key descriptor string to an FscryptKeySpecifier.
func buildKeySpecifier(spec *unix.FscryptKeySpecifier, descriptor string) error {
descriptorBytes, err := hex.DecodeString(descriptor)
if err != nil {
return errors.Errorf("key descriptor %q is invalid", descriptor)
}
switch len(descriptorBytes) {
case unix.FSCRYPT_KEY_DESCRIPTOR_SIZE:
spec.Type = unix.FSCRYPT_KEY_SPEC_TYPE_DESCRIPTOR
case unix.FSCRYPT_KEY_IDENTIFIER_SIZE:
spec.Type = unix.FSCRYPT_KEY_SPEC_TYPE_IDENTIFIER
default:
return errors.Errorf("key descriptor %q has unknown length", descriptor)
}
copy(spec.U[:], descriptorBytes)
return nil
}
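// Illustrative sketch (not part of the upstream file): a v1 key descriptor is
// 8 hex-encoded bytes (16 hex characters), while a v2 key identifier is
// 16 bytes (32 hex characters), e.g.
//
//	var spec unix.FscryptKeySpecifier
//	// 16 hex chars -> FSCRYPT_KEY_SPEC_TYPE_DESCRIPTOR (a v1 policy key)
//	_ = buildKeySpecifier(&spec, "0123456789abcdef")
//
// Any other decoded length is rejected as having an unknown length.
// The descriptor value above is hypothetical.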
type savedPrivs struct {
ruid, euid, suid int
}
// dropPrivsIfNeeded drops privileges (UIDs only) to the given user if we're
// working with a v2 policy key, and if the user is different from the user the
// process is currently running as.
//
// This is needed to change the effective UID so that FS_IOC_ADD_ENCRYPTION_KEY
// and FS_IOC_REMOVE_ENCRYPTION_KEY will add/remove a claim to the key for the
// intended user, and so that FS_IOC_GET_ENCRYPTION_KEY_STATUS will return the
// correct status flags for the user.
func dropPrivsIfNeeded(user *user.User, spec *unix.FscryptKeySpecifier) (*savedPrivs, error) {
if spec.Type == unix.FSCRYPT_KEY_SPEC_TYPE_DESCRIPTOR {
// v1 policy keys don't have any concept of user claims.
return nil, nil
}
targetUID := util.AtoiOrPanic(user.Uid)
ruid, euid, suid := security.GetUids()
if euid == targetUID {
return nil, nil
}
if err := security.SetUids(targetUID, targetUID, euid); err != nil {
return nil, err
}
return &savedPrivs{ruid, euid, suid}, nil
}
// restorePrivs restores root privileges if needed.
func restorePrivs(privs *savedPrivs) error {
if privs != nil {
return security.SetUids(privs.ruid, privs.euid, privs.suid)
}
return nil
}
// validateKeyDescriptor validates that the correct key descriptor was provided.
// This isn't really necessary; this is just an extra sanity check.
func validateKeyDescriptor(spec *unix.FscryptKeySpecifier, descriptor string) (string, error) {
if spec.Type != unix.FSCRYPT_KEY_SPEC_TYPE_IDENTIFIER {
// v1 policy key: the descriptor is chosen arbitrarily by
// userspace, so there's nothing to validate.
return descriptor, nil
}
// v2 policy key. The descriptor ("identifier" in the kernel UAPI) is
// calculated as a cryptographic hash of the key itself. The kernel
// ignores the provided value, and calculates and returns it itself. So
// verify that the returned value is as expected. If it's not, the key
// doesn't actually match the encryption policy we thought it was for.
actual := hex.EncodeToString(spec.U[:unix.FSCRYPT_KEY_IDENTIFIER_SIZE])
if descriptor == actual {
return descriptor, nil
}
return actual,
errors.Errorf("provided and actual key descriptors differ (%q != %q)",
descriptor, actual)
}
// fsAddEncryptionKey adds the specified encryption key to the specified filesystem.
func fsAddEncryptionKey(key *crypto.Key, descriptor string,
mount *filesystem.Mount, user *user.User) error {
dir, err := os.Open(mount.Path)
if err != nil {
return err
}
defer dir.Close()
argKey, err := crypto.NewBlankKey(int(unsafe.Sizeof(unix.FscryptAddKeyArg{})) + key.Len())
if err != nil {
return err
}
defer argKey.Wipe()
arg := (*unix.FscryptAddKeyArg)(argKey.UnsafePtr())
if err = buildKeySpecifier(&arg.Key_spec, descriptor); err != nil {
return err
}
raw := unsafe.Pointer(uintptr(argKey.UnsafePtr()) + unsafe.Sizeof(*arg))
arg.Raw_size = uint32(key.Len())
C.memcpy(raw, key.UnsafePtr(), C.size_t(key.Len()))
savedPrivs, err := dropPrivsIfNeeded(user, &arg.Key_spec)
if err != nil {
return err
}
_, _, errno := unix.Syscall(unix.SYS_IOCTL, dir.Fd(),
unix.FS_IOC_ADD_ENCRYPTION_KEY, uintptr(argKey.UnsafePtr()))
restorePrivs(savedPrivs)
log.Printf("FS_IOC_ADD_ENCRYPTION_KEY(%q, %s, <raw>) = %v", mount.Path, descriptor, errno)
if errno != 0 {
return errors.Wrapf(errno,
"error adding key with descriptor %s to filesystem %s",
descriptor, mount.Path)
}
if descriptor, err = validateKeyDescriptor(&arg.Key_spec, descriptor); err != nil {
fsRemoveEncryptionKey(descriptor, mount, user)
return err
}
return nil
}
// fsRemoveEncryptionKey removes the specified encryption key from the specified
// filesystem.
func fsRemoveEncryptionKey(descriptor string, mount *filesystem.Mount,
user *user.User) error {
dir, err := os.Open(mount.Path)
if err != nil {
return err
}
defer dir.Close()
var arg unix.FscryptRemoveKeyArg
if err = buildKeySpecifier(&arg.Key_spec, descriptor); err != nil {
return err
}
ioc := uintptr(unix.FS_IOC_REMOVE_ENCRYPTION_KEY)
iocName := "FS_IOC_REMOVE_ENCRYPTION_KEY"
var savedPrivs *savedPrivs
if user == nil {
ioc = unix.FS_IOC_REMOVE_ENCRYPTION_KEY_ALL_USERS
iocName = "FS_IOC_REMOVE_ENCRYPTION_KEY_ALL_USERS"
} else {
savedPrivs, err = dropPrivsIfNeeded(user, &arg.Key_spec)
if err != nil {
return err
}
}
_, _, errno := unix.Syscall(unix.SYS_IOCTL, dir.Fd(), ioc, uintptr(unsafe.Pointer(&arg)))
restorePrivs(savedPrivs)
log.Printf("%s(%q, %s) = %v, removal_status_flags=0x%x",
iocName, mount.Path, descriptor, errno, arg.Removal_status_flags)
switch errno {
case 0:
switch {
case arg.Removal_status_flags&unix.FSCRYPT_KEY_REMOVAL_STATUS_FLAG_OTHER_USERS != 0:
return ErrKeyAddedByOtherUsers
case arg.Removal_status_flags&unix.FSCRYPT_KEY_REMOVAL_STATUS_FLAG_FILES_BUSY != 0:
return ErrKeyFilesOpen
}
return nil
case unix.ENOKEY:
// ENOKEY means either the key is completely missing or that the
// current user doesn't have a claim to it. Distinguish between
// these two cases by getting the key status.
if user != nil {
status, _ := fsGetEncryptionKeyStatus(descriptor, mount, user)
if status == KeyPresentButOnlyOtherUsers {
return ErrKeyAddedByOtherUsers
}
}
return ErrKeyNotPresent
default:
return errors.Wrapf(errno,
"error removing key with descriptor %s from filesystem %s",
descriptor, mount.Path)
}
}
// fsGetEncryptionKeyStatus gets the status of the specified encryption key on
// the specified filesystem.
func fsGetEncryptionKeyStatus(descriptor string, mount *filesystem.Mount,
user *user.User) (KeyStatus, error) {
dir, err := os.Open(mount.Path)
if err != nil {
return KeyStatusUnknown, err
}
defer dir.Close()
var arg unix.FscryptGetKeyStatusArg
err = buildKeySpecifier(&arg.Key_spec, descriptor)
if err != nil {
return KeyStatusUnknown, err
}
savedPrivs, err := dropPrivsIfNeeded(user, &arg.Key_spec)
if err != nil {
return KeyStatusUnknown, err
}
_, _, errno := unix.Syscall(unix.SYS_IOCTL, dir.Fd(),
unix.FS_IOC_GET_ENCRYPTION_KEY_STATUS, uintptr(unsafe.Pointer(&arg)))
restorePrivs(savedPrivs)
log.Printf("FS_IOC_GET_ENCRYPTION_KEY_STATUS(%q, %s) = %v, status=%d, status_flags=0x%x",
mount.Path, descriptor, errno, arg.Status, arg.Status_flags)
if errno != 0 {
return KeyStatusUnknown,
errors.Wrapf(errno,
"error getting status of key with descriptor %s on filesystem %s",
descriptor, mount.Path)
}
switch arg.Status {
case unix.FSCRYPT_KEY_STATUS_ABSENT:
return KeyAbsent, nil
case unix.FSCRYPT_KEY_STATUS_PRESENT:
if arg.Key_spec.Type != unix.FSCRYPT_KEY_SPEC_TYPE_DESCRIPTOR &&
(arg.Status_flags&unix.FSCRYPT_KEY_STATUS_FLAG_ADDED_BY_SELF) == 0 {
return KeyPresentButOnlyOtherUsers, nil
}
return KeyPresent, nil
case unix.FSCRYPT_KEY_STATUS_INCOMPLETELY_REMOVED:
return KeyAbsentButFilesBusy, nil
default:
return KeyStatusUnknown,
errors.Errorf("unknown key status (%d) for key with descriptor %s on filesystem %s",
arg.Status, descriptor, mount.Path)
}
}

175
vendor/github.com/google/fscrypt/keyring/keyring.go generated vendored Normal file
View File

@ -0,0 +1,175 @@
/*
* keyring.go - Add/remove encryption policy keys to/from kernel
*
* Copyright 2019 Google LLC
* Author: Eric Biggers (ebiggers@google.com)
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not
* use this file except in compliance with the License. You may obtain a copy of
* the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
*/
// Package keyring manages adding, removing, and getting the status of
// encryption policy keys to/from the kernel. Most public functions are in
// keyring.go, and they delegate to either user_keyring.go or fs_keyring.go,
// depending on whether a user keyring or a filesystem keyring is being used.
//
// v2 encryption policies always use the filesystem keyring.
// v1 policies use the user keyring by default, but can be configured to use the
// filesystem keyring instead (requires root and kernel v5.4+).
package keyring
import (
"encoding/hex"
"os/user"
"strconv"
"github.com/pkg/errors"
"golang.org/x/sys/unix"
"github.com/google/fscrypt/crypto"
"github.com/google/fscrypt/filesystem"
"github.com/google/fscrypt/metadata"
"github.com/google/fscrypt/util"
)
// Keyring error values
var (
ErrKeyAddedByOtherUsers = errors.New("other users have added the key too")
ErrKeyFilesOpen = errors.New("some files using the key are still open")
ErrKeyNotPresent = errors.New("key not present or already removed")
ErrV2PoliciesUnsupported = errors.New("kernel is too old to support v2 encryption policies")
)
// Options are the options which specify *which* keyring the key should be
// added/removed/gotten to, and how.
type Options struct {
// Mount is the filesystem to which the key should be
// added/removed/gotten.
Mount *filesystem.Mount
// User is the user for whom the key should be added/removed/gotten.
User *user.User
// UseFsKeyringForV1Policies is true if keys for v1 encryption policies
// should be put in the filesystem's keyring (if supported) rather than
// in the user's keyring. Note that this makes AddEncryptionKey and
// RemoveEncryptionKey require root privileges.
UseFsKeyringForV1Policies bool
}
func shouldUseFsKeyring(descriptor string, options *Options) (bool, error) {
// For v1 encryption policy keys, use the filesystem keyring if
// use_fs_keyring_for_v1_policies is set in /etc/fscrypt.conf and the
// kernel supports it.
if len(descriptor) == hex.EncodedLen(unix.FSCRYPT_KEY_DESCRIPTOR_SIZE) {
return options.UseFsKeyringForV1Policies && IsFsKeyringSupported(options.Mount), nil
}
// For v2 encryption policy keys, always use the filesystem keyring; the
// kernel doesn't support any other way.
if !IsFsKeyringSupported(options.Mount) {
return true, ErrV2PoliciesUnsupported
}
return true, nil
}
// buildKeyDescription builds the description for an fscrypt key of type
// "logon". For ext4 and f2fs, it uses the legacy filesystem-specific prefixes
// for compatibility with kernels before v4.8 and v4.6 respectively. For other
// filesystems it uses the generic prefix "fscrypt".
func buildKeyDescription(options *Options, descriptor string) string {
switch options.Mount.FilesystemType {
case "ext4", "f2fs":
return options.Mount.FilesystemType + ":" + descriptor
default:
return unix.FSCRYPT_KEY_DESC_PREFIX + descriptor
}
}
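// Illustrative sketch (not part of the upstream file): for a v1 policy key with
// the hypothetical descriptor "0123456789abcdef", the resulting "logon" key
// description would be "ext4:0123456789abcdef" on ext4, "f2fs:0123456789abcdef"
// on f2fs, and "fscrypt:0123456789abcdef" on any other filesystem.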
// AddEncryptionKey adds an encryption policy key to a kernel keyring. It uses
// either the filesystem keyring for the target Mount or the user keyring for
// the target User.
func AddEncryptionKey(key *crypto.Key, descriptor string, options *Options) error {
if err := util.CheckValidLength(metadata.PolicyKeyLen, key.Len()); err != nil {
return errors.Wrap(err, "policy key")
}
useFsKeyring, err := shouldUseFsKeyring(descriptor, options)
if err != nil {
return err
}
if useFsKeyring {
return fsAddEncryptionKey(key, descriptor, options.Mount, options.User)
}
return userAddKey(key, buildKeyDescription(options, descriptor), options.User)
}
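// Illustrative usage sketch (not part of the upstream file); the mount, user,
// descriptor, and key values are assumptions supplied by the caller:
//
//	opts := &keyring.Options{Mount: mnt, User: usr}
//	if err := keyring.AddEncryptionKey(policyKey, descriptor, opts); err != nil {
//		log.Printf("adding policy key failed: %v", err)
//	}
//
// policyKey must be a *crypto.Key of length metadata.PolicyKeyLen, and the
// descriptor length selects between the v1 and v2 key formats as described above.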
// RemoveEncryptionKey removes an encryption policy key from a kernel keyring.
// It uses either the filesystem keyring for the target Mount or the user
// keyring for the target User.
func RemoveEncryptionKey(descriptor string, options *Options, allUsers bool) error {
useFsKeyring, err := shouldUseFsKeyring(descriptor, options)
if err != nil {
return err
}
if useFsKeyring {
user := options.User
if allUsers {
user = nil
}
return fsRemoveEncryptionKey(descriptor, options.Mount, user)
}
return userRemoveKey(buildKeyDescription(options, descriptor), options.User)
}
// KeyStatus is an enum that represents the status of a key in a kernel keyring.
type KeyStatus int
// The possible values of KeyStatus.
const (
KeyStatusUnknown = 0 + iota
KeyAbsent
KeyAbsentButFilesBusy
KeyPresent
KeyPresentButOnlyOtherUsers
)
func (status KeyStatus) String() string {
switch status {
case KeyStatusUnknown:
return "Unknown"
case KeyAbsent:
return "Absent"
case KeyAbsentButFilesBusy:
return "AbsentButFilesBusy"
case KeyPresent:
return "Present"
case KeyPresentButOnlyOtherUsers:
return "PresentButOnlyOtherUsers"
default:
return strconv.Itoa(int(status))
}
}
// GetEncryptionKeyStatus gets the status of an encryption policy key in a
// kernel keyring. It uses either the filesystem keyring for the target Mount
// or the user keyring for the target User.
func GetEncryptionKeyStatus(descriptor string, options *Options) (KeyStatus, error) {
useFsKeyring, err := shouldUseFsKeyring(descriptor, options)
if err != nil {
return KeyStatusUnknown, err
}
if useFsKeyring {
return fsGetEncryptionKeyStatus(descriptor, options.Mount, options.User)
}
_, _, err = userFindKey(buildKeyDescription(options, descriptor), options.User)
if err != nil {
return KeyAbsent, nil
}
return KeyPresent, nil
}

251
vendor/github.com/google/fscrypt/keyring/user_keyring.go generated vendored Normal file
View File

@ -0,0 +1,251 @@
/*
* user_keyring.go - Add/remove encryption policy keys to/from user keyrings.
* This is the deprecated mechanism; see fs_keyring.go for the new mechanism.
*
* Copyright 2017 Google Inc.
* Author: Joe Richey (joerichey@google.com)
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not
* use this file except in compliance with the License. You may obtain a copy of
* the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
*/
package keyring
import (
"os/user"
"runtime"
"unsafe"
"github.com/pkg/errors"
"golang.org/x/sys/unix"
"fmt"
"log"
"github.com/google/fscrypt/crypto"
"github.com/google/fscrypt/security"
"github.com/google/fscrypt/util"
)
// ErrAccessUserKeyring indicates that a user's keyring cannot be
// accessed.
type ErrAccessUserKeyring struct {
TargetUser *user.User
UnderlyingError error
}
func (err *ErrAccessUserKeyring) Error() string {
return fmt.Sprintf("could not access user keyring for %q: %s",
err.TargetUser.Username, err.UnderlyingError)
}
// ErrSessionUserKeyring indicates that a user's keyring is not linked
// into the session keyring.
type ErrSessionUserKeyring struct {
TargetUser *user.User
}
func (err *ErrSessionUserKeyring) Error() string {
return fmt.Sprintf("user keyring for %q is not linked into the session keyring",
err.TargetUser.Username)
}
// KeyType is always logon as required by filesystem encryption.
const KeyType = "logon"
// userAddKey puts the provided policy key into the user keyring for the
// specified user with the provided description, and type logon.
func userAddKey(key *crypto.Key, description string, targetUser *user.User) error {
runtime.LockOSThread() // ensure target user keyring remains possessed in thread keyring
defer runtime.UnlockOSThread()
// Create our payload (containing an FscryptKey)
payload, err := crypto.NewBlankKey(int(unsafe.Sizeof(unix.FscryptKey{})))
if err != nil {
return err
}
defer payload.Wipe()
// Cast the payload to an FscryptKey so we can initialize the fields.
fscryptKey := (*unix.FscryptKey)(payload.UnsafePtr())
// Mode is ignored by the kernel
fscryptKey.Mode = 0
fscryptKey.Size = uint32(key.Len())
copy(fscryptKey.Raw[:], key.Data())
keyringID, err := UserKeyringID(targetUser, true)
if err != nil {
return err
}
keyID, err := unix.AddKey(KeyType, description, payload.Data(), keyringID)
log.Printf("KeyctlAddKey(%s, %s, <data>, %d) = %d, %v",
KeyType, description, keyringID, keyID, err)
if err != nil {
return errors.Wrapf(err,
"error adding key with description %s to user keyring for %q",
description, targetUser.Username)
}
return nil
}
// userRemoveKey tries to remove a policy key from the user keyring with the
// provided description. An error is returned if the key does not exist.
func userRemoveKey(description string, targetUser *user.User) error {
runtime.LockOSThread() // ensure target user keyring remains possessed in thread keyring
defer runtime.UnlockOSThread()
keyID, keyringID, err := userFindKey(description, targetUser)
if err != nil {
return ErrKeyNotPresent
}
_, err = unix.KeyctlInt(unix.KEYCTL_UNLINK, keyID, keyringID, 0, 0)
log.Printf("KeyctlUnlink(%d, %d) = %v", keyID, keyringID, err)
if err != nil {
return errors.Wrapf(err,
"error removing key with description %s from user keyring for %q",
description, targetUser.Username)
}
return nil
}
// userFindKey tries to locate a key with the provided description in the user
// keyring for the target user. The key ID and keyring ID are returned if we can
// find the key. An error is returned if the key does not exist.
func userFindKey(description string, targetUser *user.User) (int, int, error) {
runtime.LockOSThread() // ensure target user keyring remains possessed in thread keyring
defer runtime.UnlockOSThread()
keyringID, err := UserKeyringID(targetUser, false)
if err != nil {
return 0, 0, err
}
keyID, err := unix.KeyctlSearch(keyringID, KeyType, description, 0)
log.Printf("KeyctlSearch(%d, %s, %s) = %d, %v", keyringID, KeyType, description, keyID, err)
if err != nil {
return 0, 0, errors.Wrapf(err,
"error searching for key %s in user keyring for %q",
description, targetUser.Username)
}
return keyID, keyringID, err
}
// UserKeyringID returns the key id of the target user's user keyring. We also
// ensure that the keyring will be accessible by linking it into the thread
// keyring and linking it into the root user keyring (permissions allowing). If
// checkSession is true, an error is returned if a normal user requests their
// user keyring, but it is not in the current session keyring.
func UserKeyringID(targetUser *user.User, checkSession bool) (int, error) {
runtime.LockOSThread() // ensure target user keyring remains possessed in thread keyring
defer runtime.UnlockOSThread()
uid := util.AtoiOrPanic(targetUser.Uid)
targetKeyring, err := userKeyringIDLookup(uid)
if err != nil {
return 0, &ErrAccessUserKeyring{targetUser, err}
}
if !util.IsUserRoot() {
// Make sure the returned keyring will be accessible by checking
// that it is in the session keyring.
if checkSession && !isUserKeyringInSession(uid) {
return 0, &ErrSessionUserKeyring{targetUser}
}
return targetKeyring, nil
}
// Make sure the returned keyring will be accessible by linking it into
// the root user's user keyring (which will not be garbage collected).
rootKeyring, err := userKeyringIDLookup(0)
if err != nil {
return 0, errors.Wrapf(err, "error looking up root's user keyring")
}
if rootKeyring != targetKeyring {
if err = keyringLink(targetKeyring, rootKeyring); err != nil {
return 0, errors.Wrapf(err,
"error linking user keyring for %q into root's user keyring",
targetUser.Username)
}
}
return targetKeyring, nil
}
func userKeyringIDLookup(uid int) (keyringID int, err error) {
// Our goals here are to:
// - Find the user keyring (for the provided uid)
// - Link it into the current thread keyring (so we can use it)
// - Make no permanent changes to the process privileges
// Complicating this are the facts that:
// - The value of KEY_SPEC_USER_KEYRING is determined by the ruid
// - Keyring linking permissions use the euid
// So we have to change both the ruid and euid to make this work,
// setting the suid to 0 so that we can later switch back.
ruid, euid, suid := security.GetUids()
if ruid != uid || euid != uid {
if err = security.SetUids(uid, uid, 0); err != nil {
return
}
defer func() {
resetErr := security.SetUids(ruid, euid, suid)
if resetErr != nil {
err = resetErr
}
}()
}
// We get the value of KEY_SPEC_USER_KEYRING. Note that this will also
// trigger the creation of the uid keyring if it does not yet exist.
keyringID, err = unix.KeyctlGetKeyringID(unix.KEY_SPEC_USER_KEYRING, true)
log.Printf("keyringID(_uid.%d) = %d, %v", uid, keyringID, err)
if err != nil {
return 0, err
}
// We still want to use this keyring after our privileges are reset. So
// we link it into the thread keyring, preventing a loss of access.
//
// We must be under LockOSThread() for this to work reliably. Note that
// we can't just use the process keyring, since it doesn't work reliably
// in Go programs, due to the Go runtime creating threads before the
// program starts and has a chance to create the process keyring.
if err = keyringLink(keyringID, unix.KEY_SPEC_THREAD_KEYRING); err != nil {
return 0, err
}
return keyringID, nil
}
// isUserKeyringInSession tells us if the user's uid keyring is in the current
// session keyring.
func isUserKeyringInSession(uid int) bool {
// We cannot use unix.KEY_SPEC_SESSION_KEYRING directly as that might
// create a session keyring if one does not exist.
sessionKeyring, err := unix.KeyctlGetKeyringID(unix.KEY_SPEC_SESSION_KEYRING, false)
log.Printf("keyringID(session) = %d, %v", sessionKeyring, err)
if err != nil {
return false
}
description := fmt.Sprintf("_uid.%d", uid)
id, err := unix.KeyctlSearch(sessionKeyring, "keyring", description, 0)
log.Printf("KeyctlSearch(%d, keyring, %s) = %d, %v", sessionKeyring, description, id, err)
return err == nil
}
func keyringLink(keyID int, keyringID int) error {
_, err := unix.KeyctlInt(unix.KEYCTL_LINK, keyID, keyringID, 0, 0)
log.Printf("KeyctlLink(%d, %d) = %v", keyID, keyringID, err)
return err
}

221
vendor/github.com/google/fscrypt/metadata/checks.go generated vendored Normal file
View File

@ -0,0 +1,221 @@
/*
* checks.go - Some sanity check methods for our metadata structures
*
* Copyright 2017 Google Inc.
* Author: Joe Richey (joerichey@google.com)
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not
* use this file except in compliance with the License. You may obtain a copy of
* the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
*/
package metadata
import (
"github.com/golang/protobuf/proto"
"github.com/pkg/errors"
"github.com/google/fscrypt/util"
)
var errNotInitialized = errors.New("not initialized")
// Metadata is the interface to all of the protobuf structures that can be
// checked for validity.
type Metadata interface {
CheckValidity() error
proto.Message
}
// CheckValidity ensures the mode has a name and isn't empty.
func (m EncryptionOptions_Mode) CheckValidity() error {
if m == EncryptionOptions_default {
return errNotInitialized
}
if m.String() == "" {
return errors.Errorf("unknown %d", m)
}
return nil
}
// CheckValidity ensures the source has a name and isn't empty.
func (s SourceType) CheckValidity() error {
if s == SourceType_default {
return errNotInitialized
}
if s.String() == "" {
return errors.Errorf("unknown %d", s)
}
return nil
}
// CheckValidity ensures the hash costs will be accepted by Argon2.
func (h *HashingCosts) CheckValidity() error {
if h == nil {
return errNotInitialized
}
if h.Time <= 0 {
return errors.Errorf("time=%d is not positive", h.Time)
}
if h.Parallelism <= 0 {
return errors.Errorf("parallelism=%d is not positive", h.Parallelism)
}
minMemory := 8 * h.Parallelism
if h.Memory < minMemory {
return errors.Errorf("memory=%d is less than minimum (%d)", h.Memory, minMemory)
}
return nil
}
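// Illustrative sketch (not part of the upstream file): costs that pass this
// check need positive Time and Parallelism and Memory >= 8*Parallelism, e.g.
//
//	costs := &HashingCosts{Time: 1, Memory: 128 * 1024, Parallelism: 4}
//	_ = costs.CheckValidity() // nil: 128*1024 >= 8*4
//
// The concrete values are hypothetical.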
// CheckValidity ensures our buffers are the correct length.
func (w *WrappedKeyData) CheckValidity() error {
if w == nil {
return errNotInitialized
}
if len(w.EncryptedKey) == 0 {
return errors.Wrap(errNotInitialized, "encrypted key")
}
if err := util.CheckValidLength(IVLen, len(w.IV)); err != nil {
return errors.Wrap(err, "IV")
}
return errors.Wrap(util.CheckValidLength(HMACLen, len(w.Hmac)), "HMAC")
}
// CheckValidity ensures our ProtectorData has the correct fields for its source.
func (p *ProtectorData) CheckValidity() error {
if p == nil {
return errNotInitialized
}
if err := p.Source.CheckValidity(); err != nil {
return errors.Wrap(err, "protector source")
}
// Source specific checks
switch p.Source {
case SourceType_pam_passphrase:
if p.Uid < 0 {
return errors.Errorf("UID=%d is negative", p.Uid)
}
fallthrough
case SourceType_custom_passphrase:
if err := p.Costs.CheckValidity(); err != nil {
return errors.Wrap(err, "passphrase hashing costs")
}
if err := util.CheckValidLength(SaltLen, len(p.Salt)); err != nil {
return errors.Wrap(err, "passphrase hashing salt")
}
}
// Generic checks
if err := p.WrappedKey.CheckValidity(); err != nil {
return errors.Wrap(err, "wrapped protector key")
}
if err := util.CheckValidLength(ProtectorDescriptorLen, len(p.ProtectorDescriptor)); err != nil {
return errors.Wrap(err, "protector descriptor")
}
err := util.CheckValidLength(InternalKeyLen, len(p.WrappedKey.EncryptedKey))
return errors.Wrap(err, "encrypted protector key")
}
// CheckValidity ensures each of the options is valid.
func (e *EncryptionOptions) CheckValidity() error {
if e == nil {
return errNotInitialized
}
if _, ok := util.Index(e.Padding, paddingArray); !ok {
return errors.Errorf("padding of %d is invalid", e.Padding)
}
if err := e.Contents.CheckValidity(); err != nil {
return errors.Wrap(err, "contents encryption mode")
}
if err := e.Filenames.CheckValidity(); err != nil {
return errors.Wrap(err, "filenames encryption mode")
}
// If PolicyVersion is unset, treat it as 1.
if e.PolicyVersion == 0 {
e.PolicyVersion = 1
}
if e.PolicyVersion != 1 && e.PolicyVersion != 2 {
return errors.Errorf("policy version of %d is invalid", e.PolicyVersion)
}
return nil
}
// CheckValidity ensures the fields are valid and have the correct lengths.
func (w *WrappedPolicyKey) CheckValidity() error {
if w == nil {
return errNotInitialized
}
if err := w.WrappedKey.CheckValidity(); err != nil {
return errors.Wrap(err, "wrapped key")
}
if err := util.CheckValidLength(PolicyKeyLen, len(w.WrappedKey.EncryptedKey)); err != nil {
return errors.Wrap(err, "encrypted key")
}
err := util.CheckValidLength(ProtectorDescriptorLen, len(w.ProtectorDescriptor))
return errors.Wrap(err, "wrapping protector descriptor")
}
// CheckValidity ensures the fields and each wrapped key are valid.
func (p *PolicyData) CheckValidity() error {
if p == nil {
return errNotInitialized
}
// Check each wrapped key
for i, w := range p.WrappedPolicyKeys {
if err := w.CheckValidity(); err != nil {
return errors.Wrapf(err, "policy key slot %d", i)
}
}
if err := p.Options.CheckValidity(); err != nil {
return errors.Wrap(err, "policy options")
}
var expectedLen int
switch p.Options.PolicyVersion {
case 1:
expectedLen = PolicyDescriptorLenV1
case 2:
expectedLen = PolicyDescriptorLenV2
default:
return errors.Errorf("policy version of %d is invalid", p.Options.PolicyVersion)
}
if err := util.CheckValidLength(expectedLen, len(p.KeyDescriptor)); err != nil {
return errors.Wrap(err, "policy key descriptor")
}
return nil
}
// CheckValidity ensures the Config has all the necessary info for its Source.
func (c *Config) CheckValidity() error {
// General checks
if c == nil {
return errNotInitialized
}
if err := c.Source.CheckValidity(); err != nil {
return errors.Wrap(err, "default config source")
}
// Source specific checks
switch c.Source {
case SourceType_pam_passphrase, SourceType_custom_passphrase:
if err := c.HashCosts.CheckValidity(); err != nil {
return errors.Wrap(err, "config hashing costs")
}
}
return errors.Wrap(c.Options.CheckValidity(), "config options")
}

59
vendor/github.com/google/fscrypt/metadata/config.go generated vendored Normal file
View File

@ -0,0 +1,59 @@
/*
* config.go - Parsing for our global config file. The file is simply the JSON
* output of the Config protocol buffer.
*
* Copyright 2017 Google Inc.
* Author: Joe Richey (joerichey@google.com)
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not
* use this file except in compliance with the License. You may obtain a copy of
* the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
*/
// Package metadata contains all of the on disk structures.
// These structures are defined in metadata.proto. The package also
// contains functions for manipulating these structures, specifically:
// * Reading and Writing the Config file to disk
// * Getting and Setting Policies for directories
// * Reasonable defaults for a Policy's EncryptionOptions
package metadata
import (
"io"
"github.com/golang/protobuf/jsonpb"
)
// WriteConfig outputs the Config data as nicely formatted JSON
func WriteConfig(config *Config, out io.Writer) error {
m := jsonpb.Marshaler{
EmitDefaults: true,
EnumsAsInts: false,
Indent: "\t",
OrigName: true,
}
if err := m.Marshal(out, config); err != nil {
return err
}
_, err := out.Write([]byte{'\n'})
return err
}
// ReadConfig reads the JSON data from the reader into a new Config structure
func ReadConfig(in io.Reader) (*Config, error) {
config := new(Config)
// Allow (and ignore) unknown fields for forwards compatibility.
u := jsonpb.Unmarshaler{
AllowUnknownFields: true,
}
return config, u.Unmarshal(in, config)
}
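// Illustrative sketch (not part of the upstream file), assuming a *Config value
// cfg, an io.Writer out, and an io.Reader in that yields the written JSON:
//
//	if err := WriteConfig(cfg, out); err != nil {
//		return err
//	}
//	cfg2, err := ReadConfig(in)
//
// WriteConfig emits indented JSON plus a trailing newline; ReadConfig tolerates
// unknown fields for forwards compatibility.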

57
vendor/github.com/google/fscrypt/metadata/constants.go generated vendored Normal file
View File

@ -0,0 +1,57 @@
/*
* constants.go - Some metadata constants used throughout fscrypt
*
* Copyright 2017 Google Inc.
* Author: Joe Richey (joerichey@google.com)
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not
* use this file except in compliance with the License. You may obtain a copy of
* the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
*/
package metadata
import (
"crypto/sha256"
"golang.org/x/sys/unix"
)
// Lengths for our keys, buffers, and strings used in fscrypt.
const (
// Length of policy descriptor (in hex chars) for v1 encryption policies
PolicyDescriptorLenV1 = 2 * unix.FSCRYPT_KEY_DESCRIPTOR_SIZE
// Length of protector descriptor (in hex chars)
ProtectorDescriptorLen = PolicyDescriptorLenV1
// Length of policy descriptor (in hex chars) for v2 encryption policies
PolicyDescriptorLenV2 = 2 * unix.FSCRYPT_KEY_IDENTIFIER_SIZE
// We always use 256-bit keys internally (compared to 512-bit policy keys).
InternalKeyLen = 32
IVLen = 16
SaltLen = 16
// We use SHA-256 for the HMAC, so the HMAC length equals the hash size.
HMACLen = sha256.Size
// PolicyKeyLen is the length of all keys passed directly to the Keyring
PolicyKeyLen = unix.FSCRYPT_MAX_KEY_SIZE
)
var (
// DefaultOptions uses the supported encryption modes, max padding, and
// policy version 1.
DefaultOptions = &EncryptionOptions{
Padding: 32,
Contents: EncryptionOptions_AES_256_XTS,
Filenames: EncryptionOptions_AES_256_CTS,
PolicyVersion: 1,
}
// DefaultSource is the source we use if none is specified.
DefaultSource = SourceType_custom_passphrase
)

589
vendor/github.com/google/fscrypt/metadata/metadata.pb.go generated vendored Normal file
View File

@ -0,0 +1,589 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// source: metadata/metadata.proto
package metadata
import proto "github.com/golang/protobuf/proto"
import fmt "fmt"
import math "math"
// Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal
var _ = fmt.Errorf
var _ = math.Inf
// This is a compile-time assertion to ensure that this generated file
// is compatible with the proto package it is being compiled against.
// A compilation error at this line likely means your copy of the
// proto package needs to be updated.
const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
// Specifies the method in which an outside secret is obtained for a Protector
type SourceType int32
const (
SourceType_default SourceType = 0
SourceType_pam_passphrase SourceType = 1
SourceType_custom_passphrase SourceType = 2
SourceType_raw_key SourceType = 3
)
var SourceType_name = map[int32]string{
0: "default",
1: "pam_passphrase",
2: "custom_passphrase",
3: "raw_key",
}
var SourceType_value = map[string]int32{
"default": 0,
"pam_passphrase": 1,
"custom_passphrase": 2,
"raw_key": 3,
}
func (x SourceType) String() string {
return proto.EnumName(SourceType_name, int32(x))
}
func (SourceType) EnumDescriptor() ([]byte, []int) {
return fileDescriptor_metadata_31965d2849cb292a, []int{0}
}
// Type of encryption; should match declarations of unix.FSCRYPT_MODE
type EncryptionOptions_Mode int32
const (
EncryptionOptions_default EncryptionOptions_Mode = 0
EncryptionOptions_AES_256_XTS EncryptionOptions_Mode = 1
EncryptionOptions_AES_256_GCM EncryptionOptions_Mode = 2
EncryptionOptions_AES_256_CBC EncryptionOptions_Mode = 3
EncryptionOptions_AES_256_CTS EncryptionOptions_Mode = 4
EncryptionOptions_AES_128_CBC EncryptionOptions_Mode = 5
EncryptionOptions_AES_128_CTS EncryptionOptions_Mode = 6
EncryptionOptions_Adiantum EncryptionOptions_Mode = 9
)
var EncryptionOptions_Mode_name = map[int32]string{
0: "default",
1: "AES_256_XTS",
2: "AES_256_GCM",
3: "AES_256_CBC",
4: "AES_256_CTS",
5: "AES_128_CBC",
6: "AES_128_CTS",
9: "Adiantum",
}
var EncryptionOptions_Mode_value = map[string]int32{
"default": 0,
"AES_256_XTS": 1,
"AES_256_GCM": 2,
"AES_256_CBC": 3,
"AES_256_CTS": 4,
"AES_128_CBC": 5,
"AES_128_CTS": 6,
"Adiantum": 9,
}
func (x EncryptionOptions_Mode) String() string {
return proto.EnumName(EncryptionOptions_Mode_name, int32(x))
}
func (EncryptionOptions_Mode) EnumDescriptor() ([]byte, []int) {
return fileDescriptor_metadata_31965d2849cb292a, []int{3, 0}
}
// Cost parameters to be used in our hashing functions.
type HashingCosts struct {
Time int64 `protobuf:"varint,2,opt,name=time,proto3" json:"time,omitempty"`
Memory int64 `protobuf:"varint,3,opt,name=memory,proto3" json:"memory,omitempty"`
Parallelism int64 `protobuf:"varint,4,opt,name=parallelism,proto3" json:"parallelism,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *HashingCosts) Reset() { *m = HashingCosts{} }
func (m *HashingCosts) String() string { return proto.CompactTextString(m) }
func (*HashingCosts) ProtoMessage() {}
func (*HashingCosts) Descriptor() ([]byte, []int) {
return fileDescriptor_metadata_31965d2849cb292a, []int{0}
}
func (m *HashingCosts) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_HashingCosts.Unmarshal(m, b)
}
func (m *HashingCosts) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_HashingCosts.Marshal(b, m, deterministic)
}
func (dst *HashingCosts) XXX_Merge(src proto.Message) {
xxx_messageInfo_HashingCosts.Merge(dst, src)
}
func (m *HashingCosts) XXX_Size() int {
return xxx_messageInfo_HashingCosts.Size(m)
}
func (m *HashingCosts) XXX_DiscardUnknown() {
xxx_messageInfo_HashingCosts.DiscardUnknown(m)
}
var xxx_messageInfo_HashingCosts proto.InternalMessageInfo
func (m *HashingCosts) GetTime() int64 {
if m != nil {
return m.Time
}
return 0
}
func (m *HashingCosts) GetMemory() int64 {
if m != nil {
return m.Memory
}
return 0
}
func (m *HashingCosts) GetParallelism() int64 {
if m != nil {
return m.Parallelism
}
return 0
}
// This structure is used for our authenticated wrapping/unwrapping of keys.
type WrappedKeyData struct {
IV []byte `protobuf:"bytes,1,opt,name=IV,proto3" json:"IV,omitempty"`
EncryptedKey []byte `protobuf:"bytes,2,opt,name=encrypted_key,json=encryptedKey,proto3" json:"encrypted_key,omitempty"`
Hmac []byte `protobuf:"bytes,3,opt,name=hmac,proto3" json:"hmac,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *WrappedKeyData) Reset() { *m = WrappedKeyData{} }
func (m *WrappedKeyData) String() string { return proto.CompactTextString(m) }
func (*WrappedKeyData) ProtoMessage() {}
func (*WrappedKeyData) Descriptor() ([]byte, []int) {
return fileDescriptor_metadata_31965d2849cb292a, []int{1}
}
func (m *WrappedKeyData) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_WrappedKeyData.Unmarshal(m, b)
}
func (m *WrappedKeyData) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_WrappedKeyData.Marshal(b, m, deterministic)
}
func (dst *WrappedKeyData) XXX_Merge(src proto.Message) {
xxx_messageInfo_WrappedKeyData.Merge(dst, src)
}
func (m *WrappedKeyData) XXX_Size() int {
return xxx_messageInfo_WrappedKeyData.Size(m)
}
func (m *WrappedKeyData) XXX_DiscardUnknown() {
xxx_messageInfo_WrappedKeyData.DiscardUnknown(m)
}
var xxx_messageInfo_WrappedKeyData proto.InternalMessageInfo
func (m *WrappedKeyData) GetIV() []byte {
if m != nil {
return m.IV
}
return nil
}
func (m *WrappedKeyData) GetEncryptedKey() []byte {
if m != nil {
return m.EncryptedKey
}
return nil
}
func (m *WrappedKeyData) GetHmac() []byte {
if m != nil {
return m.Hmac
}
return nil
}
// The associated data for each protector
type ProtectorData struct {
ProtectorDescriptor string `protobuf:"bytes,1,opt,name=protector_descriptor,json=protectorDescriptor,proto3" json:"protector_descriptor,omitempty"`
Source SourceType `protobuf:"varint,2,opt,name=source,proto3,enum=metadata.SourceType" json:"source,omitempty"`
// These are only used by some of the protector types
Name string `protobuf:"bytes,3,opt,name=name,proto3" json:"name,omitempty"`
Costs *HashingCosts `protobuf:"bytes,4,opt,name=costs,proto3" json:"costs,omitempty"`
Salt []byte `protobuf:"bytes,5,opt,name=salt,proto3" json:"salt,omitempty"`
Uid int64 `protobuf:"varint,6,opt,name=uid,proto3" json:"uid,omitempty"`
WrappedKey *WrappedKeyData `protobuf:"bytes,7,opt,name=wrapped_key,json=wrappedKey,proto3" json:"wrapped_key,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *ProtectorData) Reset() { *m = ProtectorData{} }
func (m *ProtectorData) String() string { return proto.CompactTextString(m) }
func (*ProtectorData) ProtoMessage() {}
func (*ProtectorData) Descriptor() ([]byte, []int) {
return fileDescriptor_metadata_31965d2849cb292a, []int{2}
}
func (m *ProtectorData) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_ProtectorData.Unmarshal(m, b)
}
func (m *ProtectorData) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_ProtectorData.Marshal(b, m, deterministic)
}
func (dst *ProtectorData) XXX_Merge(src proto.Message) {
xxx_messageInfo_ProtectorData.Merge(dst, src)
}
func (m *ProtectorData) XXX_Size() int {
return xxx_messageInfo_ProtectorData.Size(m)
}
func (m *ProtectorData) XXX_DiscardUnknown() {
xxx_messageInfo_ProtectorData.DiscardUnknown(m)
}
var xxx_messageInfo_ProtectorData proto.InternalMessageInfo
func (m *ProtectorData) GetProtectorDescriptor() string {
if m != nil {
return m.ProtectorDescriptor
}
return ""
}
func (m *ProtectorData) GetSource() SourceType {
if m != nil {
return m.Source
}
return SourceType_default
}
func (m *ProtectorData) GetName() string {
if m != nil {
return m.Name
}
return ""
}
func (m *ProtectorData) GetCosts() *HashingCosts {
if m != nil {
return m.Costs
}
return nil
}
func (m *ProtectorData) GetSalt() []byte {
if m != nil {
return m.Salt
}
return nil
}
func (m *ProtectorData) GetUid() int64 {
if m != nil {
return m.Uid
}
return 0
}
func (m *ProtectorData) GetWrappedKey() *WrappedKeyData {
if m != nil {
return m.WrappedKey
}
return nil
}
// Encryption policy specifics, corresponds to the fscrypt_policy struct
type EncryptionOptions struct {
Padding int64 `protobuf:"varint,1,opt,name=padding,proto3" json:"padding,omitempty"`
Contents EncryptionOptions_Mode `protobuf:"varint,2,opt,name=contents,proto3,enum=metadata.EncryptionOptions_Mode" json:"contents,omitempty"`
Filenames EncryptionOptions_Mode `protobuf:"varint,3,opt,name=filenames,proto3,enum=metadata.EncryptionOptions_Mode" json:"filenames,omitempty"`
PolicyVersion int64 `protobuf:"varint,4,opt,name=policy_version,json=policyVersion,proto3" json:"policy_version,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *EncryptionOptions) Reset() { *m = EncryptionOptions{} }
func (m *EncryptionOptions) String() string { return proto.CompactTextString(m) }
func (*EncryptionOptions) ProtoMessage() {}
func (*EncryptionOptions) Descriptor() ([]byte, []int) {
return fileDescriptor_metadata_31965d2849cb292a, []int{3}
}
func (m *EncryptionOptions) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_EncryptionOptions.Unmarshal(m, b)
}
func (m *EncryptionOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_EncryptionOptions.Marshal(b, m, deterministic)
}
func (dst *EncryptionOptions) XXX_Merge(src proto.Message) {
xxx_messageInfo_EncryptionOptions.Merge(dst, src)
}
func (m *EncryptionOptions) XXX_Size() int {
return xxx_messageInfo_EncryptionOptions.Size(m)
}
func (m *EncryptionOptions) XXX_DiscardUnknown() {
xxx_messageInfo_EncryptionOptions.DiscardUnknown(m)
}
var xxx_messageInfo_EncryptionOptions proto.InternalMessageInfo
func (m *EncryptionOptions) GetPadding() int64 {
if m != nil {
return m.Padding
}
return 0
}
func (m *EncryptionOptions) GetContents() EncryptionOptions_Mode {
if m != nil {
return m.Contents
}
return EncryptionOptions_default
}
func (m *EncryptionOptions) GetFilenames() EncryptionOptions_Mode {
if m != nil {
return m.Filenames
}
return EncryptionOptions_default
}
func (m *EncryptionOptions) GetPolicyVersion() int64 {
if m != nil {
return m.PolicyVersion
}
return 0
}
type WrappedPolicyKey struct {
ProtectorDescriptor string `protobuf:"bytes,1,opt,name=protector_descriptor,json=protectorDescriptor,proto3" json:"protector_descriptor,omitempty"`
WrappedKey *WrappedKeyData `protobuf:"bytes,2,opt,name=wrapped_key,json=wrappedKey,proto3" json:"wrapped_key,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *WrappedPolicyKey) Reset() { *m = WrappedPolicyKey{} }
func (m *WrappedPolicyKey) String() string { return proto.CompactTextString(m) }
func (*WrappedPolicyKey) ProtoMessage() {}
func (*WrappedPolicyKey) Descriptor() ([]byte, []int) {
return fileDescriptor_metadata_31965d2849cb292a, []int{4}
}
func (m *WrappedPolicyKey) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_WrappedPolicyKey.Unmarshal(m, b)
}
func (m *WrappedPolicyKey) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_WrappedPolicyKey.Marshal(b, m, deterministic)
}
func (dst *WrappedPolicyKey) XXX_Merge(src proto.Message) {
xxx_messageInfo_WrappedPolicyKey.Merge(dst, src)
}
func (m *WrappedPolicyKey) XXX_Size() int {
return xxx_messageInfo_WrappedPolicyKey.Size(m)
}
func (m *WrappedPolicyKey) XXX_DiscardUnknown() {
xxx_messageInfo_WrappedPolicyKey.DiscardUnknown(m)
}
var xxx_messageInfo_WrappedPolicyKey proto.InternalMessageInfo
func (m *WrappedPolicyKey) GetProtectorDescriptor() string {
if m != nil {
return m.ProtectorDescriptor
}
return ""
}
func (m *WrappedPolicyKey) GetWrappedKey() *WrappedKeyData {
if m != nil {
return m.WrappedKey
}
return nil
}
// The associated data for each policy
type PolicyData struct {
KeyDescriptor string `protobuf:"bytes,1,opt,name=key_descriptor,json=keyDescriptor,proto3" json:"key_descriptor,omitempty"`
Options *EncryptionOptions `protobuf:"bytes,2,opt,name=options,proto3" json:"options,omitempty"`
WrappedPolicyKeys []*WrappedPolicyKey `protobuf:"bytes,3,rep,name=wrapped_policy_keys,json=wrappedPolicyKeys,proto3" json:"wrapped_policy_keys,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *PolicyData) Reset() { *m = PolicyData{} }
func (m *PolicyData) String() string { return proto.CompactTextString(m) }
func (*PolicyData) ProtoMessage() {}
func (*PolicyData) Descriptor() ([]byte, []int) {
return fileDescriptor_metadata_31965d2849cb292a, []int{5}
}
func (m *PolicyData) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_PolicyData.Unmarshal(m, b)
}
func (m *PolicyData) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_PolicyData.Marshal(b, m, deterministic)
}
func (dst *PolicyData) XXX_Merge(src proto.Message) {
xxx_messageInfo_PolicyData.Merge(dst, src)
}
func (m *PolicyData) XXX_Size() int {
return xxx_messageInfo_PolicyData.Size(m)
}
func (m *PolicyData) XXX_DiscardUnknown() {
xxx_messageInfo_PolicyData.DiscardUnknown(m)
}
var xxx_messageInfo_PolicyData proto.InternalMessageInfo
func (m *PolicyData) GetKeyDescriptor() string {
if m != nil {
return m.KeyDescriptor
}
return ""
}
func (m *PolicyData) GetOptions() *EncryptionOptions {
if m != nil {
return m.Options
}
return nil
}
func (m *PolicyData) GetWrappedPolicyKeys() []*WrappedPolicyKey {
if m != nil {
return m.WrappedPolicyKeys
}
return nil
}
// Data stored in the config file
type Config struct {
Source SourceType `protobuf:"varint,1,opt,name=source,proto3,enum=metadata.SourceType" json:"source,omitempty"`
HashCosts *HashingCosts `protobuf:"bytes,2,opt,name=hash_costs,json=hashCosts,proto3" json:"hash_costs,omitempty"`
Options *EncryptionOptions `protobuf:"bytes,4,opt,name=options,proto3" json:"options,omitempty"`
UseFsKeyringForV1Policies bool `protobuf:"varint,5,opt,name=use_fs_keyring_for_v1_policies,json=useFsKeyringForV1Policies,proto3" json:"use_fs_keyring_for_v1_policies,omitempty"`
AllowCrossUserMetadata bool `protobuf:"varint,6,opt,name=allow_cross_user_metadata,json=allowCrossUserMetadata,proto3" json:"allow_cross_user_metadata,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *Config) Reset() { *m = Config{} }
func (m *Config) String() string { return proto.CompactTextString(m) }
func (*Config) ProtoMessage() {}
func (*Config) Descriptor() ([]byte, []int) {
return fileDescriptor_metadata_31965d2849cb292a, []int{6}
}
func (m *Config) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_Config.Unmarshal(m, b)
}
func (m *Config) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_Config.Marshal(b, m, deterministic)
}
func (dst *Config) XXX_Merge(src proto.Message) {
xxx_messageInfo_Config.Merge(dst, src)
}
func (m *Config) XXX_Size() int {
return xxx_messageInfo_Config.Size(m)
}
func (m *Config) XXX_DiscardUnknown() {
xxx_messageInfo_Config.DiscardUnknown(m)
}
var xxx_messageInfo_Config proto.InternalMessageInfo
func (m *Config) GetSource() SourceType {
if m != nil {
return m.Source
}
return SourceType_default
}
func (m *Config) GetHashCosts() *HashingCosts {
if m != nil {
return m.HashCosts
}
return nil
}
func (m *Config) GetOptions() *EncryptionOptions {
if m != nil {
return m.Options
}
return nil
}
func (m *Config) GetUseFsKeyringForV1Policies() bool {
if m != nil {
return m.UseFsKeyringForV1Policies
}
return false
}
func (m *Config) GetAllowCrossUserMetadata() bool {
if m != nil {
return m.AllowCrossUserMetadata
}
return false
}
func init() {
proto.RegisterType((*HashingCosts)(nil), "metadata.HashingCosts")
proto.RegisterType((*WrappedKeyData)(nil), "metadata.WrappedKeyData")
proto.RegisterType((*ProtectorData)(nil), "metadata.ProtectorData")
proto.RegisterType((*EncryptionOptions)(nil), "metadata.EncryptionOptions")
proto.RegisterType((*WrappedPolicyKey)(nil), "metadata.WrappedPolicyKey")
proto.RegisterType((*PolicyData)(nil), "metadata.PolicyData")
proto.RegisterType((*Config)(nil), "metadata.Config")
proto.RegisterEnum("metadata.SourceType", SourceType_name, SourceType_value)
proto.RegisterEnum("metadata.EncryptionOptions_Mode", EncryptionOptions_Mode_name, EncryptionOptions_Mode_value)
}
func init() { proto.RegisterFile("metadata/metadata.proto", fileDescriptor_metadata_31965d2849cb292a) }
var fileDescriptor_metadata_31965d2849cb292a = []byte{
// 748 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x9c, 0x55, 0xdb, 0x6a, 0xf3, 0x46,
0x10, 0xae, 0x24, 0xc7, 0x87, 0xf1, 0xa1, 0xca, 0xfe, 0x69, 0xaa, 0xb4, 0x50, 0x8c, 0x4b, 0x20,
0x94, 0x90, 0x62, 0x97, 0x94, 0x06, 0x4a, 0x21, 0x75, 0x92, 0x36, 0x09, 0xa1, 0xe9, 0xda, 0x75,
0x5b, 0x28, 0x88, 0x8d, 0xb4, 0xb6, 0x17, 0x4b, 0x5a, 0xb1, 0xbb, 0x8a, 0xd1, 0x5d, 0xef, 0xfa,
0x00, 0x7d, 0x97, 0xf6, 0x65, 0xfa, 0x30, 0x45, 0x2b, 0xc9, 0x87, 0x04, 0x42, 0xf2, 0xdf, 0x98,
0xd9, 0x6f, 0x67, 0xe6, 0x9b, 0xf9, 0x66, 0xc7, 0x82, 0x8f, 0x43, 0xaa, 0x88, 0x4f, 0x14, 0xf9,
0xb2, 0x34, 0x4e, 0x62, 0xc1, 0x15, 0x47, 0xf5, 0xf2, 0xdc, 0xfb, 0x03, 0x5a, 0x3f, 0x12, 0x39,
0x67, 0xd1, 0x6c, 0xc8, 0xa5, 0x92, 0x08, 0x41, 0x45, 0xb1, 0x90, 0x3a, 0x66, 0xd7, 0x38, 0xb2,
0xb0, 0xb6, 0xd1, 0x3e, 0x54, 0x43, 0x1a, 0x72, 0x91, 0x3a, 0x96, 0x46, 0x8b, 0x13, 0xea, 0x42,
0x33, 0x26, 0x82, 0x04, 0x01, 0x0d, 0x98, 0x0c, 0x9d, 0x8a, 0xbe, 0xdc, 0x84, 0x7a, 0xbf, 0x43,
0xe7, 0x57, 0x41, 0xe2, 0x98, 0xfa, 0xb7, 0x34, 0xbd, 0x20, 0x8a, 0xa0, 0x0e, 0x98, 0xd7, 0x13,
0xc7, 0xe8, 0x1a, 0x47, 0x2d, 0x6c, 0x5e, 0x4f, 0xd0, 0xe7, 0xd0, 0xa6, 0x91, 0x27, 0xd2, 0x58,
0x51, 0xdf, 0x5d, 0xd0, 0x54, 0x13, 0xb7, 0x70, 0x6b, 0x05, 0xde, 0xd2, 0x34, 0x2b, 0x6a, 0x1e,
0x12, 0x4f, 0xd3, 0xb7, 0xb0, 0xb6, 0x7b, 0x7f, 0x9b, 0xd0, 0xbe, 0x17, 0x5c, 0x51, 0x4f, 0x71,
0xa1, 0x53, 0xf7, 0x61, 0x2f, 0x2e, 0x01, 0xd7, 0xa7, 0xd2, 0x13, 0x2c, 0x56, 0x5c, 0x68, 0xb2,
0x06, 0x7e, 0xb7, 0xba, 0xbb, 0x58, 0x5d, 0xa1, 0x63, 0xa8, 0x4a, 0x9e, 0x08, 0x2f, 0xef, 0xb7,
0x33, 0xd8, 0x3b, 0x59, 0x09, 0x35, 0xd2, 0xf8, 0x38, 0x8d, 0x29, 0x2e, 0x7c, 0xb2, 0x32, 0x22,
0x12, 0x52, 0x5d, 0x46, 0x03, 0x6b, 0x1b, 0x1d, 0xc3, 0x8e, 0x97, 0x09, 0xa7, 0xbb, 0x6f, 0x0e,
0xf6, 0xd7, 0x09, 0x36, 0x65, 0xc5, 0xb9, 0x53, 0x96, 0x41, 0x92, 0x40, 0x39, 0x3b, 0x79, 0x23,
0x99, 0x8d, 0x6c, 0xb0, 0x12, 0xe6, 0x3b, 0x55, 0xad, 0x5e, 0x66, 0xa2, 0x33, 0x68, 0x2e, 0x73,
0xd5, 0xb4, 0x22, 0x35, 0x9d, 0xd9, 0x59, 0x67, 0xde, 0x96, 0x14, 0xc3, 0x72, 0x75, 0xee, 0xfd,
0x67, 0xc2, 0xee, 0x65, 0x2e, 0x1d, 0xe3, 0xd1, 0x4f, 0xfa, 0x57, 0x22, 0x07, 0x6a, 0x31, 0xf1,
0x7d, 0x16, 0xcd, 0xb4, 0x18, 0x16, 0x2e, 0x8f, 0xe8, 0x5b, 0xa8, 0x7b, 0x3c, 0x52, 0x34, 0x52,
0xb2, 0x90, 0xa0, 0xbb, 0xe6, 0x79, 0x96, 0xe8, 0xe4, 0x8e, 0xfb, 0x14, 0xaf, 0x22, 0xd0, 0x77,
0xd0, 0x98, 0xb2, 0x80, 0x66, 0x42, 0x48, 0xad, 0xca, 0x6b, 0xc2, 0xd7, 0x21, 0xe8, 0x10, 0x3a,
0x31, 0x0f, 0x98, 0x97, 0xba, 0x8f, 0x54, 0x48, 0xc6, 0xa3, 0xe2, 0x0d, 0xb5, 0x73, 0x74, 0x92,
0x83, 0xbd, 0xbf, 0x0c, 0xa8, 0x64, 0xa1, 0xa8, 0x09, 0x35, 0x9f, 0x4e, 0x49, 0x12, 0x28, 0xfb,
0x03, 0xf4, 0x21, 0x34, 0xcf, 0x2f, 0x47, 0xee, 0xe0, 0xf4, 0x6b, 0xf7, 0xb7, 0xf1, 0xc8, 0x36,
0x36, 0x81, 0x1f, 0x86, 0x77, 0xb6, 0xb9, 0x09, 0x0c, 0xbf, 0x1f, 0xda, 0xd6, 0x16, 0x30, 0x1e,
0xd9, 0x95, 0x12, 0xe8, 0x0f, 0xbe, 0xd1, 0x1e, 0x3b, 0x5b, 0xc0, 0x78, 0x64, 0x57, 0x51, 0x0b,
0xea, 0xe7, 0x3e, 0x23, 0x91, 0x4a, 0x42, 0xbb, 0xd1, 0xfb, 0xd3, 0x00, 0xbb, 0x50, 0xff, 0x5e,
0x97, 0x98, 0xbd, 0xce, 0xf7, 0x78, 0x77, 0x4f, 0x26, 0x6c, 0xbe, 0x61, 0xc2, 0xff, 0x18, 0x00,
0x39, 0xb7, 0x7e, 0xf4, 0x87, 0xd0, 0x59, 0xd0, 0xf4, 0x39, 0x6d, 0x7b, 0x41, 0xd3, 0x0d, 0xc2,
0x53, 0xa8, 0xf1, 0x7c, 0x08, 0x05, 0xd9, 0xa7, 0x2f, 0xcc, 0x09, 0x97, 0xbe, 0xe8, 0x06, 0xde,
0x95, 0x75, 0x16, 0x83, 0x5a, 0xd0, 0x34, 0x1b, 0xb5, 0x75, 0xd4, 0x1c, 0x7c, 0xf2, 0xac, 0xde,
0x95, 0x26, 0x78, 0x77, 0xf9, 0x04, 0x91, 0xbd, 0x7f, 0x4d, 0xa8, 0x0e, 0x79, 0x34, 0x65, 0xb3,
0x8d, 0xb5, 0x33, 0x5e, 0xb1, 0x76, 0xa7, 0x00, 0x73, 0x22, 0xe7, 0x6e, 0xbe, 0x67, 0xe6, 0x8b,
0x7b, 0xd6, 0xc8, 0x3c, 0xf3, 0x7f, 0xb2, 0x8d, 0x96, 0x2b, 0x6f, 0x68, 0xf9, 0x1c, 0x3e, 0x4b,
0x24, 0x75, 0xa7, 0x32, 0x6b, 0x55, 0xb0, 0x68, 0xe6, 0x4e, 0xb9, 0x70, 0x1f, 0xfb, 0xb9, 0x00,
0x8c, 0x4a, 0xbd, 0xbc, 0x75, 0x7c, 0x90, 0x48, 0x7a, 0x25, 0x6f, 0x73, 0x9f, 0x2b, 0x2e, 0x26,
0xfd, 0xfb, 0xc2, 0x01, 0x9d, 0xc1, 0x01, 0x09, 0x02, 0xbe, 0x74, 0x3d, 0xc1, 0xa5, 0x74, 0x13,
0x49, 0x85, 0x5b, 0x52, 0xeb, 0x3d, 0xaf, 0xe3, 0x7d, 0xed, 0x30, 0xcc, 0xee, 0x7f, 0x91, 0x54,
0xdc, 0x15, 0xb7, 0x37, 0x95, 0xba, 0x65, 0x57, 0x70, 0xdb, 0xe3, 0x61, 0x4c, 0x14, 0x7b, 0x60,
0x01, 0x53, 0xe9, 0x17, 0x3f, 0x03, 0xac, 0x65, 0xd9, 0x5e, 0x02, 0x04, 0x9d, 0x98, 0x84, 0x6e,
0x4c, 0xa4, 0x8c, 0xe7, 0x82, 0x48, 0x6a, 0x1b, 0xe8, 0x23, 0xd8, 0xf5, 0x12, 0xa9, 0xf8, 0x16,
0x6c, 0x66, 0x71, 0x82, 0x2c, 0xb3, 0xae, 0x6c, 0xeb, 0xa1, 0xaa, 0xbf, 0x03, 0x5f, 0xfd, 0x1f,
0x00, 0x00, 0xff, 0xff, 0xe2, 0x78, 0x9e, 0x2e, 0x22, 0x06, 0x00, 0x00,
}
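A minimal sketch (not part of the vendored sources) of how a caller might construct and serialize one of the generated messages above, assuming the generated package is imported as `metadata` and the proto runtime lives at `github.com/golang/protobuf/proto`:

```go
package main

import (
	"fmt"
	"log"

	"github.com/golang/protobuf/proto" // assumed runtime import path
	"github.com/google/fscrypt/metadata"
)

func main() {
	// Illustrative values only; the key descriptor is a placeholder hex string.
	pd := &metadata.PolicyData{
		KeyDescriptor: "0123456789abcdef",
		Options: &metadata.EncryptionOptions{
			Padding:       32,
			Contents:      metadata.EncryptionOptions_Adiantum,
			Filenames:     metadata.EncryptionOptions_Adiantum,
			PolicyVersion: 2,
		},
	}

	// Marshal to the wire format and print a compact text rendering.
	buf, err := proto.Marshal(pd)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("marshalled %d bytes: %s\n", len(buf), proto.CompactTextString(pd))
}
```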

View File

@ -0,0 +1,107 @@
/*
* metadata.proto - File which contains all of the metadata structures which we
* write to metadata files. Must be compiled with protoc to use the library.
* Compilation can be invoked with go generate.
*
* Copyright 2017 Google Inc.
* Author: Joe Richey (joerichey@google.com)
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not
* use this file except in compliance with the License. You may obtain a copy of
* the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
*/
// If you modify this file, be sure to run "go generate" on this package.
syntax = "proto3";
package metadata;
// Cost parameters to be used in our hashing functions.
message HashingCosts {
int64 time = 2;
int64 memory = 3;
int64 parallelism = 4;
}
// This structure is used for our authenticated wrapping/unwrapping of keys.
message WrappedKeyData {
bytes IV = 1;
bytes encrypted_key = 2;
bytes hmac = 3;
}
// Specifies the method in which an outside secret is obtained for a Protector
enum SourceType {
default = 0;
pam_passphrase = 1;
custom_passphrase = 2;
raw_key = 3;
}
// The associated data for each protector
message ProtectorData {
string protector_descriptor = 1;
SourceType source = 2;
// These are only used by some of the protector types
string name = 3;
HashingCosts costs = 4;
bytes salt = 5;
int64 uid = 6;
WrappedKeyData wrapped_key = 7;
}
// Encryption policy specifics, corresponds to the fscrypt_policy struct
message EncryptionOptions {
int64 padding = 1;
// Type of encryption; should match declarations of unix.FSCRYPT_MODE
enum Mode {
default = 0;
AES_256_XTS = 1;
AES_256_GCM = 2;
AES_256_CBC = 3;
AES_256_CTS = 4;
AES_128_CBC = 5;
AES_128_CTS = 6;
Adiantum = 9;
}
Mode contents = 2;
Mode filenames = 3;
int64 policy_version = 4;
}
message WrappedPolicyKey {
string protector_descriptor = 1;
WrappedKeyData wrapped_key = 2;
}
// The associated data for each policy
message PolicyData {
string key_descriptor = 1;
EncryptionOptions options = 2;
repeated WrappedPolicyKey wrapped_policy_keys = 3;
}
// Data stored in the config file
message Config {
SourceType source = 1;
HashingCosts hash_costs = 2;
EncryptionOptions options = 4;
bool use_fs_keyring_for_v1_policies = 5;
bool allow_cross_user_metadata = 6;
// reserve the removed field 'string compatibility = 3;'
reserved 3;
reserved "compatibility";
}

348
vendor/github.com/google/fscrypt/metadata/policy.go generated vendored Normal file
View File

@ -0,0 +1,348 @@
/*
* policy.go - Functions for getting and setting policies on a specified
* directory or file.
*
* Copyright 2017 Google Inc.
* Author: Joe Richey (joerichey@google.com)
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not
* use this file except in compliance with the License. You may obtain a copy of
* the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
*/
package metadata
import (
"encoding/hex"
"fmt"
"log"
"math"
"os"
"os/user"
"strconv"
"unsafe"
"github.com/pkg/errors"
"golang.org/x/sys/unix"
"github.com/google/fscrypt/util"
)
var (
// ErrEncryptionNotSupported indicates that encryption is not supported
// on the given filesystem, and there is no way to enable it.
ErrEncryptionNotSupported = errors.New("encryption not supported")
// ErrEncryptionNotEnabled indicates that encryption is not supported on
// the given filesystem, but there is a way to enable it.
ErrEncryptionNotEnabled = errors.New("encryption not enabled")
)
// ErrAlreadyEncrypted indicates that the path is already encrypted.
type ErrAlreadyEncrypted struct {
Path string
}
func (err *ErrAlreadyEncrypted) Error() string {
return fmt.Sprintf("file or directory %q is already encrypted", err.Path)
}
// ErrBadEncryptionOptions indicates that unsupported encryption options were given.
type ErrBadEncryptionOptions struct {
Path string
Options *EncryptionOptions
}
func (err *ErrBadEncryptionOptions) Error() string {
return fmt.Sprintf(`cannot encrypt %q because the kernel doesn't support the requested encryption options.
The options are %s`, err.Path, err.Options)
}
// ErrDirectoryNotOwned indicates a directory can't be encrypted because it's
// owned by another user.
type ErrDirectoryNotOwned struct {
Path string
Owner uint32
}
func (err *ErrDirectoryNotOwned) Error() string {
owner := strconv.Itoa(int(err.Owner))
if u, e := user.LookupId(owner); e == nil && u.Username != "" {
owner = u.Username
}
return fmt.Sprintf(`cannot encrypt %q because it's owned by another user (%s).
Encryption can only be enabled on a directory you own, even if you have
write access to the directory.`, err.Path, owner)
}
// ErrNotEncrypted indicates that the path is not encrypted.
type ErrNotEncrypted struct {
Path string
}
func (err *ErrNotEncrypted) Error() string {
return fmt.Sprintf("file or directory %q is not encrypted", err.Path)
}
func policyIoctl(file *os.File, request uintptr, arg unsafe.Pointer) error {
_, _, errno := unix.Syscall(unix.SYS_IOCTL, file.Fd(), request, uintptr(arg))
if errno == 0 {
return nil
}
return errno
}
// Maps EncryptionOptions.Padding <-> FSCRYPT_POLICY_FLAGS
var (
paddingArray = []int64{4, 8, 16, 32}
flagsArray = []int64{unix.FSCRYPT_POLICY_FLAGS_PAD_4, unix.FSCRYPT_POLICY_FLAGS_PAD_8,
unix.FSCRYPT_POLICY_FLAGS_PAD_16, unix.FSCRYPT_POLICY_FLAGS_PAD_32}
)
// flagsToPadding returns the amount of padding specified in the policy flags.
func flagsToPadding(flags uint8) int64 {
paddingFlag := int64(flags & unix.FS_POLICY_FLAGS_PAD_MASK)
// This lookup should always succeed
padding, ok := util.Lookup(paddingFlag, flagsArray, paddingArray)
if !ok {
log.Panicf("padding flag of %x not found", paddingFlag)
}
return padding
}
func buildV1PolicyData(policy *unix.FscryptPolicyV1) *PolicyData {
return &PolicyData{
KeyDescriptor: hex.EncodeToString(policy.Master_key_descriptor[:]),
Options: &EncryptionOptions{
Padding: flagsToPadding(policy.Flags),
Contents: EncryptionOptions_Mode(policy.Contents_encryption_mode),
Filenames: EncryptionOptions_Mode(policy.Filenames_encryption_mode),
PolicyVersion: 1,
},
}
}
func buildV2PolicyData(policy *unix.FscryptPolicyV2) *PolicyData {
return &PolicyData{
KeyDescriptor: hex.EncodeToString(policy.Master_key_identifier[:]),
Options: &EncryptionOptions{
Padding: flagsToPadding(policy.Flags),
Contents: EncryptionOptions_Mode(policy.Contents_encryption_mode),
Filenames: EncryptionOptions_Mode(policy.Filenames_encryption_mode),
PolicyVersion: 2,
},
}
}
// GetPolicy returns the Policy data for the given directory or file (includes
// the KeyDescriptor and the encryption options). Returns an error if the
// path is not encrypted or the policy couldn't be retrieved.
func GetPolicy(path string) (*PolicyData, error) {
file, err := os.Open(path)
if err != nil {
return nil, err
}
defer file.Close()
// First try the new version of the ioctl. This works for both v1 and v2 policies.
var arg unix.FscryptGetPolicyExArg
arg.Size = uint64(unsafe.Sizeof(arg.Policy))
policyPtr := util.Ptr(arg.Policy[:])
err = policyIoctl(file, unix.FS_IOC_GET_ENCRYPTION_POLICY_EX, unsafe.Pointer(&arg))
if err == unix.ENOTTY {
// Fall back to the old version of the ioctl. This works for v1 policies only.
err = policyIoctl(file, unix.FS_IOC_GET_ENCRYPTION_POLICY, policyPtr)
arg.Size = uint64(unsafe.Sizeof(unix.FscryptPolicyV1{}))
}
switch err {
case nil:
break
case unix.ENOTTY:
return nil, ErrEncryptionNotSupported
case unix.EOPNOTSUPP:
return nil, ErrEncryptionNotEnabled
case unix.ENODATA, unix.ENOENT:
// ENOENT was returned instead of ENODATA on some filesystems before v4.11.
return nil, &ErrNotEncrypted{path}
default:
return nil, errors.Wrapf(err, "failed to get encryption policy of %q", path)
}
switch arg.Policy[0] { // arg.policy.version
case unix.FSCRYPT_POLICY_V1:
if arg.Size != uint64(unsafe.Sizeof(unix.FscryptPolicyV1{})) {
// should never happen
return nil, errors.New("unexpected size for v1 policy")
}
return buildV1PolicyData((*unix.FscryptPolicyV1)(policyPtr)), nil
case unix.FSCRYPT_POLICY_V2:
if arg.Size != uint64(unsafe.Sizeof(unix.FscryptPolicyV2{})) {
// should never happen
return nil, errors.New("unexpected size for v2 policy")
}
return buildV2PolicyData((*unix.FscryptPolicyV2)(policyPtr)), nil
default:
return nil, errors.Errorf("unsupported encryption policy version [%d]",
arg.Policy[0])
}
}
// For improved performance, use the DIRECT_KEY flag when using ciphers that
// support it, e.g. Adiantum. It is safe because fscrypt won't reuse the key
// for any other policy. (Multiple directories with the same policy are okay.)
func shouldUseDirectKeyFlag(options *EncryptionOptions) bool {
// Contents and filenames encryption modes must be the same
if options.Contents != options.Filenames {
return false
}
// Currently only Adiantum supports DIRECT_KEY.
return options.Contents == EncryptionOptions_Adiantum
}
func buildPolicyFlags(options *EncryptionOptions) uint8 {
// This lookup should always succeed (as policy is valid)
flags, ok := util.Lookup(options.Padding, paddingArray, flagsArray)
if !ok {
log.Panicf("padding of %d was not found", options.Padding)
}
if shouldUseDirectKeyFlag(options) {
flags |= unix.FSCRYPT_POLICY_FLAG_DIRECT_KEY
}
return uint8(flags)
}
func setV1Policy(file *os.File, options *EncryptionOptions, descriptorBytes []byte) error {
policy := unix.FscryptPolicyV1{
Version: unix.FSCRYPT_POLICY_V1,
Contents_encryption_mode: uint8(options.Contents),
Filenames_encryption_mode: uint8(options.Filenames),
Flags: uint8(buildPolicyFlags(options)),
}
// The descriptor should always be the correct length (as policy is valid)
if len(descriptorBytes) != unix.FSCRYPT_KEY_DESCRIPTOR_SIZE {
log.Panic("wrong descriptor size for v1 policy")
}
copy(policy.Master_key_descriptor[:], descriptorBytes)
return policyIoctl(file, unix.FS_IOC_SET_ENCRYPTION_POLICY, unsafe.Pointer(&policy))
}
func setV2Policy(file *os.File, options *EncryptionOptions, descriptorBytes []byte) error {
policy := unix.FscryptPolicyV2{
Version: unix.FSCRYPT_POLICY_V2,
Contents_encryption_mode: uint8(options.Contents),
Filenames_encryption_mode: uint8(options.Filenames),
Flags: uint8(buildPolicyFlags(options)),
}
// The descriptor should always be the correct length (as policy is valid)
if len(descriptorBytes) != unix.FSCRYPT_KEY_IDENTIFIER_SIZE {
log.Panic("wrong descriptor size for v2 policy")
}
copy(policy.Master_key_identifier[:], descriptorBytes)
return policyIoctl(file, unix.FS_IOC_SET_ENCRYPTION_POLICY, unsafe.Pointer(&policy))
}
// SetPolicy sets up the specified directory to be encrypted with the specified
// policy. Returns an error if we cannot set the policy for any reason (not a
// directory, invalid options or KeyDescriptor, etc).
func SetPolicy(path string, data *PolicyData) error {
file, err := os.Open(path)
if err != nil {
return err
}
defer file.Close()
if err = data.CheckValidity(); err != nil {
return errors.Wrap(err, "invalid policy")
}
descriptorBytes, err := hex.DecodeString(data.KeyDescriptor)
if err != nil {
return errors.New("invalid key descriptor: " + data.KeyDescriptor)
}
switch data.Options.PolicyVersion {
case 1:
err = setV1Policy(file, data.Options, descriptorBytes)
case 2:
err = setV2Policy(file, data.Options, descriptorBytes)
default:
err = errors.Errorf("policy version of %d is invalid", data.Options.PolicyVersion)
}
if err == unix.EINVAL {
// Before kernel v4.11, many different errors all caused unix.EINVAL to be returned.
// We try to disambiguate this error here. This disambiguation will not always give
// the correct error due to a potential race condition on path.
if info, statErr := os.Stat(path); statErr != nil || !info.IsDir() {
// Checking if the path is not a directory
err = unix.ENOTDIR
} else if _, policyErr := GetPolicy(path); policyErr == nil {
// Checking if a policy is already set on this directory
err = unix.EEXIST
}
}
switch err {
case nil:
return nil
case unix.EACCES:
var stat unix.Stat_t
if statErr := unix.Stat(path, &stat); statErr == nil && stat.Uid != uint32(os.Geteuid()) {
return &ErrDirectoryNotOwned{path, stat.Uid}
}
case unix.EEXIST:
return &ErrAlreadyEncrypted{path}
case unix.EINVAL:
return &ErrBadEncryptionOptions{path, data.Options}
case unix.ENOTTY:
return ErrEncryptionNotSupported
case unix.EOPNOTSUPP:
return ErrEncryptionNotEnabled
}
return errors.Wrapf(err, "failed to set encryption policy on %q", path)
}
// CheckSupport returns an error if the filesystem containing path does not
// support filesystem encryption. This can be for many reasons including an
// incompatible kernel or filesystem or not enabling the right feature flags.
func CheckSupport(path string) error {
file, err := os.Open(path)
if err != nil {
return err
}
defer file.Close()
// On supported directories, giving a bad policy will return EINVAL
badPolicy := unix.FscryptPolicyV1{
Version: math.MaxUint8,
Contents_encryption_mode: math.MaxUint8,
Filenames_encryption_mode: math.MaxUint8,
Flags: math.MaxUint8,
}
err = policyIoctl(file, unix.FS_IOC_SET_ENCRYPTION_POLICY, unsafe.Pointer(&badPolicy))
switch err {
case nil:
log.Panicf(`FS_IOC_SET_ENCRYPTION_POLICY succeeded when it should have failed.
Please open an issue, filesystem %q may be corrupted.`, path)
case unix.EINVAL, unix.EACCES:
return nil
case unix.ENOTTY:
return ErrEncryptionNotSupported
case unix.EOPNOTSUPP:
return ErrEncryptionNotEnabled
}
return errors.Wrapf(err, "unexpected error checking for encryption support on filesystem %q", path)
}
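A hypothetical caller sketch (not part of the fscrypt sources) tying `CheckSupport` and `GetPolicy` together; the directory path is a placeholder:

```go
package main

import (
	"errors"
	"fmt"
	"log"

	"github.com/google/fscrypt/metadata"
)

func main() {
	const dir = "/mnt/encrypted/dir" // placeholder path

	// Verify the filesystem supports encryption before querying the policy.
	if err := metadata.CheckSupport(dir); err != nil {
		log.Fatalf("no fscrypt support: %v", err)
	}

	policy, err := metadata.GetPolicy(dir)
	if err != nil {
		var notEncrypted *metadata.ErrNotEncrypted
		if errors.As(err, &notEncrypted) {
			log.Fatalf("%s is not encrypted", dir)
		}
		log.Fatal(err)
	}
	fmt.Printf("policy v%d, key descriptor %s\n",
		policy.Options.PolicyVersion, policy.KeyDescriptor)
}
```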

49
vendor/github.com/google/fscrypt/security/cache.go generated vendored Normal file
View File

@ -0,0 +1,49 @@
/*
* cache.go - Handles cache clearing and management.
*
* Copyright 2017 Google Inc.
* Author: Joe Richey (joerichey@google.com)
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not
* use this file except in compliance with the License. You may obtain a copy of
* the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
*/
package security
import (
"log"
"os"
"golang.org/x/sys/unix"
)
// DropFilesystemCache instructs the kernel to free the reclaimable inodes and
// dentries. This has the effect of making encrypted directories whose keys are
// not present no longer accessible. Requires root privileges.
func DropFilesystemCache() error {
// Dirty reclaimable inodes must be synced so that they will be freed.
log.Print("syncing changes to filesystem")
unix.Sync()
// See: https://www.kernel.org/doc/Documentation/sysctl/vm.txt
log.Print("freeing reclaimable inodes and dentries")
file, err := os.OpenFile("/proc/sys/vm/drop_caches", os.O_WRONLY|os.O_SYNC, 0)
if err != nil {
return err
}
defer file.Close()
// "2" just frees the reclaimable inodes and dentries. The associated
// pages to these inodes will be freed. We do not need to free the
// entire pagecache (as this will severely impact performance).
_, err = file.WriteString("2")
return err
}

156
vendor/github.com/google/fscrypt/security/privileges.go generated vendored Normal file
View File

@ -0,0 +1,156 @@
/*
* privileges.go - Functions for managing users and privileges.
*
* Copyright 2017 Google Inc.
* Author: Joe Richey (joerichey@google.com)
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not
* use this file except in compliance with the License. You may obtain a copy of
* the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
*/
// Package security manages:
// - Cache clearing (cache.go)
// - Privilege manipulation (privileges.go)
package security
// Use the libc versions of setreuid, setregid, and setgroups instead of the
// "sys/unix" versions. The "sys/unix" versions use the raw syscalls which
// operate on the calling thread only, whereas the libc versions operate on the
// whole process. And we need to operate on the whole process, firstly for
// pam_fscrypt to prevent the privileges of Go worker threads from diverging
// from the PAM stack's "main" thread, violating libc's assumption and causing
// an abort() later in the PAM stack; and secondly because Go code may migrate
// between OS-level threads while it's running.
//
// See also: https://github.com/golang/go/issues/1435
/*
#define _GNU_SOURCE // for getresuid and setresuid
#include <sys/types.h>
#include <unistd.h> // getting and setting uids and gids
#include <grp.h> // setgroups
*/
import "C"
import (
"log"
"os/user"
"syscall"
"github.com/pkg/errors"
"github.com/google/fscrypt/util"
)
// Privileges encapsulate the effective uid/gid and groups of a process.
type Privileges struct {
euid C.uid_t
egid C.gid_t
groups []C.gid_t
}
// ProcessPrivileges returns the process's current effective privileges.
func ProcessPrivileges() (*Privileges, error) {
ruid := C.getuid()
euid := C.geteuid()
rgid := C.getgid()
egid := C.getegid()
var groups []C.gid_t
n, err := C.getgroups(0, nil)
if n < 0 {
return nil, err
}
// If n == 0, the user isn't in any groups, so groups == nil is fine.
if n > 0 {
groups = make([]C.gid_t, n)
n, err = C.getgroups(n, &groups[0])
if n < 0 {
return nil, err
}
groups = groups[:n]
}
log.Printf("Current privs (real, effective): uid=(%d,%d) gid=(%d,%d) groups=%v",
ruid, euid, rgid, egid, groups)
return &Privileges{euid, egid, groups}, nil
}
// UserPrivileges returns the default privileges for the specified user.
func UserPrivileges(user *user.User) (*Privileges, error) {
privs := &Privileges{
euid: C.uid_t(util.AtoiOrPanic(user.Uid)),
egid: C.gid_t(util.AtoiOrPanic(user.Gid)),
}
userGroups, err := user.GroupIds()
if err != nil {
return nil, util.SystemError(err.Error())
}
privs.groups = make([]C.gid_t, len(userGroups))
for i, group := range userGroups {
privs.groups[i] = C.gid_t(util.AtoiOrPanic(group))
}
return privs, nil
}
// SetProcessPrivileges sets the privileges of the current process to have those
// specified by privs. The original privileges can be obtained by first saving
// the output of ProcessPrivileges, calling SetProcessPrivileges with the
// desired privs, then calling SetProcessPrivileges with the saved privs.
func SetProcessPrivileges(privs *Privileges) error {
log.Printf("Setting euid=%d egid=%d groups=%v", privs.euid, privs.egid, privs.groups)
// If setting privs as root, we need to set the euid to 0 first, so that
// we will have the necessary permissions to make the other changes to
// the groups/egid/euid, regardless of our original euid.
C.seteuid(0)
// Separately handle the case where the user is in no groups.
numGroups := C.size_t(len(privs.groups))
groupsPtr := (*C.gid_t)(nil)
if numGroups > 0 {
groupsPtr = &privs.groups[0]
}
if res, err := C.setgroups(numGroups, groupsPtr); res < 0 {
return errors.Wrapf(err.(syscall.Errno), "setting groups")
}
if res, err := C.setegid(privs.egid); res < 0 {
return errors.Wrapf(err.(syscall.Errno), "setting egid")
}
if res, err := C.seteuid(privs.euid); res < 0 {
return errors.Wrapf(err.(syscall.Errno), "setting euid")
}
ProcessPrivileges()
return nil
}
// SetUids sets the process's real, effective, and saved UIDs.
func SetUids(ruid, euid, suid int) error {
log.Printf("Setting ruid=%d euid=%d suid=%d", ruid, euid, suid)
// We elevate all the privs before setting them. This prevents issues
// with (ruid=1000,euid=1000,suid=0), where just a single call to
// setresuid might fail with permission denied.
if res, err := C.setresuid(0, 0, 0); res < 0 {
return errors.Wrapf(err.(syscall.Errno), "setting uids")
}
if res, err := C.setresuid(C.uid_t(ruid), C.uid_t(euid), C.uid_t(suid)); res < 0 {
return errors.Wrapf(err.(syscall.Errno), "setting uids")
}
return nil
}
// GetUids gets the process's real, effective, and saved UIDs.
func GetUids() (int, int, int) {
var ruid, euid, suid C.uid_t
C.getresuid(&ruid, &euid, &suid)
return int(ruid), int(euid), int(suid)
}
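A minimal sketch (not part of the vendored sources) of the save/drop/restore pattern described in the `SetProcessPrivileges` comment; it must run as root and the username is a placeholder:

```go
package main

import (
	"log"
	"os/user"

	"github.com/google/fscrypt/security"
)

func main() {
	// Save the current (root) privileges so they can be restored later.
	saved, err := security.ProcessPrivileges()
	if err != nil {
		log.Fatal(err)
	}

	target, err := user.Lookup("alice") // placeholder user
	if err != nil {
		log.Fatal(err)
	}
	privs, err := security.UserPrivileges(target)
	if err != nil {
		log.Fatal(err)
	}

	// Drop to the target user, do the work, then restore.
	if err := security.SetProcessPrivileges(privs); err != nil {
		log.Fatal(err)
	}
	// ... perform work as the target user ...
	if err := security.SetProcessPrivileges(saved); err != nil {
		log.Fatal(err)
	}
}
```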

135
vendor/github.com/google/fscrypt/util/errors.go generated vendored Normal file
View File

@ -0,0 +1,135 @@
/*
* errors.go - Custom errors and error functions used by fscrypt
*
* Copyright 2017 Google Inc.
* Author: Joe Richey (joerichey@google.com)
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not
* use this file except in compliance with the License. You may obtain a copy of
* the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
*/
package util
import (
"fmt"
"io"
"log"
"os"
"github.com/pkg/errors"
)
// ErrReader wraps an io.Reader, passing along calls to Read() until a read
// fails. Then, the error is stored, and all subsequent calls to Read() do
// nothing. This allows you to write code which performs many successive reads
// and does all of the error checking at the end. For example:
//
// r := NewErrReader(reader)
// r.Read(foo)
// r.Read(bar)
// r.Read(baz)
// if r.Err() != nil {
// // Handle error
// }
//
// Taken from https://blog.golang.org/errors-are-values by Rob Pike.
type ErrReader struct {
r io.Reader
err error
}
// NewErrReader creates an ErrReader which wraps the provided reader.
func NewErrReader(reader io.Reader) *ErrReader {
return &ErrReader{r: reader, err: nil}
}
// Read runs ReadFull on the wrapped reader if no errors have occurred.
// Otherwise, the previous error is just returned and no reads are attempted.
func (e *ErrReader) Read(p []byte) (n int, err error) {
if e.err == nil {
n, e.err = io.ReadFull(e.r, p)
}
return n, e.err
}
// Err returns the first encountered err (or nil if no errors occurred).
func (e *ErrReader) Err() error {
return e.err
}
// ErrWriter works exactly like ErrReader, except with io.Writer.
type ErrWriter struct {
w io.Writer
err error
}
// NewErrWriter creates an ErrWriter which wraps the provided writer.
func NewErrWriter(writer io.Writer) *ErrWriter {
return &ErrWriter{w: writer, err: nil}
}
// Write runs the wrapped writer's Write if no errors have occurred. Otherwise,
// the previous error is just returned and no writes are attempted.
func (e *ErrWriter) Write(p []byte) (n int, err error) {
if e.err == nil {
n, e.err = e.w.Write(p)
}
return n, e.err
}
// Err returns the first encountered err (or nil if no errors occurred).
func (e *ErrWriter) Err() error {
return e.err
}
// CheckValidLength returns an invalid length error if expected != actual
func CheckValidLength(expected, actual int) error {
if expected == actual {
return nil
}
return fmt.Errorf("expected length of %d, got %d", expected, actual)
}
// SystemError is an error that should indicate something has gone wrong in the
// underlying system (syscall failure, bad ioctl, etc...).
type SystemError string
func (s SystemError) Error() string {
return "system error: " + string(s)
}
// NeverError panics if a non-nil error is passed in. It should be used to check
// for logic errors, not to handle recoverable errors.
func NeverError(err error) {
if err != nil {
log.Panicf("NeverError() check failed: %v", err)
}
}
var (
// testEnvVarName is the name of an environment variable that should be
// set to an empty mountpoint. This is only used for integration tests.
// If not set, integration tests are skipped.
testEnvVarName = "TEST_FILESYSTEM_ROOT"
// ErrSkipIntegration indicates integration tests shouldn't be run.
ErrSkipIntegration = errors.New("skipping integration test")
)
// TestRoot returns the root of a filesystem specified by testEnvVarName. This
// function is only used for integration tests.
func TestRoot() (string, error) {
path := os.Getenv(testEnvVarName)
if path == "" {
return "", ErrSkipIntegration
}
return path, nil
}

163
vendor/github.com/google/fscrypt/util/util.go generated vendored Normal file
View File

@ -0,0 +1,163 @@
/*
* util.go - Various helpers used throughout fscrypt
*
* Copyright 2017 Google Inc.
* Author: Joe Richey (joerichey@google.com)
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not
* use this file except in compliance with the License. You may obtain a copy of
* the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
*/
// Package util contains useful components for simplifying Go code.
//
// The package contains common error types (errors.go) and functions for
// converting arrays to pointers.
package util
import (
"bufio"
"fmt"
"log"
"os"
"os/user"
"strconv"
"unsafe"
"golang.org/x/sys/unix"
)
// Ptr converts a Go byte array to a pointer to the start of the array.
func Ptr(slice []byte) unsafe.Pointer {
if len(slice) == 0 {
return nil
}
return unsafe.Pointer(&slice[0])
}
// ByteSlice takes a pointer to some data and views it as a slice of bytes.
// Note, indexing into this slice is unsafe.
func ByteSlice(ptr unsafe.Pointer) []byte {
// Slice must fit in the smallest address space go supports.
return (*[1 << 30]byte)(ptr)[:]
}
// PointerSlice takes a pointer to an array of pointers and views it as a slice
// of pointers. Note, indexing into this slice is unsafe.
func PointerSlice(ptr unsafe.Pointer) []unsafe.Pointer {
// Slice must fit in the smallest address space go supports.
return (*[1 << 28]unsafe.Pointer)(ptr)[:]
}
// Index returns the first index i such that inVal == inArray[i].
// ok is true if we find a match, false otherwise.
func Index(inVal int64, inArray []int64) (index int, ok bool) {
for index, val := range inArray {
if val == inVal {
return index, true
}
}
return 0, false
}
// Lookup finds inVal in inArray and returns the corresponding element in
// outArray. Specifically, if inVal == inArray[i], outVal == outArray[i].
// ok is true if we find a match, false otherwise.
func Lookup(inVal int64, inArray, outArray []int64) (outVal int64, ok bool) {
index, ok := Index(inVal, inArray)
if !ok {
return 0, false
}
return outArray[index], true
}
// MinInt returns the lesser of a and b.
func MinInt(a, b int) int {
if a < b {
return a
}
return b
}
// MaxInt returns the greater of a and b.
func MaxInt(a, b int) int {
if a > b {
return a
}
return b
}
// MinInt64 returns the lesser of a and b.
func MinInt64(a, b int64) int64 {
if a < b {
return a
}
return b
}
// ReadLine returns a line of input from standard input. An empty string is
// returned if the user didn't enter anything or on error.
func ReadLine() (string, error) {
scanner := bufio.NewScanner(os.Stdin)
scanner.Scan()
return scanner.Text(), scanner.Err()
}
// AtoiOrPanic converts a string to an int or it panics. Should only be used in
// situations where the input MUST be a decimal number.
func AtoiOrPanic(input string) int {
i, err := strconv.Atoi(input)
if err != nil {
panic(err)
}
return i
}
// UserFromUID returns the User corresponding to the given user id.
func UserFromUID(uid int64) (*user.User, error) {
return user.LookupId(strconv.FormatInt(uid, 10))
}
// EffectiveUser returns the user entry corresponding to the effective user.
func EffectiveUser() (*user.User, error) {
return UserFromUID(int64(os.Geteuid()))
}
// IsUserRoot checks if the effective user is root.
func IsUserRoot() bool {
return os.Geteuid() == 0
}
// Chown changes the owner of a File to a User.
func Chown(file *os.File, user *user.User) error {
uid := AtoiOrPanic(user.Uid)
gid := AtoiOrPanic(user.Gid)
return file.Chown(uid, gid)
}
// IsKernelVersionAtLeast returns true if the Linux kernel version is at least
// major.minor. If something goes wrong it assumes false.
func IsKernelVersionAtLeast(major, minor int) bool {
var uname unix.Utsname
if err := unix.Uname(&uname); err != nil {
log.Printf("Uname failed [%v], assuming old kernel", err)
return false
}
release := string(uname.Release[:])
log.Printf("Kernel version is %s", release)
var actualMajor, actualMinor int
if n, _ := fmt.Sscanf(release, "%d.%d", &actualMajor, &actualMinor); n != 2 {
log.Printf("Unrecognized uname format %q, assuming old kernel", release)
return false
}
return actualMajor > major ||
(actualMajor == major && actualMinor >= minor)
}

26
vendor/github.com/pkg/xattr/.gitignore generated vendored Normal file
View File

@ -0,0 +1,26 @@
# Compiled Object files, Static and Dynamic libs (Shared Objects)
*.o
*.a
*.so
# Folders
_obj
_test
.DS_Store
# Architecture specific extensions/prefixes
*.[568vq]
[568vq].out
*.cgo1.go
*.cgo2.c
_cgo_defun.c
_cgo_gotypes.go
_cgo_export.*
_testmain.go
*.exe
*.test
*.swp

25
vendor/github.com/pkg/xattr/LICENSE generated vendored Normal file
View File

@ -0,0 +1,25 @@
Copyright (c) 2012 Dave Cheney. All rights reserved.
Copyright (c) 2014 Kuba Podgórski. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

45
vendor/github.com/pkg/xattr/README.md generated vendored Normal file
View File

@ -0,0 +1,45 @@
[![GoDoc](https://godoc.org/github.com/pkg/xattr?status.svg)](http://godoc.org/github.com/pkg/xattr)
[![Go Report Card](https://goreportcard.com/badge/github.com/pkg/xattr)](https://goreportcard.com/report/github.com/pkg/xattr)
[![Build Status](https://github.com/pkg/xattr/workflows/build/badge.svg)](https://github.com/pkg/xattr/actions?query=workflow%3Abuild)
[![Codecov](https://codecov.io/gh/pkg/xattr/branch/master/graph/badge.svg)](https://codecov.io/gh/pkg/xattr)
xattr
=====
Extended attribute support for Go (linux + darwin + freebsd + netbsd + solaris).
"Extended attributes are name:value pairs associated permanently with files and directories, similar to the environment strings associated with a process. An attribute may be defined or undefined. If it is defined, its value may be empty or non-empty." [See more...](https://en.wikipedia.org/wiki/Extended_file_attributes)
`SetWithFlags` additionally allows passing system flags that are forwarded to the underlying calls. FreeBSD and NetBSD do not support this, so the parameter is ignored there.
The `L` variants of all functions (`LGet/LSet/...`) are identical to `Get/Set/...` except that they
do not reference a symlink that appears at the end of a path. See
[GoDoc](http://godoc.org/github.com/pkg/xattr) for details.
### Example
```go
const path = "/tmp/myfile"
const prefix = "user."
if err := xattr.Set(path, prefix+"test", []byte("test-attr-value")); err != nil {
log.Fatal(err)
}
var list []string
if list, err = xattr.List(path); err != nil {
log.Fatal(err)
}
var data []byte
if data, err = xattr.Get(path, prefix+"test"); err != nil {
log.Fatal(err)
}
if err = xattr.Remove(path, prefix+"test"); err != nil {
log.Fatal(err)
}
// One can also specify the flags parameter to be passed to the OS.
if err := xattr.SetWithFlags(path, prefix+"test", []byte("test-attr-value"), xattr.XATTR_CREATE); err != nil {
log.Fatal(err)
}
```

255
vendor/github.com/pkg/xattr/xattr.go generated vendored Normal file
View File

@ -0,0 +1,255 @@
/*
Package xattr provides support for extended attributes on linux, darwin and freebsd.
Extended attributes are name:value pairs associated permanently with files and directories,
similar to the environment strings associated with a process.
An attribute may be defined or undefined. If it is defined, its value may be empty or non-empty.
More details can be found here: https://en.wikipedia.org/wiki/Extended_file_attributes .
All functions are provided in triples: Get/LGet/FGet, Set/LSet/FSet etc. The "L"
variant will not follow a symlink at the end of the path, and the "F" variant accepts
a file descriptor instead of a path.
Example for "L" variant, assuming path is "/symlink1/symlink2", where both components are
symlinks:
Get will follow "symlink1" and "symlink2" and operate on the target of
"symlink2". LGet will follow "symlink1" but operate directly on "symlink2".
*/
package xattr
import (
"os"
"syscall"
)
// Error records an error and the operation, file path and attribute that caused it.
type Error struct {
Op string
Path string
Name string
Err error
}
func (e *Error) Error() (errstr string) {
if e.Op != "" {
errstr += e.Op
}
if e.Path != "" {
if errstr != "" {
errstr += " "
}
errstr += e.Path
}
if e.Name != "" {
if errstr != "" {
errstr += " "
}
errstr += e.Name
}
if e.Err != nil {
if errstr != "" {
errstr += ": "
}
errstr += e.Err.Error()
}
return
}
// Get retrieves extended attribute data associated with path. It will follow
// all symlinks along the path.
func Get(path, name string) ([]byte, error) {
return get(path, name, func(name string, data []byte) (int, error) {
return getxattr(path, name, data)
})
}
// LGet is like Get but does not follow a symlink at the end of the path.
func LGet(path, name string) ([]byte, error) {
return get(path, name, func(name string, data []byte) (int, error) {
return lgetxattr(path, name, data)
})
}
// FGet is like Get but accepts an os.File instead of a file path.
func FGet(f *os.File, name string) ([]byte, error) {
return get(f.Name(), name, func(name string, data []byte) (int, error) {
return fgetxattr(f, name, data)
})
}
type getxattrFunc func(name string, data []byte) (int, error)
// get contains the buffer allocation logic used by both Get and LGet.
func get(path string, name string, getxattrFunc getxattrFunc) ([]byte, error) {
const (
// Start with a 1 KB buffer for the xattr value
initialBufSize = 1024
// The theoretical maximum xattr value size on MacOS is 64 MB. On Linux it's
// much smaller at 64 KB. Unless the kernel is evil or buggy, we should never
// hit the limit.
maxBufSize = 64 * 1024 * 1024
// Function name as reported in error messages
myname = "xattr.get"
)
size := initialBufSize
for {
data := make([]byte, size)
read, err := getxattrFunc(name, data)
// If the buffer was too small to fit the value, Linux and MacOS react
// differently:
// Linux: returns an ERANGE error and "-1" bytes.
// MacOS: truncates the value and returns "size" bytes. If the value
// happens to be exactly as big as the buffer, we cannot know if it was
// truncated, and we retry with a bigger buffer. Contrary to documentation,
// MacOS never seems to return ERANGE!
// To keep the code simple, we always check both conditions, and sometimes
// double the buffer size without it being strictly necessary.
if err == syscall.ERANGE || read == size {
// The buffer was too small. Try again.
size <<= 1
if size >= maxBufSize {
return nil, &Error{myname, path, name, syscall.EOVERFLOW}
}
continue
}
if err != nil {
return nil, &Error{myname, path, name, err}
}
return data[:read], nil
}
}
// Set associates name and data together as an attribute of path.
func Set(path, name string, data []byte) error {
if err := setxattr(path, name, data, 0); err != nil {
return &Error{"xattr.Set", path, name, err}
}
return nil
}
// LSet is like Set but does not follow a symlink at
// the end of the path.
func LSet(path, name string, data []byte) error {
if err := lsetxattr(path, name, data, 0); err != nil {
return &Error{"xattr.LSet", path, name, err}
}
return nil
}
// FSet is like Set but accepts an os.File instead of a file path.
func FSet(f *os.File, name string, data []byte) error {
if err := fsetxattr(f, name, data, 0); err != nil {
return &Error{"xattr.FSet", f.Name(), name, err}
}
return nil
}
// SetWithFlags associates name and data together as an attribute of path.
// Forwards the flags parameter to the syscall layer.
func SetWithFlags(path, name string, data []byte, flags int) error {
if err := setxattr(path, name, data, flags); err != nil {
return &Error{"xattr.SetWithFlags", path, name, err}
}
return nil
}
// LSetWithFlags is like SetWithFlags but does not follow a symlink at
// the end of the path.
func LSetWithFlags(path, name string, data []byte, flags int) error {
if err := lsetxattr(path, name, data, flags); err != nil {
return &Error{"xattr.LSetWithFlags", path, name, err}
}
return nil
}
// FSetWithFlags is like SetWithFlags but accepts an os.File instead of a file path.
func FSetWithFlags(f *os.File, name string, data []byte, flags int) error {
if err := fsetxattr(f, name, data, flags); err != nil {
return &Error{"xattr.FSetWithFlags", f.Name(), name, err}
}
return nil
}
// Remove removes the attribute associated with the given path.
func Remove(path, name string) error {
if err := removexattr(path, name); err != nil {
return &Error{"xattr.Remove", path, name, err}
}
return nil
}
// LRemove is like Remove but does not follow a symlink at the end of the
// path.
func LRemove(path, name string) error {
if err := lremovexattr(path, name); err != nil {
return &Error{"xattr.LRemove", path, name, err}
}
return nil
}
// FRemove is like Remove but accepts an os.File instead of a file path.
func FRemove(f *os.File, name string) error {
if err := fremovexattr(f, name); err != nil {
return &Error{"xattr.FRemove", f.Name(), name, err}
}
return nil
}
// List retrieves a list of names of extended attributes associated
// with the given path in the file system.
func List(path string) ([]string, error) {
return list(path, func(data []byte) (int, error) {
return listxattr(path, data)
})
}
// LList is like List but does not follow a symlink at the end of the
// path.
func LList(path string) ([]string, error) {
return list(path, func(data []byte) (int, error) {
return llistxattr(path, data)
})
}
// FList is like List but accepts an os.File instead of a file path.
func FList(f *os.File) ([]string, error) {
return list(f.Name(), func(data []byte) (int, error) {
return flistxattr(f, data)
})
}
type listxattrFunc func(data []byte) (int, error)
// list contains the buffer allocation logic used by both List and LList.
func list(path string, listxattrFunc listxattrFunc) ([]string, error) {
myname := "xattr.list"
// find size.
size, err := listxattrFunc(nil)
if err != nil {
return nil, &Error{myname, path, "", err}
}
if size > 0 {
// `size + 1` because of ERANGE error when reading
// from a SMB1 mount point (https://github.com/pkg/xattr/issues/16).
buf := make([]byte, size+1)
// Read into buffer of that size.
read, err := listxattrFunc(buf)
if err != nil {
return nil, &Error{myname, path, "", err}
}
return stringsFromByteSlice(buf[:read]), nil
}
return []string{}, nil
}
// bytePtrFromSlice returns a pointer to array of bytes and a size.
func bytePtrFromSlice(data []byte) (ptr *byte, size int) {
size = len(data)
if size > 0 {
ptr = &data[0]
}
return
}
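A short usage sketch (not part of the vendored sources) for the `F` variants, which operate on an open `*os.File`; the path is a placeholder and the filesystem is assumed to support user xattrs:

```go
package main

import (
	"fmt"
	"log"
	"os"

	"github.com/pkg/xattr"
)

func main() {
	f, err := os.OpenFile("/tmp/myfile", os.O_RDWR|os.O_CREATE, 0600) // placeholder path
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()

	// Set an attribute on the open file, then read it back.
	if err := xattr.FSet(f, "user.example", []byte("test-attr-value")); err != nil {
		log.Fatal(err)
	}
	data, err := xattr.FGet(f, "user.example")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("user.example = %q\n", string(data))
}
```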

201
vendor/github.com/pkg/xattr/xattr_bsd.go generated vendored Normal file
View File

@ -0,0 +1,201 @@
//go:build freebsd || netbsd
// +build freebsd netbsd
package xattr
import (
"os"
"syscall"
"unsafe"
)
const (
// XATTR_SUPPORTED will be true if the current platform is supported
XATTR_SUPPORTED = true
EXTATTR_NAMESPACE_USER = 1
// ENOATTR is not exported by the syscall package on Linux, because it is
// an alias for ENODATA. We export it here so it is available on all
// our supported platforms.
ENOATTR = syscall.ENOATTR
)
func getxattr(path string, name string, data []byte) (int, error) {
return sysGet(syscall.SYS_EXTATTR_GET_FILE, path, name, data)
}
func lgetxattr(path string, name string, data []byte) (int, error) {
return sysGet(syscall.SYS_EXTATTR_GET_LINK, path, name, data)
}
func fgetxattr(f *os.File, name string, data []byte) (int, error) {
return getxattr(f.Name(), name, data)
}
// sysGet is called by getxattr and lgetxattr with the appropriate syscall
// number. This works because syscalls have the same signature and return
// values.
func sysGet(syscallNum uintptr, path string, name string, data []byte) (int, error) {
ptr, nbytes := bytePtrFromSlice(data)
/*
ssize_t extattr_get_file(
const char *path,
int attrnamespace,
const char *attrname,
void *data,
size_t nbytes);
ssize_t extattr_get_link(
const char *path,
int attrnamespace,
const char *attrname,
void *data,
size_t nbytes);
*/
r0, _, err := syscall.Syscall6(syscallNum, uintptr(unsafe.Pointer(syscall.StringBytePtr(path))),
EXTATTR_NAMESPACE_USER, uintptr(unsafe.Pointer(syscall.StringBytePtr(name))),
uintptr(unsafe.Pointer(ptr)), uintptr(nbytes), 0)
if err != syscall.Errno(0) {
return int(r0), err
}
return int(r0), nil
}
func setxattr(path string, name string, data []byte, flags int) error {
return sysSet(syscall.SYS_EXTATTR_SET_FILE, path, name, data)
}
func lsetxattr(path string, name string, data []byte, flags int) error {
return sysSet(syscall.SYS_EXTATTR_SET_LINK, path, name, data)
}
func fsetxattr(f *os.File, name string, data []byte, flags int) error {
return setxattr(f.Name(), name, data, flags)
}
// sysSet is called by setxattr and lsetxattr with the appropriate syscall
// number. This works because syscalls have the same signature and return
// values.
func sysSet(syscallNum uintptr, path string, name string, data []byte) error {
ptr, nbytes := bytePtrFromSlice(data)
/*
ssize_t extattr_set_file(
const char *path,
int attrnamespace,
const char *attrname,
const void *data,
size_t nbytes
);
ssize_t extattr_set_link(
const char *path,
int attrnamespace,
const char *attrname,
const void *data,
size_t nbytes
);
*/
r0, _, err := syscall.Syscall6(syscallNum, uintptr(unsafe.Pointer(syscall.StringBytePtr(path))),
EXTATTR_NAMESPACE_USER, uintptr(unsafe.Pointer(syscall.StringBytePtr(name))),
uintptr(unsafe.Pointer(ptr)), uintptr(nbytes), 0)
if err != syscall.Errno(0) {
return err
}
if int(r0) != nbytes {
return syscall.E2BIG
}
return nil
}
func removexattr(path string, name string) error {
return sysRemove(syscall.SYS_EXTATTR_DELETE_FILE, path, name)
}
func lremovexattr(path string, name string) error {
return sysRemove(syscall.SYS_EXTATTR_DELETE_LINK, path, name)
}
func fremovexattr(f *os.File, name string) error {
return removexattr(f.Name(), name)
}
// sysRemove is called by removexattr and lremovexattr with the appropriate syscall
// number. This works because syscalls have the same signature and return
// values.
func sysRemove(syscallNum uintptr, path string, name string) error {
/*
int extattr_delete_file(
const char *path,
int attrnamespace,
const char *attrname
);
int extattr_delete_link(
const char *path,
int attrnamespace,
const char *attrname
);
*/
_, _, err := syscall.Syscall(syscallNum, uintptr(unsafe.Pointer(syscall.StringBytePtr(path))),
EXTATTR_NAMESPACE_USER, uintptr(unsafe.Pointer(syscall.StringBytePtr(name))),
)
if err != syscall.Errno(0) {
return err
}
return nil
}
func listxattr(path string, data []byte) (int, error) {
return sysList(syscall.SYS_EXTATTR_LIST_FILE, path, data)
}
func llistxattr(path string, data []byte) (int, error) {
return sysList(syscall.SYS_EXTATTR_LIST_LINK, path, data)
}
func flistxattr(f *os.File, data []byte) (int, error) {
return listxattr(f.Name(), data)
}
// sysList is called by listxattr and llistxattr with the appropriate syscall
// number. This works because syscalls have the same signature and return
// values.
func sysList(syscallNum uintptr, path string, data []byte) (int, error) {
ptr, nbytes := bytePtrFromSlice(data)
/*
ssize_t extattr_list_file(
const char *path,
int attrnamespace,
void *data,
size_t nbytes
);
ssize_t extattr_list_link(
const char *path,
int attrnamespace,
void *data,
size_t nbytes
);
*/
r0, _, err := syscall.Syscall6(syscallNum, uintptr(unsafe.Pointer(syscall.StringBytePtr(path))),
EXTATTR_NAMESPACE_USER, uintptr(unsafe.Pointer(ptr)), uintptr(nbytes), 0, 0)
if err != syscall.Errno(0) {
return int(r0), err
}
return int(r0), nil
}
// stringsFromByteSlice converts a sequence of attributes to a []string.
// On FreeBSD, each entry consists of a single byte containing the length
// of the attribute name, followed by the attribute name.
// The name is _not_ terminated by NULL.
func stringsFromByteSlice(buf []byte) (result []string) {
index := 0
for index < len(buf) {
next := index + 1 + int(buf[index])
result = append(result, string(buf[index+1:next]))
index = next
}
return
}

90
vendor/github.com/pkg/xattr/xattr_darwin.go generated vendored Normal file
View File

@ -0,0 +1,90 @@
//go:build darwin
// +build darwin
package xattr
import (
"os"
"syscall"
"golang.org/x/sys/unix"
)
// See https://opensource.apple.com/source/xnu/xnu-1504.15.3/bsd/sys/xattr.h.auto.html
const (
// XATTR_SUPPORTED will be true if the current platform is supported
XATTR_SUPPORTED = true
XATTR_NOFOLLOW = 0x0001
XATTR_CREATE = 0x0002
XATTR_REPLACE = 0x0004
XATTR_NOSECURITY = 0x0008
XATTR_NODEFAULT = 0x0010
XATTR_SHOWCOMPRESSION = 0x0020
// ENOATTR is not exported by the syscall package on Linux, because it is
// an alias for ENODATA. We export it here so it is available on all
// our supported platforms.
ENOATTR = syscall.ENOATTR
)
func getxattr(path string, name string, data []byte) (int, error) {
return unix.Getxattr(path, name, data)
}
func lgetxattr(path string, name string, data []byte) (int, error) {
return unix.Lgetxattr(path, name, data)
}
func fgetxattr(f *os.File, name string, data []byte) (int, error) {
return getxattr(f.Name(), name, data)
}
func setxattr(path string, name string, data []byte, flags int) error {
return unix.Setxattr(path, name, data, flags)
}
func lsetxattr(path string, name string, data []byte, flags int) error {
return unix.Lsetxattr(path, name, data, flags)
}
func fsetxattr(f *os.File, name string, data []byte, flags int) error {
return setxattr(f.Name(), name, data, flags)
}
func removexattr(path string, name string) error {
return unix.Removexattr(path, name)
}
func lremovexattr(path string, name string) error {
return unix.Lremovexattr(path, name)
}
func fremovexattr(f *os.File, name string) error {
return removexattr(f.Name(), name)
}
func listxattr(path string, data []byte) (int, error) {
return unix.Listxattr(path, data)
}
func llistxattr(path string, data []byte) (int, error) {
return unix.Llistxattr(path, data)
}
func flistxattr(f *os.File, data []byte) (int, error) {
return listxattr(f.Name(), data)
}
// stringsFromByteSlice converts a sequence of attributes to a []string.
// On Darwin and Linux, each entry is a NULL-terminated string.
func stringsFromByteSlice(buf []byte) (result []string) {
offset := 0
for index, b := range buf {
if b == 0 {
result = append(result, string(buf[offset:index]))
offset = index + 1
}
}
return
}

142
vendor/github.com/pkg/xattr/xattr_linux.go generated vendored Normal file
View File

@ -0,0 +1,142 @@
//go:build linux
// +build linux
package xattr
import (
"os"
"syscall"
"golang.org/x/sys/unix"
)
const (
// XATTR_SUPPORTED will be true if the current platform is supported
XATTR_SUPPORTED = true
XATTR_CREATE = unix.XATTR_CREATE
XATTR_REPLACE = unix.XATTR_REPLACE
// ENOATTR is not exported by the syscall package on Linux, because it is
// an alias for ENODATA. We export it here so it is available on all
// our supported platforms.
ENOATTR = syscall.ENODATA
)
// On Linux, FUSE and CIFS filesystems can return EINTR for interrupted system
// calls. This function works around this by retrying system calls until they
// stop returning EINTR.
//
// See https://github.com/golang/go/commit/6b420169d798c7ebe733487b56ea5c3fa4aab5ce.
func ignoringEINTR(fn func() error) (err error) {
for {
err = fn()
if err != unix.EINTR {
break
}
}
return err
}
func getxattr(path string, name string, data []byte) (int, error) {
var r int
err := ignoringEINTR(func() (err error) {
r, err = unix.Getxattr(path, name, data)
return err
})
return r, err
}
func lgetxattr(path string, name string, data []byte) (int, error) {
var r int
err := ignoringEINTR(func() (err error) {
r, err = unix.Lgetxattr(path, name, data)
return err
})
return r, err
}
func fgetxattr(f *os.File, name string, data []byte) (int, error) {
var r int
err := ignoringEINTR(func() (err error) {
r, err = unix.Fgetxattr(int(f.Fd()), name, data)
return err
})
return r, err
}
func setxattr(path string, name string, data []byte, flags int) error {
return ignoringEINTR(func() (err error) {
return unix.Setxattr(path, name, data, flags)
})
}
func lsetxattr(path string, name string, data []byte, flags int) error {
return ignoringEINTR(func() (err error) {
return unix.Lsetxattr(path, name, data, flags)
})
}
func fsetxattr(f *os.File, name string, data []byte, flags int) error {
return ignoringEINTR(func() (err error) {
return unix.Fsetxattr(int(f.Fd()), name, data, flags)
})
}
func removexattr(path string, name string) error {
return ignoringEINTR(func() (err error) {
return unix.Removexattr(path, name)
})
}
func lremovexattr(path string, name string) error {
return ignoringEINTR(func() (err error) {
return unix.Lremovexattr(path, name)
})
}
func fremovexattr(f *os.File, name string) error {
return ignoringEINTR(func() (err error) {
return unix.Fremovexattr(int(f.Fd()), name)
})
}
func listxattr(path string, data []byte) (int, error) {
var r int
err := ignoringEINTR(func() (err error) {
r, err = unix.Listxattr(path, data)
return err
})
return r, err
}
func llistxattr(path string, data []byte) (int, error) {
var r int
err := ignoringEINTR(func() (err error) {
r, err = unix.Llistxattr(path, data)
return err
})
return r, err
}
func flistxattr(f *os.File, data []byte) (int, error) {
var r int
err := ignoringEINTR(func() (err error) {
r, err = unix.Flistxattr(int(f.Fd()), data)
return err
})
return r, err
}
// stringsFromByteSlice converts a sequence of attributes to a []string.
// On Darwin and Linux, each entry is a NULL-terminated string.
func stringsFromByteSlice(buf []byte) (result []string) {
offset := 0
for index, b := range buf {
if b == 0 {
result = append(result, string(buf[offset:index]))
offset = index + 1
}
}
return
}

165
vendor/github.com/pkg/xattr/xattr_solaris.go generated vendored Normal file
View File

@ -0,0 +1,165 @@
//go:build solaris
// +build solaris
package xattr
import (
"os"
"syscall"
"golang.org/x/sys/unix"
)
const (
// XATTR_SUPPORTED will be true if the current platform is supported
XATTR_SUPPORTED = true
XATTR_CREATE = 0x1
XATTR_REPLACE = 0x2
// ENOATTR is not exported by the syscall package on Linux, because it is
// an alias for ENODATA. We export it here so it is available on all
// our supported platforms.
ENOATTR = syscall.ENODATA
)
func getxattr(path string, name string, data []byte) (int, error) {
f, err := os.OpenFile(path, os.O_RDONLY, 0)
if err != nil {
return 0, err
}
defer func() {
_ = f.Close()
}()
return fgetxattr(f, name, data)
}
func lgetxattr(path string, name string, data []byte) (int, error) {
return 0, unix.ENOTSUP
}
func fgetxattr(f *os.File, name string, data []byte) (int, error) {
fd, err := unix.Openat(int(f.Fd()), name, unix.O_RDONLY|unix.O_XATTR, 0)
if err != nil {
return 0, err
}
defer func() {
_ = unix.Close(fd)
}()
return unix.Read(fd, data)
}
func setxattr(path string, name string, data []byte, flags int) error {
f, err := os.OpenFile(path, os.O_RDONLY, 0)
if err != nil {
return err
}
err = fsetxattr(f, name, data, flags)
if err != nil {
_ = f.Close()
return err
}
return f.Close()
}
func lsetxattr(path string, name string, data []byte, flags int) error {
return unix.ENOTSUP
}
func fsetxattr(f *os.File, name string, data []byte, flags int) error {
mode := unix.O_WRONLY | unix.O_XATTR
if flags&XATTR_REPLACE != 0 {
mode |= unix.O_TRUNC
} else if flags&XATTR_CREATE != 0 {
mode |= unix.O_CREAT | unix.O_EXCL
} else {
mode |= unix.O_CREAT | unix.O_TRUNC
}
fd, err := unix.Openat(int(f.Fd()), name, mode, 0666)
if err != nil {
return err
}
if _, err = unix.Write(fd, data); err != nil {
_ = unix.Close(fd)
return err
}
return unix.Close(fd)
}
func removexattr(path string, name string) error {
fd, err := unix.Open(path, unix.O_RDONLY|unix.O_XATTR, 0)
if err != nil {
return err
}
f := os.NewFile(uintptr(fd), path)
defer func() {
_ = f.Close()
}()
return fremovexattr(f, name)
}
func lremovexattr(path string, name string) error {
return unix.ENOTSUP
}
func fremovexattr(f *os.File, name string) error {
fd, err := unix.Openat(int(f.Fd()), ".", unix.O_XATTR, 0)
if err != nil {
return err
}
defer func() {
_ = unix.Close(fd)
}()
return unix.Unlinkat(fd, name, 0)
}
func listxattr(path string, data []byte) (int, error) {
f, err := os.OpenFile(path, os.O_RDONLY, 0)
if err != nil {
return 0, err
}
defer func() {
_ = f.Close()
}()
return flistxattr(f, data)
}
func llistxattr(path string, data []byte) (int, error) {
return 0, unix.ENOTSUP
}
func flistxattr(f *os.File, data []byte) (int, error) {
fd, err := unix.Openat(int(f.Fd()), ".", unix.O_RDONLY|unix.O_XATTR, 0)
if err != nil {
return 0, err
}
xf := os.NewFile(uintptr(fd), f.Name())
defer func() {
_ = xf.Close()
}()
names, err := xf.Readdirnames(-1)
if err != nil {
return 0, err
}
var buf []byte
for _, name := range names {
buf = append(buf, append([]byte(name), '\000')...)
}
if data == nil {
return len(buf), nil
}
return copy(data, buf), nil
}
// stringsFromByteSlice converts a sequence of attributes to a []string.
// On Darwin and Linux, each entry is a NULL-terminated string.
func stringsFromByteSlice(buf []byte) (result []string) {
offset := 0
for index, b := range buf {
if b == 0 {
result = append(result, string(buf[offset:index]))
offset = index + 1
}
}
return
}
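
As an illustration of the flag handling in fsetxattr above, here is a self-contained sketch (using placeholder flag values rather than the real unix constants, so it runs on any platform) of how XATTR_REPLACE and XATTR_CREATE translate into open modes:

package main

import "fmt"

// Placeholder values for illustration only; the real code uses the
// unix.O_* constants and the package's XATTR_CREATE/XATTR_REPLACE.
const (
    xattrCreate  = 0x1
    xattrReplace = 0x2

    oWronly = 0x0001
    oCreat  = 0x0100
    oExcl   = 0x0400
    oTrunc  = 0x0200
)

// openModeForFlags mirrors the branching in fsetxattr above: REPLACE
// truncates the existing attribute file, CREATE refuses to overwrite one,
// and the default creates or truncates as needed.
func openModeForFlags(flags int) int {
    mode := oWronly
    switch {
    case flags&xattrReplace != 0:
        mode |= oTrunc
    case flags&xattrCreate != 0:
        mode |= oCreat | oExcl
    default:
        mode |= oCreat | oTrunc
    }
    return mode
}

func main() {
    fmt.Printf("replace: %#x\n", openModeForFlags(xattrReplace))
    fmt.Printf("create:  %#x\n", openModeForFlags(xattrCreate))
    fmt.Printf("default: %#x\n", openModeForFlags(0))
}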

70
vendor/github.com/pkg/xattr/xattr_unsupported.go generated vendored Normal file
View File

@ -0,0 +1,70 @@
//go:build !linux && !freebsd && !netbsd && !darwin && !solaris
// +build !linux,!freebsd,!netbsd,!darwin,!solaris
package xattr
import (
"os"
"syscall"
)
const (
// We need to use the default for non supported operating systems
ENOATTR = syscall.ENODATA
)
// XATTR_SUPPORTED will be true if the current platform is supported
const XATTR_SUPPORTED = false
func getxattr(path string, name string, data []byte) (int, error) {
return 0, nil
}
func lgetxattr(path string, name string, data []byte) (int, error) {
return 0, nil
}
func fgetxattr(f *os.File, name string, data []byte) (int, error) {
return 0, nil
}
func setxattr(path string, name string, data []byte, flags int) error {
return nil
}
func lsetxattr(path string, name string, data []byte, flags int) error {
return nil
}
func fsetxattr(f *os.File, name string, data []byte, flags int) error {
return nil
}
func removexattr(path string, name string) error {
return nil
}
func lremovexattr(path string, name string) error {
return nil
}
func fremovexattr(f *os.File, name string) error {
return nil
}
func listxattr(path string, data []byte) (int, error) {
return 0, nil
}
func llistxattr(path string, data []byte) (int, error) {
return 0, nil
}
func flistxattr(f *os.File, data []byte) (int, error) {
return 0, nil
}
// dummy
func stringsFromByteSlice(buf []byte) (result []string) {
return []string{}
}
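
A hedged usage sketch: assuming the package's exported Get/Set/List helpers wrap the per-platform primitives above (those wrappers are not shown in this diff), a caller would typically guard on XATTR_SUPPORTED first; the path and attribute name below are made up:

package main

import (
    "fmt"
    "log"

    "github.com/pkg/xattr"
)

func main() {
    if !xattr.XATTR_SUPPORTED {
        log.Fatal("extended attributes are not supported on this platform")
    }

    const path = "/tmp/example.txt" // hypothetical file that must already exist
    if err := xattr.Set(path, "user.demo", []byte("hello")); err != nil {
        log.Fatal(err)
    }
    value, err := xattr.Get(path, "user.demo")
    if err != nil {
        log.Fatal(err)
    }
    names, err := xattr.List(path)
    if err != nil {
        log.Fatal(err)
    }
    fmt.Printf("value=%q attrs=%v\n", value, names)
}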

285
vendor/golang.org/x/crypto/argon2/argon2.go generated vendored Normal file
View File

@ -0,0 +1,285 @@
// Copyright 2017 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package argon2 implements the key derivation function Argon2.
// Argon2 was selected as the winner of the Password Hashing Competition and can
// be used to derive cryptographic keys from passwords.
//
// For a detailed specification of Argon2 see [1].
//
// If you aren't sure which function you need, use Argon2id (IDKey) and
// the parameter recommendations for your scenario.
//
//
// Argon2i
//
// Argon2i (implemented by Key) is the side-channel resistant version of Argon2.
// It uses data-independent memory access, which is preferred for password
// hashing and password-based key derivation. Argon2i requires more passes over
// memory than Argon2id to protect from trade-off attacks. The recommended
// parameters (taken from [2]) for non-interactive operations are time=3 and to
// use the maximum available memory.
//
//
// Argon2id
//
// Argon2id (implemented by IDKey) is a hybrid version of Argon2 combining
// Argon2i and Argon2d. It uses data-independent memory access for the first
// half of the first iteration over the memory and data-dependent memory access
// for the rest. Argon2id is side-channel resistant and provides better brute-
// force cost savings due to time-memory tradeoffs than Argon2i. The recommended
// parameters for non-interactive operations (taken from [2]) are time=1 and to
// use the maximum available memory.
//
// [1] https://github.com/P-H-C/phc-winner-argon2/blob/master/argon2-specs.pdf
// [2] https://tools.ietf.org/html/draft-irtf-cfrg-argon2-03#section-9.3
package argon2
import (
"encoding/binary"
"sync"
"golang.org/x/crypto/blake2b"
)
// The Argon2 version implemented by this package.
const Version = 0x13
const (
argon2d = iota
argon2i
argon2id
)
// Key derives a key from the password, salt, and cost parameters using Argon2i
// returning a byte slice of length keyLen that can be used as cryptographic
// key. The CPU cost and parallelism degree must be greater than zero.
//
// For example, you can get a derived key for e.g. AES-256 (which needs a
// 32-byte key) by doing:
//
// key := argon2.Key([]byte("some password"), salt, 3, 32*1024, 4, 32)
//
// The draft RFC recommends[2] time=3, and memory=32*1024 is a sensible number.
// If using that amount of memory (32 MB) is not possible in some contexts then
// the time parameter can be increased to compensate.
//
// The time parameter specifies the number of passes over the memory and the
// memory parameter specifies the size of the memory in KiB. For example
// memory=32*1024 sets the memory cost to ~32 MB. The number of threads can be
// adjusted to the number of available CPUs. The cost parameters should be
// increased as memory latency and CPU parallelism increases. Remember to get a
// good random salt.
func Key(password, salt []byte, time, memory uint32, threads uint8, keyLen uint32) []byte {
return deriveKey(argon2i, password, salt, nil, nil, time, memory, threads, keyLen)
}
// IDKey derives a key from the password, salt, and cost parameters using
// Argon2id returning a byte slice of length keyLen that can be used as
// cryptographic key. The CPU cost and parallelism degree must be greater than
// zero.
//
// For example, you can get a derived key for e.g. AES-256 (which needs a
// 32-byte key) by doing:
//
// key := argon2.IDKey([]byte("some password"), salt, 1, 64*1024, 4, 32)
//
// The draft RFC recommends[2] time=1, and memory=64*1024 is a sensible number.
// If using that amount of memory (64 MB) is not possible in some contexts then
// the time parameter can be increased to compensate.
//
// The time parameter specifies the number of passes over the memory and the
// memory parameter specifies the size of the memory in KiB. For example
// memory=64*1024 sets the memory cost to ~64 MB. The number of threads can be
// adjusted to the numbers of available CPUs. The cost parameters should be
// increased as memory latency and CPU parallelism increases. Remember to get a
// good random salt.
func IDKey(password, salt []byte, time, memory uint32, threads uint8, keyLen uint32) []byte {
return deriveKey(argon2id, password, salt, nil, nil, time, memory, threads, keyLen)
}
func deriveKey(mode int, password, salt, secret, data []byte, time, memory uint32, threads uint8, keyLen uint32) []byte {
if time < 1 {
panic("argon2: number of rounds too small")
}
if threads < 1 {
panic("argon2: parallelism degree too low")
}
h0 := initHash(password, salt, secret, data, time, memory, uint32(threads), keyLen, mode)
memory = memory / (syncPoints * uint32(threads)) * (syncPoints * uint32(threads))
if memory < 2*syncPoints*uint32(threads) {
memory = 2 * syncPoints * uint32(threads)
}
B := initBlocks(&h0, memory, uint32(threads))
processBlocks(B, time, memory, uint32(threads), mode)
return extractKey(B, memory, uint32(threads), keyLen)
}
const (
blockLength = 128
syncPoints = 4
)
type block [blockLength]uint64
func initHash(password, salt, key, data []byte, time, memory, threads, keyLen uint32, mode int) [blake2b.Size + 8]byte {
var (
h0 [blake2b.Size + 8]byte
params [24]byte
tmp [4]byte
)
b2, _ := blake2b.New512(nil)
binary.LittleEndian.PutUint32(params[0:4], threads)
binary.LittleEndian.PutUint32(params[4:8], keyLen)
binary.LittleEndian.PutUint32(params[8:12], memory)
binary.LittleEndian.PutUint32(params[12:16], time)
binary.LittleEndian.PutUint32(params[16:20], uint32(Version))
binary.LittleEndian.PutUint32(params[20:24], uint32(mode))
b2.Write(params[:])
binary.LittleEndian.PutUint32(tmp[:], uint32(len(password)))
b2.Write(tmp[:])
b2.Write(password)
binary.LittleEndian.PutUint32(tmp[:], uint32(len(salt)))
b2.Write(tmp[:])
b2.Write(salt)
binary.LittleEndian.PutUint32(tmp[:], uint32(len(key)))
b2.Write(tmp[:])
b2.Write(key)
binary.LittleEndian.PutUint32(tmp[:], uint32(len(data)))
b2.Write(tmp[:])
b2.Write(data)
b2.Sum(h0[:0])
return h0
}
func initBlocks(h0 *[blake2b.Size + 8]byte, memory, threads uint32) []block {
var block0 [1024]byte
B := make([]block, memory)
for lane := uint32(0); lane < threads; lane++ {
j := lane * (memory / threads)
binary.LittleEndian.PutUint32(h0[blake2b.Size+4:], lane)
binary.LittleEndian.PutUint32(h0[blake2b.Size:], 0)
blake2bHash(block0[:], h0[:])
for i := range B[j+0] {
B[j+0][i] = binary.LittleEndian.Uint64(block0[i*8:])
}
binary.LittleEndian.PutUint32(h0[blake2b.Size:], 1)
blake2bHash(block0[:], h0[:])
for i := range B[j+1] {
B[j+1][i] = binary.LittleEndian.Uint64(block0[i*8:])
}
}
return B
}
func processBlocks(B []block, time, memory, threads uint32, mode int) {
lanes := memory / threads
segments := lanes / syncPoints
processSegment := func(n, slice, lane uint32, wg *sync.WaitGroup) {
var addresses, in, zero block
if mode == argon2i || (mode == argon2id && n == 0 && slice < syncPoints/2) {
in[0] = uint64(n)
in[1] = uint64(lane)
in[2] = uint64(slice)
in[3] = uint64(memory)
in[4] = uint64(time)
in[5] = uint64(mode)
}
index := uint32(0)
if n == 0 && slice == 0 {
index = 2 // we have already generated the first two blocks
if mode == argon2i || mode == argon2id {
in[6]++
processBlock(&addresses, &in, &zero)
processBlock(&addresses, &addresses, &zero)
}
}
offset := lane*lanes + slice*segments + index
var random uint64
for index < segments {
prev := offset - 1
if index == 0 && slice == 0 {
prev += lanes // last block in lane
}
if mode == argon2i || (mode == argon2id && n == 0 && slice < syncPoints/2) {
if index%blockLength == 0 {
in[6]++
processBlock(&addresses, &in, &zero)
processBlock(&addresses, &addresses, &zero)
}
random = addresses[index%blockLength]
} else {
random = B[prev][0]
}
newOffset := indexAlpha(random, lanes, segments, threads, n, slice, lane, index)
processBlockXOR(&B[offset], &B[prev], &B[newOffset])
index, offset = index+1, offset+1
}
wg.Done()
}
for n := uint32(0); n < time; n++ {
for slice := uint32(0); slice < syncPoints; slice++ {
var wg sync.WaitGroup
for lane := uint32(0); lane < threads; lane++ {
wg.Add(1)
go processSegment(n, slice, lane, &wg)
}
wg.Wait()
}
}
}
func extractKey(B []block, memory, threads, keyLen uint32) []byte {
lanes := memory / threads
for lane := uint32(0); lane < threads-1; lane++ {
for i, v := range B[(lane*lanes)+lanes-1] {
B[memory-1][i] ^= v
}
}
var block [1024]byte
for i, v := range B[memory-1] {
binary.LittleEndian.PutUint64(block[i*8:], v)
}
key := make([]byte, keyLen)
blake2bHash(key, block[:])
return key
}
func indexAlpha(rand uint64, lanes, segments, threads, n, slice, lane, index uint32) uint32 {
refLane := uint32(rand>>32) % threads
if n == 0 && slice == 0 {
refLane = lane
}
m, s := 3*segments, ((slice+1)%syncPoints)*segments
if lane == refLane {
m += index
}
if n == 0 {
m, s = slice*segments, 0
if slice == 0 || lane == refLane {
m += index
}
}
if index == 0 || lane == refLane {
m--
}
return phi(rand, uint64(m), uint64(s), refLane, lanes)
}
func phi(rand, m, s uint64, lane, lanes uint32) uint32 {
p := rand & 0xFFFFFFFF
p = (p * p) >> 32
p = (p * m) >> 32
return lane*lanes + uint32((s+m-(p+1))%uint64(lanes))
}
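
A minimal usage sketch (not from this repository) that derives a 32-byte key with IDKey, following the parameters recommended in the package documentation above, with a freshly generated random salt:

package main

import (
    "crypto/rand"
    "encoding/hex"
    "fmt"
    "log"

    "golang.org/x/crypto/argon2"
)

func main() {
    password := []byte("correct horse battery staple")

    // A fresh random salt per derived key, as the documentation above advises.
    salt := make([]byte, 16)
    if _, err := rand.Read(salt); err != nil {
        log.Fatal(err)
    }

    // time=1, memory=64 MiB, threads=4, 32-byte key (e.g. for AES-256).
    key := argon2.IDKey(password, salt, 1, 64*1024, 4, 32)
    fmt.Println("derived key:", hex.EncodeToString(key))
}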

53
vendor/golang.org/x/crypto/argon2/blake2b.go generated vendored Normal file
View File

@ -0,0 +1,53 @@
// Copyright 2017 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package argon2
import (
"encoding/binary"
"hash"
"golang.org/x/crypto/blake2b"
)
// blake2bHash computes an arbitrary long hash value of in
// and writes the hash to out.
func blake2bHash(out []byte, in []byte) {
var b2 hash.Hash
if n := len(out); n < blake2b.Size {
b2, _ = blake2b.New(n, nil)
} else {
b2, _ = blake2b.New512(nil)
}
var buffer [blake2b.Size]byte
binary.LittleEndian.PutUint32(buffer[:4], uint32(len(out)))
b2.Write(buffer[:4])
b2.Write(in)
if len(out) <= blake2b.Size {
b2.Sum(out[:0])
return
}
outLen := len(out)
b2.Sum(buffer[:0])
b2.Reset()
copy(out, buffer[:32])
out = out[32:]
for len(out) > blake2b.Size {
b2.Write(buffer[:])
b2.Sum(buffer[:0])
copy(out, buffer[:32])
out = out[32:]
b2.Reset()
}
if outLen%blake2b.Size > 0 { // outLen > 64
r := ((outLen + 31) / 32) - 2 // ⌈τ /32⌉-2
b2, _ = blake2b.New(outLen-32*r, nil)
}
b2.Write(buffer[:])
b2.Sum(out[:0])
}

61
vendor/golang.org/x/crypto/argon2/blamka_amd64.go generated vendored Normal file
View File

@ -0,0 +1,61 @@
// Copyright 2017 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build amd64 && gc && !purego
// +build amd64,gc,!purego
package argon2
import "golang.org/x/sys/cpu"
func init() {
useSSE4 = cpu.X86.HasSSE41
}
//go:noescape
func mixBlocksSSE2(out, a, b, c *block)
//go:noescape
func xorBlocksSSE2(out, a, b, c *block)
//go:noescape
func blamkaSSE4(b *block)
func processBlockSSE(out, in1, in2 *block, xor bool) {
var t block
mixBlocksSSE2(&t, in1, in2, &t)
if useSSE4 {
blamkaSSE4(&t)
} else {
for i := 0; i < blockLength; i += 16 {
blamkaGeneric(
&t[i+0], &t[i+1], &t[i+2], &t[i+3],
&t[i+4], &t[i+5], &t[i+6], &t[i+7],
&t[i+8], &t[i+9], &t[i+10], &t[i+11],
&t[i+12], &t[i+13], &t[i+14], &t[i+15],
)
}
for i := 0; i < blockLength/8; i += 2 {
blamkaGeneric(
&t[i], &t[i+1], &t[16+i], &t[16+i+1],
&t[32+i], &t[32+i+1], &t[48+i], &t[48+i+1],
&t[64+i], &t[64+i+1], &t[80+i], &t[80+i+1],
&t[96+i], &t[96+i+1], &t[112+i], &t[112+i+1],
)
}
}
if xor {
xorBlocksSSE2(out, in1, in2, &t)
} else {
mixBlocksSSE2(out, in1, in2, &t)
}
}
func processBlock(out, in1, in2 *block) {
processBlockSSE(out, in1, in2, false)
}
func processBlockXOR(out, in1, in2 *block) {
processBlockSSE(out, in1, in2, true)
}

244
vendor/golang.org/x/crypto/argon2/blamka_amd64.s generated vendored Normal file
View File

@ -0,0 +1,244 @@
// Copyright 2017 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build amd64 && gc && !purego
// +build amd64,gc,!purego
#include "textflag.h"
DATA ·c40<>+0x00(SB)/8, $0x0201000706050403
DATA ·c40<>+0x08(SB)/8, $0x0a09080f0e0d0c0b
GLOBL ·c40<>(SB), (NOPTR+RODATA), $16
DATA ·c48<>+0x00(SB)/8, $0x0100070605040302
DATA ·c48<>+0x08(SB)/8, $0x09080f0e0d0c0b0a
GLOBL ·c48<>(SB), (NOPTR+RODATA), $16
#define SHUFFLE(v2, v3, v4, v5, v6, v7, t1, t2) \
MOVO v4, t1; \
MOVO v5, v4; \
MOVO t1, v5; \
MOVO v6, t1; \
PUNPCKLQDQ v6, t2; \
PUNPCKHQDQ v7, v6; \
PUNPCKHQDQ t2, v6; \
PUNPCKLQDQ v7, t2; \
MOVO t1, v7; \
MOVO v2, t1; \
PUNPCKHQDQ t2, v7; \
PUNPCKLQDQ v3, t2; \
PUNPCKHQDQ t2, v2; \
PUNPCKLQDQ t1, t2; \
PUNPCKHQDQ t2, v3
#define SHUFFLE_INV(v2, v3, v4, v5, v6, v7, t1, t2) \
MOVO v4, t1; \
MOVO v5, v4; \
MOVO t1, v5; \
MOVO v2, t1; \
PUNPCKLQDQ v2, t2; \
PUNPCKHQDQ v3, v2; \
PUNPCKHQDQ t2, v2; \
PUNPCKLQDQ v3, t2; \
MOVO t1, v3; \
MOVO v6, t1; \
PUNPCKHQDQ t2, v3; \
PUNPCKLQDQ v7, t2; \
PUNPCKHQDQ t2, v6; \
PUNPCKLQDQ t1, t2; \
PUNPCKHQDQ t2, v7
#define HALF_ROUND(v0, v1, v2, v3, v4, v5, v6, v7, t0, c40, c48) \
MOVO v0, t0; \
PMULULQ v2, t0; \
PADDQ v2, v0; \
PADDQ t0, v0; \
PADDQ t0, v0; \
PXOR v0, v6; \
PSHUFD $0xB1, v6, v6; \
MOVO v4, t0; \
PMULULQ v6, t0; \
PADDQ v6, v4; \
PADDQ t0, v4; \
PADDQ t0, v4; \
PXOR v4, v2; \
PSHUFB c40, v2; \
MOVO v0, t0; \
PMULULQ v2, t0; \
PADDQ v2, v0; \
PADDQ t0, v0; \
PADDQ t0, v0; \
PXOR v0, v6; \
PSHUFB c48, v6; \
MOVO v4, t0; \
PMULULQ v6, t0; \
PADDQ v6, v4; \
PADDQ t0, v4; \
PADDQ t0, v4; \
PXOR v4, v2; \
MOVO v2, t0; \
PADDQ v2, t0; \
PSRLQ $63, v2; \
PXOR t0, v2; \
MOVO v1, t0; \
PMULULQ v3, t0; \
PADDQ v3, v1; \
PADDQ t0, v1; \
PADDQ t0, v1; \
PXOR v1, v7; \
PSHUFD $0xB1, v7, v7; \
MOVO v5, t0; \
PMULULQ v7, t0; \
PADDQ v7, v5; \
PADDQ t0, v5; \
PADDQ t0, v5; \
PXOR v5, v3; \
PSHUFB c40, v3; \
MOVO v1, t0; \
PMULULQ v3, t0; \
PADDQ v3, v1; \
PADDQ t0, v1; \
PADDQ t0, v1; \
PXOR v1, v7; \
PSHUFB c48, v7; \
MOVO v5, t0; \
PMULULQ v7, t0; \
PADDQ v7, v5; \
PADDQ t0, v5; \
PADDQ t0, v5; \
PXOR v5, v3; \
MOVO v3, t0; \
PADDQ v3, t0; \
PSRLQ $63, v3; \
PXOR t0, v3
#define LOAD_MSG_0(block, off) \
MOVOU 8*(off+0)(block), X0; \
MOVOU 8*(off+2)(block), X1; \
MOVOU 8*(off+4)(block), X2; \
MOVOU 8*(off+6)(block), X3; \
MOVOU 8*(off+8)(block), X4; \
MOVOU 8*(off+10)(block), X5; \
MOVOU 8*(off+12)(block), X6; \
MOVOU 8*(off+14)(block), X7
#define STORE_MSG_0(block, off) \
MOVOU X0, 8*(off+0)(block); \
MOVOU X1, 8*(off+2)(block); \
MOVOU X2, 8*(off+4)(block); \
MOVOU X3, 8*(off+6)(block); \
MOVOU X4, 8*(off+8)(block); \
MOVOU X5, 8*(off+10)(block); \
MOVOU X6, 8*(off+12)(block); \
MOVOU X7, 8*(off+14)(block)
#define LOAD_MSG_1(block, off) \
MOVOU 8*off+0*8(block), X0; \
MOVOU 8*off+16*8(block), X1; \
MOVOU 8*off+32*8(block), X2; \
MOVOU 8*off+48*8(block), X3; \
MOVOU 8*off+64*8(block), X4; \
MOVOU 8*off+80*8(block), X5; \
MOVOU 8*off+96*8(block), X6; \
MOVOU 8*off+112*8(block), X7
#define STORE_MSG_1(block, off) \
MOVOU X0, 8*off+0*8(block); \
MOVOU X1, 8*off+16*8(block); \
MOVOU X2, 8*off+32*8(block); \
MOVOU X3, 8*off+48*8(block); \
MOVOU X4, 8*off+64*8(block); \
MOVOU X5, 8*off+80*8(block); \
MOVOU X6, 8*off+96*8(block); \
MOVOU X7, 8*off+112*8(block)
#define BLAMKA_ROUND_0(block, off, t0, t1, c40, c48) \
LOAD_MSG_0(block, off); \
HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, t0, c40, c48); \
SHUFFLE(X2, X3, X4, X5, X6, X7, t0, t1); \
HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, t0, c40, c48); \
SHUFFLE_INV(X2, X3, X4, X5, X6, X7, t0, t1); \
STORE_MSG_0(block, off)
#define BLAMKA_ROUND_1(block, off, t0, t1, c40, c48) \
LOAD_MSG_1(block, off); \
HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, t0, c40, c48); \
SHUFFLE(X2, X3, X4, X5, X6, X7, t0, t1); \
HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, t0, c40, c48); \
SHUFFLE_INV(X2, X3, X4, X5, X6, X7, t0, t1); \
STORE_MSG_1(block, off)
// func blamkaSSE4(b *block)
TEXT ·blamkaSSE4(SB), 4, $0-8
MOVQ b+0(FP), AX
MOVOU ·c40<>(SB), X10
MOVOU ·c48<>(SB), X11
BLAMKA_ROUND_0(AX, 0, X8, X9, X10, X11)
BLAMKA_ROUND_0(AX, 16, X8, X9, X10, X11)
BLAMKA_ROUND_0(AX, 32, X8, X9, X10, X11)
BLAMKA_ROUND_0(AX, 48, X8, X9, X10, X11)
BLAMKA_ROUND_0(AX, 64, X8, X9, X10, X11)
BLAMKA_ROUND_0(AX, 80, X8, X9, X10, X11)
BLAMKA_ROUND_0(AX, 96, X8, X9, X10, X11)
BLAMKA_ROUND_0(AX, 112, X8, X9, X10, X11)
BLAMKA_ROUND_1(AX, 0, X8, X9, X10, X11)
BLAMKA_ROUND_1(AX, 2, X8, X9, X10, X11)
BLAMKA_ROUND_1(AX, 4, X8, X9, X10, X11)
BLAMKA_ROUND_1(AX, 6, X8, X9, X10, X11)
BLAMKA_ROUND_1(AX, 8, X8, X9, X10, X11)
BLAMKA_ROUND_1(AX, 10, X8, X9, X10, X11)
BLAMKA_ROUND_1(AX, 12, X8, X9, X10, X11)
BLAMKA_ROUND_1(AX, 14, X8, X9, X10, X11)
RET
// func mixBlocksSSE2(out, a, b, c *block)
TEXT ·mixBlocksSSE2(SB), 4, $0-32
MOVQ out+0(FP), DX
MOVQ a+8(FP), AX
MOVQ b+16(FP), BX
MOVQ a+24(FP), CX
MOVQ $128, BP
loop:
MOVOU 0(AX), X0
MOVOU 0(BX), X1
MOVOU 0(CX), X2
PXOR X1, X0
PXOR X2, X0
MOVOU X0, 0(DX)
ADDQ $16, AX
ADDQ $16, BX
ADDQ $16, CX
ADDQ $16, DX
SUBQ $2, BP
JA loop
RET
// func xorBlocksSSE2(out, a, b, c *block)
TEXT ·xorBlocksSSE2(SB), 4, $0-32
MOVQ out+0(FP), DX
MOVQ a+8(FP), AX
MOVQ b+16(FP), BX
MOVQ a+24(FP), CX
MOVQ $128, BP
loop:
MOVOU 0(AX), X0
MOVOU 0(BX), X1
MOVOU 0(CX), X2
MOVOU 0(DX), X3
PXOR X1, X0
PXOR X2, X0
PXOR X3, X0
MOVOU X0, 0(DX)
ADDQ $16, AX
ADDQ $16, BX
ADDQ $16, CX
ADDQ $16, DX
SUBQ $2, BP
JA loop
RET

163
vendor/golang.org/x/crypto/argon2/blamka_generic.go generated vendored Normal file
View File

@ -0,0 +1,163 @@
// Copyright 2017 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package argon2
var useSSE4 bool
func processBlockGeneric(out, in1, in2 *block, xor bool) {
var t block
for i := range t {
t[i] = in1[i] ^ in2[i]
}
for i := 0; i < blockLength; i += 16 {
blamkaGeneric(
&t[i+0], &t[i+1], &t[i+2], &t[i+3],
&t[i+4], &t[i+5], &t[i+6], &t[i+7],
&t[i+8], &t[i+9], &t[i+10], &t[i+11],
&t[i+12], &t[i+13], &t[i+14], &t[i+15],
)
}
for i := 0; i < blockLength/8; i += 2 {
blamkaGeneric(
&t[i], &t[i+1], &t[16+i], &t[16+i+1],
&t[32+i], &t[32+i+1], &t[48+i], &t[48+i+1],
&t[64+i], &t[64+i+1], &t[80+i], &t[80+i+1],
&t[96+i], &t[96+i+1], &t[112+i], &t[112+i+1],
)
}
if xor {
for i := range t {
out[i] ^= in1[i] ^ in2[i] ^ t[i]
}
} else {
for i := range t {
out[i] = in1[i] ^ in2[i] ^ t[i]
}
}
}
func blamkaGeneric(t00, t01, t02, t03, t04, t05, t06, t07, t08, t09, t10, t11, t12, t13, t14, t15 *uint64) {
v00, v01, v02, v03 := *t00, *t01, *t02, *t03
v04, v05, v06, v07 := *t04, *t05, *t06, *t07
v08, v09, v10, v11 := *t08, *t09, *t10, *t11
v12, v13, v14, v15 := *t12, *t13, *t14, *t15
v00 += v04 + 2*uint64(uint32(v00))*uint64(uint32(v04))
v12 ^= v00
v12 = v12>>32 | v12<<32
v08 += v12 + 2*uint64(uint32(v08))*uint64(uint32(v12))
v04 ^= v08
v04 = v04>>24 | v04<<40
v00 += v04 + 2*uint64(uint32(v00))*uint64(uint32(v04))
v12 ^= v00
v12 = v12>>16 | v12<<48
v08 += v12 + 2*uint64(uint32(v08))*uint64(uint32(v12))
v04 ^= v08
v04 = v04>>63 | v04<<1
v01 += v05 + 2*uint64(uint32(v01))*uint64(uint32(v05))
v13 ^= v01
v13 = v13>>32 | v13<<32
v09 += v13 + 2*uint64(uint32(v09))*uint64(uint32(v13))
v05 ^= v09
v05 = v05>>24 | v05<<40
v01 += v05 + 2*uint64(uint32(v01))*uint64(uint32(v05))
v13 ^= v01
v13 = v13>>16 | v13<<48
v09 += v13 + 2*uint64(uint32(v09))*uint64(uint32(v13))
v05 ^= v09
v05 = v05>>63 | v05<<1
v02 += v06 + 2*uint64(uint32(v02))*uint64(uint32(v06))
v14 ^= v02
v14 = v14>>32 | v14<<32
v10 += v14 + 2*uint64(uint32(v10))*uint64(uint32(v14))
v06 ^= v10
v06 = v06>>24 | v06<<40
v02 += v06 + 2*uint64(uint32(v02))*uint64(uint32(v06))
v14 ^= v02
v14 = v14>>16 | v14<<48
v10 += v14 + 2*uint64(uint32(v10))*uint64(uint32(v14))
v06 ^= v10
v06 = v06>>63 | v06<<1
v03 += v07 + 2*uint64(uint32(v03))*uint64(uint32(v07))
v15 ^= v03
v15 = v15>>32 | v15<<32
v11 += v15 + 2*uint64(uint32(v11))*uint64(uint32(v15))
v07 ^= v11
v07 = v07>>24 | v07<<40
v03 += v07 + 2*uint64(uint32(v03))*uint64(uint32(v07))
v15 ^= v03
v15 = v15>>16 | v15<<48
v11 += v15 + 2*uint64(uint32(v11))*uint64(uint32(v15))
v07 ^= v11
v07 = v07>>63 | v07<<1
v00 += v05 + 2*uint64(uint32(v00))*uint64(uint32(v05))
v15 ^= v00
v15 = v15>>32 | v15<<32
v10 += v15 + 2*uint64(uint32(v10))*uint64(uint32(v15))
v05 ^= v10
v05 = v05>>24 | v05<<40
v00 += v05 + 2*uint64(uint32(v00))*uint64(uint32(v05))
v15 ^= v00
v15 = v15>>16 | v15<<48
v10 += v15 + 2*uint64(uint32(v10))*uint64(uint32(v15))
v05 ^= v10
v05 = v05>>63 | v05<<1
v01 += v06 + 2*uint64(uint32(v01))*uint64(uint32(v06))
v12 ^= v01
v12 = v12>>32 | v12<<32
v11 += v12 + 2*uint64(uint32(v11))*uint64(uint32(v12))
v06 ^= v11
v06 = v06>>24 | v06<<40
v01 += v06 + 2*uint64(uint32(v01))*uint64(uint32(v06))
v12 ^= v01
v12 = v12>>16 | v12<<48
v11 += v12 + 2*uint64(uint32(v11))*uint64(uint32(v12))
v06 ^= v11
v06 = v06>>63 | v06<<1
v02 += v07 + 2*uint64(uint32(v02))*uint64(uint32(v07))
v13 ^= v02
v13 = v13>>32 | v13<<32
v08 += v13 + 2*uint64(uint32(v08))*uint64(uint32(v13))
v07 ^= v08
v07 = v07>>24 | v07<<40
v02 += v07 + 2*uint64(uint32(v02))*uint64(uint32(v07))
v13 ^= v02
v13 = v13>>16 | v13<<48
v08 += v13 + 2*uint64(uint32(v08))*uint64(uint32(v13))
v07 ^= v08
v07 = v07>>63 | v07<<1
v03 += v04 + 2*uint64(uint32(v03))*uint64(uint32(v04))
v14 ^= v03
v14 = v14>>32 | v14<<32
v09 += v14 + 2*uint64(uint32(v09))*uint64(uint32(v14))
v04 ^= v09
v04 = v04>>24 | v04<<40
v03 += v04 + 2*uint64(uint32(v03))*uint64(uint32(v04))
v14 ^= v03
v14 = v14>>16 | v14<<48
v09 += v14 + 2*uint64(uint32(v09))*uint64(uint32(v14))
v04 ^= v09
v04 = v04>>63 | v04<<1
*t00, *t01, *t02, *t03 = v00, v01, v02, v03
*t04, *t05, *t06, *t07 = v04, v05, v06, v07
*t08, *t09, *t10, *t11 = v08, v09, v10, v11
*t12, *t13, *t14, *t15 = v12, v13, v14, v15
}

16
vendor/golang.org/x/crypto/argon2/blamka_ref.go generated vendored Normal file
View File

@ -0,0 +1,16 @@
// Copyright 2017 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build !amd64 || purego || !gc
// +build !amd64 purego !gc
package argon2
func processBlock(out, in1, in2 *block) {
processBlockGeneric(out, in1, in2, false)
}
func processBlockXOR(out, in1, in2 *block) {
processBlockGeneric(out, in1, in2, true)
}

93
vendor/golang.org/x/crypto/hkdf/hkdf.go generated vendored Normal file
View File

@ -0,0 +1,93 @@
// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package hkdf implements the HMAC-based Extract-and-Expand Key Derivation
// Function (HKDF) as defined in RFC 5869.
//
// HKDF is a cryptographic key derivation function (KDF) with the goal of
// expanding limited input keying material into one or more cryptographically
// strong secret keys.
package hkdf // import "golang.org/x/crypto/hkdf"
import (
"crypto/hmac"
"errors"
"hash"
"io"
)
// Extract generates a pseudorandom key for use with Expand from an input secret
// and an optional independent salt.
//
// Only use this function if you need to reuse the extracted key with multiple
// Expand invocations and different context values. Most common scenarios,
// including the generation of multiple keys, should use New instead.
func Extract(hash func() hash.Hash, secret, salt []byte) []byte {
if salt == nil {
salt = make([]byte, hash().Size())
}
extractor := hmac.New(hash, salt)
extractor.Write(secret)
return extractor.Sum(nil)
}
type hkdf struct {
expander hash.Hash
size int
info []byte
counter byte
prev []byte
buf []byte
}
func (f *hkdf) Read(p []byte) (int, error) {
// Check whether enough data can be generated
need := len(p)
remains := len(f.buf) + int(255-f.counter+1)*f.size
if remains < need {
return 0, errors.New("hkdf: entropy limit reached")
}
// Read any leftover from the buffer
n := copy(p, f.buf)
p = p[n:]
// Fill the rest of the buffer
for len(p) > 0 {
f.expander.Reset()
f.expander.Write(f.prev)
f.expander.Write(f.info)
f.expander.Write([]byte{f.counter})
f.prev = f.expander.Sum(f.prev[:0])
f.counter++
// Copy the new batch into p
f.buf = f.prev
n = copy(p, f.buf)
p = p[n:]
}
// Save leftovers for next run
f.buf = f.buf[n:]
return need, nil
}
// Expand returns a Reader, from which keys can be read, using the given
// pseudorandom key and optional context info, skipping the extraction step.
//
// The pseudorandomKey should have been generated by Extract, or be a uniformly
// random or pseudorandom cryptographically strong key. See RFC 5869, Section
// 3.3. Most common scenarios will want to use New instead.
func Expand(hash func() hash.Hash, pseudorandomKey, info []byte) io.Reader {
expander := hmac.New(hash, pseudorandomKey)
return &hkdf{expander, expander.Size(), info, 1, nil, nil}
}
// New returns a Reader, from which keys can be read, using the given hash,
// secret, salt and context info. Salt and info can be nil.
func New(hash func() hash.Hash, secret, salt, info []byte) io.Reader {
prk := Extract(hash, secret, salt)
return Expand(hash, prk, info)
}
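
An illustrative sketch (not from this repository): expanding a master secret into two independent 32-byte keys with New and io.ReadFull; the secret, salt and info values are placeholders:

package main

import (
    "crypto/sha256"
    "encoding/hex"
    "fmt"
    "io"
    "log"

    "golang.org/x/crypto/hkdf"
)

func main() {
    secret := []byte("input keying material") // placeholder master secret
    salt := []byte("unique-salt")
    info := []byte("example-context")

    kdf := hkdf.New(sha256.New, secret, salt, info)

    // Each ReadFull pulls the next chunk of the HKDF output stream.
    for i := 0; i < 2; i++ {
        key := make([]byte, 32)
        if _, err := io.ReadFull(kdf, key); err != nil {
            log.Fatal(err)
        }
        fmt.Printf("key %d: %s\n", i, hex.EncodeToString(key))
    }
}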

3
vendor/golang.org/x/text/AUTHORS generated vendored
View File

@ -1,3 +0,0 @@
# This source code refers to The Go Authors for copyright purposes.
# The master list of authors is in the main Go distribution,
# visible at http://tip.golang.org/AUTHORS.

3
vendor/golang.org/x/text/CONTRIBUTORS generated vendored
View File

@ -1,3 +0,0 @@
# This source code was written by the Go contributors.
# The master list of contributors is in the main Go distribution,
# visible at http://tip.golang.org/CONTRIBUTORS.

View File

@ -14,19 +14,19 @@ package cases
// //
// The per-rune values have the following format: // The per-rune values have the following format:
// //
// if (exception) { // if (exception) {
// 15..4 unsigned exception index // 15..4 unsigned exception index
// } else { // } else {
// 15..8 XOR pattern or index to XOR pattern for case mapping // 15..8 XOR pattern or index to XOR pattern for case mapping
// Only 13..8 are used for XOR patterns. // Only 13..8 are used for XOR patterns.
// 7 inverseFold (fold to upper, not to lower) // 7 inverseFold (fold to upper, not to lower)
// 6 index: interpret the XOR pattern as an index // 6 index: interpret the XOR pattern as an index
// or isMid if case mode is cIgnorableUncased. // or isMid if case mode is cIgnorableUncased.
// 5..4 CCC: zero (normal or break), above or other // 5..4 CCC: zero (normal or break), above or other
// } // }
// 3 exception: interpret this value as an exception index // 3 exception: interpret this value as an exception index
// (TODO: is this bit necessary? Probably implied from case mode.) // (TODO: is this bit necessary? Probably implied from case mode.)
// 2..0 case mode // 2..0 case mode
// //
// For the non-exceptional cases, a rune must be either uncased, lowercase or // For the non-exceptional cases, a rune must be either uncased, lowercase or
// uppercase. If the rune is cased, the XOR pattern maps either a lowercase // uppercase. If the rune is cased, the XOR pattern maps either a lowercase
@ -128,37 +128,40 @@ const (
// The entry is pointed to by the exception index in an entry. It has the // The entry is pointed to by the exception index in an entry. It has the
// following format: // following format:
// //
// Header // Header:
// byte 0:
// 7..6 unused
// 5..4 CCC type (same bits as entry)
// 3 unused
// 2..0 length of fold
// //
// byte 1: // byte 0:
// 7..6 unused // 7..6 unused
// 5..3 length of 1st mapping of case type // 5..4 CCC type (same bits as entry)
// 2..0 length of 2nd mapping of case type // 3 unused
// 2..0 length of fold
// //
// case 1st 2nd // byte 1:
// lower -> upper, title // 7..6 unused
// upper -> lower, title // 5..3 length of 1st mapping of case type
// title -> lower, upper // 2..0 length of 2nd mapping of case type
//
// case 1st 2nd
// lower -> upper, title
// upper -> lower, title
// title -> lower, upper
// //
// Lengths with the value 0x7 indicate no value and implies no change. // Lengths with the value 0x7 indicate no value and implies no change.
// A length of 0 indicates a mapping to zero-length string. // A length of 0 indicates a mapping to zero-length string.
// //
// Body bytes: // Body bytes:
// case folding bytes //
// lowercase mapping bytes // case folding bytes
// uppercase mapping bytes // lowercase mapping bytes
// titlecase mapping bytes // uppercase mapping bytes
// closure mapping bytes (for NFKC_Casefold). (TODO) // titlecase mapping bytes
// closure mapping bytes (for NFKC_Casefold). (TODO)
// //
// Fallbacks: // Fallbacks:
// missing fold -> lower //
// missing title -> upper // missing fold -> lower
// all missing -> original rune // missing title -> upper
// all missing -> original rune
// //
// exceptions starts with a dummy byte to enforce that there is no zero index // exceptions starts with a dummy byte to enforce that there is no zero index
// value. // value.

View File

@ -93,8 +93,11 @@ var canonical = [numEncodings]string{
var nameMap = map[string]htmlEncoding{ var nameMap = map[string]htmlEncoding{
"unicode-1-1-utf-8": utf8, "unicode-1-1-utf-8": utf8,
"unicode11utf8": utf8,
"unicode20utf8": utf8,
"utf-8": utf8, "utf-8": utf8,
"utf8": utf8, "utf8": utf8,
"x-unicode20utf8": utf8,
"866": ibm866, "866": ibm866,
"cp866": ibm866, "cp866": ibm866,
"csibm866": ibm866, "csibm866": ibm866,
@ -307,7 +310,13 @@ var nameMap = map[string]htmlEncoding{
"iso-2022-cn-ext": replacement, "iso-2022-cn-ext": replacement,
"iso-2022-kr": replacement, "iso-2022-kr": replacement,
"replacement": replacement, "replacement": replacement,
"unicodefffe": utf16be,
"utf-16be": utf16be, "utf-16be": utf16be,
"csunicode": utf16le,
"iso-10646-ucs-2": utf16le,
"ucs-2": utf16le,
"unicode": utf16le,
"unicodefeff": utf16le,
"utf-16": utf16le, "utf-16": utf16le,
"utf-16le": utf16le, "utf-16le": utf16le,
"x-user-defined": xUserDefined, "x-user-defined": xUserDefined,

View File

@ -905,6 +905,14 @@ const (
// https://www.unicode.org/notes/tn6/ // https://www.unicode.org/notes/tn6/
BOCU1 MIB = 1020 BOCU1 MIB = 1020
// UTF7IMAP is the MIB identifier with IANA name UTF-7-IMAP.
//
// Note: This charset is used to encode Unicode in IMAP mailbox names;
// see section 5.1.3 of rfc3501 . It should never be used
// outside this context. A name has been assigned so that charset processing
// implementations can refer to it in a consistent way.
UTF7IMAP MIB = 1021
// Windows30Latin1 is the MIB identifier with IANA name ISO-8859-1-Windows-3.0-Latin-1. // Windows30Latin1 is the MIB identifier with IANA name ISO-8859-1-Windows-3.0-Latin-1.
// //
// Extended ISO 8859-1 Latin-1 for Windows 3.0. // Extended ISO 8859-1 Latin-1 for Windows 3.0.

View File

@ -55,6 +55,8 @@ loop:
// Microsoft's Code Page 936 extends GBK 1.0 to encode the euro sign U+20AC // Microsoft's Code Page 936 extends GBK 1.0 to encode the euro sign U+20AC
// as 0x80. The HTML5 specification at http://encoding.spec.whatwg.org/#gbk // as 0x80. The HTML5 specification at http://encoding.spec.whatwg.org/#gbk
// says to treat "gbk" as Code Page 936. // says to treat "gbk" as Code Page 936.
// GBKs decoder is gb18030s decoder. https://encoding.spec.whatwg.org/#gbk-decoder
// If byte is 0x80, return code point U+20AC. https://encoding.spec.whatwg.org/#gb18030-decoder
case c0 == 0x80: case c0 == 0x80:
r, size = '€', 1 r, size = '€', 1
@ -180,7 +182,9 @@ func (e gbkEncoder) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err
// Microsoft's Code Page 936 extends GBK 1.0 to encode the euro sign U+20AC // Microsoft's Code Page 936 extends GBK 1.0 to encode the euro sign U+20AC
// as 0x80. The HTML5 specification at http://encoding.spec.whatwg.org/#gbk // as 0x80. The HTML5 specification at http://encoding.spec.whatwg.org/#gbk
// says to treat "gbk" as Code Page 936. // says to treat "gbk" as Code Page 936.
if r == '€' { // GBKs encoder is gb18030s encoder with its _is GBK_ set to true. https://encoding.spec.whatwg.org/#gbk-encoder
// If _is GBK_ is true and code point is U+20AC, return byte 0x80. https://encoding.spec.whatwg.org/#gb18030-encoder
if !e.gb18030 && r == '€' {
r = 0x80 r = 0x80
goto write1 goto write1
} }
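
A small sketch (not part of this change) showing the behaviour the comments above describe: the GBK encoder emits 0x80 for the euro sign, while GB18030 keeps its own multi-byte form:

package main

import (
    "fmt"
    "log"

    "golang.org/x/text/encoding/simplifiedchinese"
    "golang.org/x/text/transform"
)

func main() {
    gbk, _, err := transform.String(simplifiedchinese.GBK.NewEncoder(), "€")
    if err != nil {
        log.Fatal(err)
    }
    gb18030, _, err := transform.String(simplifiedchinese.GB18030.NewEncoder(), "€")
    if err != nil {
        log.Fatal(err)
    }
    fmt.Printf("GBK:     % x\n", gbk)
    fmt.Printf("GB18030: % x\n", gb18030)
}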

View File

@ -966,7 +966,7 @@ var coreTags = []language.CompactCoreInfo{ // 773 elements
0x3fd00000, 0x3fd00072, 0x3fd000da, 0x3fd0010c, 0x3fd00000, 0x3fd00072, 0x3fd000da, 0x3fd0010c,
0x3ff00000, 0x3ff000d1, 0x40100000, 0x401000c3, 0x3ff00000, 0x3ff000d1, 0x40100000, 0x401000c3,
0x40200000, 0x4020004c, 0x40700000, 0x40800000, 0x40200000, 0x4020004c, 0x40700000, 0x40800000,
0x4085a000, 0x4085a0ba, 0x408e3000, 0x408e30ba, 0x4085a000, 0x4085a0ba, 0x408e8000, 0x408e80ba,
0x40c00000, 0x40c000b3, 0x41200000, 0x41200111, 0x40c00000, 0x40c000b3, 0x41200000, 0x41200111,
0x41600000, 0x4160010f, 0x41c00000, 0x41d00000, 0x41600000, 0x4160010f, 0x41c00000, 0x41d00000,
// Entry 280 - 29F // Entry 280 - 29F
@ -994,7 +994,7 @@ var coreTags = []language.CompactCoreInfo{ // 773 elements
0x4ae00130, 0x4b400000, 0x4b400099, 0x4b4000e8, 0x4ae00130, 0x4b400000, 0x4b400099, 0x4b4000e8,
0x4bc00000, 0x4bc05000, 0x4bc05024, 0x4bc20000, 0x4bc00000, 0x4bc05000, 0x4bc05024, 0x4bc20000,
0x4bc20137, 0x4bc5a000, 0x4bc5a137, 0x4be00000, 0x4bc20137, 0x4bc5a000, 0x4bc5a137, 0x4be00000,
0x4be5a000, 0x4be5a0b4, 0x4beeb000, 0x4beeb0b4, 0x4be5a000, 0x4be5a0b4, 0x4bef1000, 0x4bef10b4,
0x4c000000, 0x4c300000, 0x4c30013e, 0x4c900000, 0x4c000000, 0x4c300000, 0x4c30013e, 0x4c900000,
// Entry 2E0 - 2FF // Entry 2E0 - 2FF
0x4c900001, 0x4cc00000, 0x4cc0012f, 0x4ce00000, 0x4c900001, 0x4cc00000, 0x4cc0012f, 0x4ce00000,
@ -1012,4 +1012,4 @@ var coreTags = []language.CompactCoreInfo{ // 773 elements
const specialTagsStr string = "ca-ES-valencia en-US-u-va-posix" const specialTagsStr string = "ca-ES-valencia en-US-u-va-posix"
// Total table size 3147 bytes (3KiB); checksum: BE816D44 // Total table size 3147 bytes (3KiB); checksum: 6772C83C

View File

@ -328,7 +328,7 @@ func (r Region) IsPrivateUse() bool {
return r.typ()&iso3166UserAssigned != 0 return r.typ()&iso3166UserAssigned != 0
} }
type Script uint8 type Script uint16
// getScriptID returns the script id for string s. It assumes that s // getScriptID returns the script id for string s. It assumes that s
// is of the format [A-Z][a-z]{3}. // is of the format [A-Z][a-z]{3}.

View File

@ -270,7 +270,7 @@ func parse(scan *scanner, s string) (t Tag, err error) {
} else if n >= 4 { } else if n >= 4 {
return Und, ErrSyntax return Und, ErrSyntax
} else { // the usual case } else { // the usual case
t, end = parseTag(scan) t, end = parseTag(scan, true)
if n := len(scan.token); n == 1 { if n := len(scan.token); n == 1 {
t.pExt = uint16(end) t.pExt = uint16(end)
end = parseExtensions(scan) end = parseExtensions(scan)
@ -296,7 +296,8 @@ func parse(scan *scanner, s string) (t Tag, err error) {
// parseTag parses language, script, region and variants. // parseTag parses language, script, region and variants.
// It returns a Tag and the end position in the input that was parsed. // It returns a Tag and the end position in the input that was parsed.
func parseTag(scan *scanner) (t Tag, end int) { // If doNorm is true, then <lang>-<extlang> will be normalized to <extlang>.
func parseTag(scan *scanner, doNorm bool) (t Tag, end int) {
var e error var e error
// TODO: set an error if an unknown lang, script or region is encountered. // TODO: set an error if an unknown lang, script or region is encountered.
t.LangID, e = getLangID(scan.token) t.LangID, e = getLangID(scan.token)
@ -307,14 +308,17 @@ func parseTag(scan *scanner) (t Tag, end int) {
for len(scan.token) == 3 && isAlpha(scan.token[0]) { for len(scan.token) == 3 && isAlpha(scan.token[0]) {
// From http://tools.ietf.org/html/bcp47, <lang>-<extlang> tags are equivalent // From http://tools.ietf.org/html/bcp47, <lang>-<extlang> tags are equivalent
// to a tag of the form <extlang>. // to a tag of the form <extlang>.
lang, e := getLangID(scan.token) if doNorm {
if lang != 0 { lang, e := getLangID(scan.token)
t.LangID = lang if lang != 0 {
copy(scan.b[langStart:], lang.String()) t.LangID = lang
scan.b[langStart+3] = '-' langStr := lang.String()
scan.start = langStart + 4 copy(scan.b[langStart:], langStr)
scan.b[langStart+len(langStr)] = '-'
scan.start = langStart + len(langStr) + 1
}
scan.gobble(e)
} }
scan.gobble(e)
end = scan.scan() end = scan.scan()
} }
if len(scan.token) == 4 && isAlpha(scan.token[0]) { if len(scan.token) == 4 && isAlpha(scan.token[0]) {
@ -559,7 +563,7 @@ func parseExtension(scan *scanner) int {
case 't': // https://www.ietf.org/rfc/rfc6497.txt case 't': // https://www.ietf.org/rfc/rfc6497.txt
scan.scan() scan.scan()
if n := len(scan.token); n >= 2 && n <= 3 && isAlpha(scan.token[1]) { if n := len(scan.token); n >= 2 && n <= 3 && isAlpha(scan.token[1]) {
_, end = parseTag(scan) _, end = parseTag(scan, false)
scan.toLower(start, end) scan.toLower(start, end)
} }
for len(scan.token) == 2 && !isAlpha(scan.token[1]) { for len(scan.token) == 2 && !isAlpha(scan.token[1]) {

File diff suppressed because it is too large Load Diff

View File

@ -74,7 +74,7 @@ type AcceptRange struct {
// AcceptRanges is a slice of AcceptRange values. For a given byte sequence b // AcceptRanges is a slice of AcceptRange values. For a given byte sequence b
// //
// AcceptRanges[First[b[0]]>>AcceptShift] // AcceptRanges[First[b[0]]>>AcceptShift]
// //
// will give the value of AcceptRange for the multi-byte UTF-8 sequence starting // will give the value of AcceptRange for the multi-byte UTF-8 sequence starting
// at b[0]. // at b[0].

View File

@ -10,18 +10,17 @@
// and provides the user with the best experience // and provides the user with the best experience
// (see https://blog.golang.org/matchlang). // (see https://blog.golang.org/matchlang).
// //
// // # Matching preferred against supported languages
// Matching preferred against supported languages
// //
// A Matcher for an application that supports English, Australian English, // A Matcher for an application that supports English, Australian English,
// Danish, and standard Mandarin can be created as follows: // Danish, and standard Mandarin can be created as follows:
// //
// var matcher = language.NewMatcher([]language.Tag{ // var matcher = language.NewMatcher([]language.Tag{
// language.English, // The first language is used as fallback. // language.English, // The first language is used as fallback.
// language.MustParse("en-AU"), // language.MustParse("en-AU"),
// language.Danish, // language.Danish,
// language.Chinese, // language.Chinese,
// }) // })
// //
// This list of supported languages is typically implied by the languages for // This list of supported languages is typically implied by the languages for
// which there exists translations of the user interface. // which there exists translations of the user interface.
@ -30,14 +29,14 @@
// language tags. // language tags.
// The MatchString finds best matches for such strings: // The MatchString finds best matches for such strings:
// //
// handler(w http.ResponseWriter, r *http.Request) { // handler(w http.ResponseWriter, r *http.Request) {
// lang, _ := r.Cookie("lang") // lang, _ := r.Cookie("lang")
// accept := r.Header.Get("Accept-Language") // accept := r.Header.Get("Accept-Language")
// tag, _ := language.MatchStrings(matcher, lang.String(), accept) // tag, _ := language.MatchStrings(matcher, lang.String(), accept)
// //
// // tag should now be used for the initialization of any // // tag should now be used for the initialization of any
// // locale-specific service. // // locale-specific service.
// } // }
// //
// The Matcher's Match method can be used to match Tags directly. // The Matcher's Match method can be used to match Tags directly.
// //
@ -48,8 +47,7 @@
// For instance, it will know that a reader of Bokmål Danish can read Norwegian // For instance, it will know that a reader of Bokmål Danish can read Norwegian
// and will know that Cantonese ("yue") is a good match for "zh-HK". // and will know that Cantonese ("yue") is a good match for "zh-HK".
// //
// // # Using match results
// Using match results
// //
// To guarantee a consistent user experience to the user it is important to // To guarantee a consistent user experience to the user it is important to
// use the same language tag for the selection of any locale-specific services. // use the same language tag for the selection of any locale-specific services.
@ -58,9 +56,9 @@
// More subtly confusing is using the wrong sorting order or casing // More subtly confusing is using the wrong sorting order or casing
// algorithm for a certain language. // algorithm for a certain language.
// //
// All the packages in x/text that provide locale-specific services // All the packages in x/text that provide locale-specific services
// (e.g. collate, cases) should be initialized with the tag that was // (e.g. collate, cases) should be initialized with the tag that was
// obtained at the start of an interaction with the user. // obtained at the start of an interaction with the user.
// //
// Note that Tag that is returned by Match and MatchString may differ from any // Note that Tag that is returned by Match and MatchString may differ from any
// of the supported languages, as it may contain carried over settings from // of the supported languages, as it may contain carried over settings from
@ -70,8 +68,7 @@
// Match and MatchString both return the index of the matched supported tag // Match and MatchString both return the index of the matched supported tag
// to simplify associating such data with the matched tag. // to simplify associating such data with the matched tag.
// //
// // # Canonicalization
// Canonicalization
// //
// If one uses the Matcher to compare languages one does not need to // If one uses the Matcher to compare languages one does not need to
// worry about canonicalization. // worry about canonicalization.
@ -92,10 +89,9 @@
// equivalence relations. The CanonType type can be used to alter the // equivalence relations. The CanonType type can be used to alter the
// canonicalization form. // canonicalization form.
// //
// References // # References
// //
// BCP 47 - Tags for Identifying Languages http://tools.ietf.org/html/bcp47 // BCP 47 - Tags for Identifying Languages http://tools.ietf.org/html/bcp47
//
package language // import "golang.org/x/text/language" package language // import "golang.org/x/text/language"
// TODO: explanation on how to match languages for your own locale-specific // TODO: explanation on how to match languages for your own locale-specific
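
A runnable, hedged version of the matcher example from the documentation above; the Accept-Language value is made up:

package main

import (
    "fmt"

    "golang.org/x/text/language"
)

func main() {
    // Supported languages; the first entry is the fallback.
    matcher := language.NewMatcher([]language.Tag{
        language.English,
        language.MustParse("en-AU"),
        language.Danish,
        language.Chinese,
    })

    // Made-up Accept-Language header from a client.
    accept := "da, en-gb;q=0.8, en;q=0.7"

    tag, index, conf := language.MatchStrings(matcher, accept)
    fmt.Printf("matched %v (supported index %d, confidence %v)\n", tag, index, conf)
}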

View File

@ -545,7 +545,7 @@ type bestMatch struct {
// match as the preferred match. // match as the preferred match.
// //
// If pin is true and have and tag are a strong match, it will henceforth only // If pin is true and have and tag are a strong match, it will henceforth only
// consider matches for this language. This corresponds to the nothing that most // consider matches for this language. This corresponds to the idea that most
// users have a strong preference for the first defined language. A user can // users have a strong preference for the first defined language. A user can
// still prefer a second language over a dialect of the preferred language by // still prefer a second language over a dialect of the preferred language by
// explicitly specifying dialects, e.g. "en, nl, en-GB". In this case pin should // explicitly specifying dialects, e.g. "en, nl, en-GB". In this case pin should

View File

@ -147,6 +147,7 @@ func update(b *language.Builder, part ...interface{}) (err error) {
} }
var errInvalidWeight = errors.New("ParseAcceptLanguage: invalid weight") var errInvalidWeight = errors.New("ParseAcceptLanguage: invalid weight")
var errTagListTooLarge = errors.New("tag list exceeds max length")
// ParseAcceptLanguage parses the contents of an Accept-Language header as // ParseAcceptLanguage parses the contents of an Accept-Language header as
// defined in http://www.ietf.org/rfc/rfc2616.txt and returns a list of Tags and // defined in http://www.ietf.org/rfc/rfc2616.txt and returns a list of Tags and
@ -164,6 +165,10 @@ func ParseAcceptLanguage(s string) (tag []Tag, q []float32, err error) {
} }
}() }()
if strings.Count(s, "-") > 1000 {
return nil, nil, errTagListTooLarge
}
var entry string var entry string
for s != "" { for s != "" {
if entry, s = split(s, ','); entry == "" { if entry, s = split(s, ','); entry == "" {
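
A short sketch (not part of this change) of how the guarded parser above is typically called; the header value is made up:

package main

import (
    "fmt"
    "log"

    "golang.org/x/text/language"
)

func main() {
    // Typical Accept-Language header sent by a browser (made-up value).
    header := "nl-NL,nl;q=0.9,en-US;q=0.8,en;q=0.7"

    tags, weights, err := language.ParseAcceptLanguage(header)
    if err != nil {
        log.Fatal(err)
    }
    for i, tag := range tags {
        fmt.Printf("%d: %v (q=%.2f)\n", i, tag, weights[i])
    }
}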

View File

@ -39,12 +39,12 @@ const (
_Hani = 57 _Hani = 57
_Hans = 59 _Hans = 59
_Hant = 60 _Hant = 60
_Qaaa = 143 _Qaaa = 147
_Qaai = 151 _Qaai = 155
_Qabx = 192 _Qabx = 196
_Zinh = 245 _Zinh = 252
_Zyyy = 250 _Zyyy = 257
_Zzzz = 251 _Zzzz = 258
) )
var regionToGroups = []uint8{ // 358 elements var regionToGroups = []uint8{ // 358 elements
@ -265,9 +265,9 @@ var matchScript = []scriptIntelligibility{ // 26 elements
13: {wantLang: 0x39d, haveLang: 0x139, wantScript: 0x36, haveScript: 0x5a, distance: 0xa}, 13: {wantLang: 0x39d, haveLang: 0x139, wantScript: 0x36, haveScript: 0x5a, distance: 0xa},
14: {wantLang: 0x3be, haveLang: 0x139, wantScript: 0x5, haveScript: 0x5a, distance: 0xa}, 14: {wantLang: 0x3be, haveLang: 0x139, wantScript: 0x5, haveScript: 0x5a, distance: 0xa},
15: {wantLang: 0x3fa, haveLang: 0x139, wantScript: 0x5, haveScript: 0x5a, distance: 0xa}, 15: {wantLang: 0x3fa, haveLang: 0x139, wantScript: 0x5, haveScript: 0x5a, distance: 0xa},
16: {wantLang: 0x40c, haveLang: 0x139, wantScript: 0xcf, haveScript: 0x5a, distance: 0xa}, 16: {wantLang: 0x40c, haveLang: 0x139, wantScript: 0xd4, haveScript: 0x5a, distance: 0xa},
17: {wantLang: 0x450, haveLang: 0x139, wantScript: 0xde, haveScript: 0x5a, distance: 0xa}, 17: {wantLang: 0x450, haveLang: 0x139, wantScript: 0xe3, haveScript: 0x5a, distance: 0xa},
18: {wantLang: 0x461, haveLang: 0x139, wantScript: 0xe1, haveScript: 0x5a, distance: 0xa}, 18: {wantLang: 0x461, haveLang: 0x139, wantScript: 0xe6, haveScript: 0x5a, distance: 0xa},
19: {wantLang: 0x46f, haveLang: 0x139, wantScript: 0x2c, haveScript: 0x5a, distance: 0xa}, 19: {wantLang: 0x46f, haveLang: 0x139, wantScript: 0x2c, haveScript: 0x5a, distance: 0xa},
20: {wantLang: 0x476, haveLang: 0x3e2, wantScript: 0x5a, haveScript: 0x20, distance: 0xa}, 20: {wantLang: 0x476, haveLang: 0x3e2, wantScript: 0x5a, haveScript: 0x20, distance: 0xa},
21: {wantLang: 0x4b4, haveLang: 0x139, wantScript: 0x5, haveScript: 0x5a, distance: 0xa}, 21: {wantLang: 0x4b4, haveLang: 0x139, wantScript: 0x5, haveScript: 0x5a, distance: 0xa},

View File

@ -193,14 +193,14 @@ func (p *paragraph) run() {
// //
// At the end of this function: // At the end of this function:
// //
// - The member variable matchingPDI is set to point to the index of the // - The member variable matchingPDI is set to point to the index of the
// matching PDI character for each isolate initiator character. If there is // matching PDI character for each isolate initiator character. If there is
// no matching PDI, it is set to the length of the input text. For other // no matching PDI, it is set to the length of the input text. For other
// characters, it is set to -1. // characters, it is set to -1.
// - The member variable matchingIsolateInitiator is set to point to the // - The member variable matchingIsolateInitiator is set to point to the
// index of the matching isolate initiator character for each PDI character. // index of the matching isolate initiator character for each PDI character.
// If there is no matching isolate initiator, or the character is not a PDI, // If there is no matching isolate initiator, or the character is not a PDI,
// it is set to -1. // it is set to -1.
func (p *paragraph) determineMatchingIsolates() { func (p *paragraph) determineMatchingIsolates() {
p.matchingPDI = make([]int, p.Len()) p.matchingPDI = make([]int, p.Len())
p.matchingIsolateInitiator = make([]int, p.Len()) p.matchingIsolateInitiator = make([]int, p.Len())
@ -435,7 +435,7 @@ func maxLevel(a, b level) level {
} }
// Rule X10, second bullet: Determine the start-of-sequence (sos) and end-of-sequence (eos) types, // Rule X10, second bullet: Determine the start-of-sequence (sos) and end-of-sequence (eos) types,
// either L or R, for each isolating run sequence. // either L or R, for each isolating run sequence.
func (p *paragraph) isolatingRunSequence(indexes []int) *isolatingRunSequence { func (p *paragraph) isolatingRunSequence(indexes []int) *isolatingRunSequence {
length := len(indexes) length := len(indexes)
types := make([]Class, length) types := make([]Class, length)
@ -495,9 +495,9 @@ func (s *isolatingRunSequence) resolveWeakTypes() {
if t == NSM { if t == NSM {
s.types[i] = precedingCharacterType s.types[i] = precedingCharacterType
} else { } else {
if t.in(LRI, RLI, FSI, PDI) { // if t.in(LRI, RLI, FSI, PDI) {
precedingCharacterType = ON // precedingCharacterType = ON
} // }
precedingCharacterType = t precedingCharacterType = t
} }
} }
@ -905,7 +905,7 @@ func (p *paragraph) getLevels(linebreaks []int) []level {
// Lines are concatenated from left to right. So for example, the fifth // Lines are concatenated from left to right. So for example, the fifth
// character from the left on the third line is // character from the left on the third line is
// //
// getReordering(linebreaks)[linebreaks[1] + 4] // getReordering(linebreaks)[linebreaks[1] + 4]
// //
// (linebreaks[1] is the position after the last character of the second // (linebreaks[1] is the position after the last character of the second
// line, which is also the index of the first character on the third line, // line, which is also the index of the first character on the third line,

View File

@ -110,10 +110,11 @@ func (p Properties) BoundaryAfter() bool {
} }
// We pack quick check data in 4 bits: // We pack quick check data in 4 bits:
// 5: Combines forward (0 == false, 1 == true) //
// 4..3: NFC_QC Yes(00), No (10), or Maybe (11) // 5: Combines forward (0 == false, 1 == true)
// 2: NFD_QC Yes (0) or No (1). No also means there is a decomposition. // 4..3: NFC_QC Yes(00), No (10), or Maybe (11)
// 1..0: Number of trailing non-starters. // 2: NFD_QC Yes (0) or No (1). No also means there is a decomposition.
// 1..0: Number of trailing non-starters.
// //
// When all 4 bits are zero, the character is inert, meaning it is never // When all 4 bits are zero, the character is inert, meaning it is never
// influenced by normalization. // influenced by normalization.

View File

@ -18,16 +18,17 @@ import (
// A Form denotes a canonical representation of Unicode code points. // A Form denotes a canonical representation of Unicode code points.
// The Unicode-defined normalization and equivalence forms are: // The Unicode-defined normalization and equivalence forms are:
// //
// NFC Unicode Normalization Form C // NFC Unicode Normalization Form C
// NFD Unicode Normalization Form D // NFD Unicode Normalization Form D
// NFKC Unicode Normalization Form KC // NFKC Unicode Normalization Form KC
// NFKD Unicode Normalization Form KD // NFKD Unicode Normalization Form KD
// //
// For a Form f, this documentation uses the notation f(x) to mean // For a Form f, this documentation uses the notation f(x) to mean
// the bytes or string x converted to the given form. // the bytes or string x converted to the given form.
// A position n in x is called a boundary if conversion to the form can // A position n in x is called a boundary if conversion to the form can
// proceed independently on both sides: // proceed independently on both sides:
// f(x) == append(f(x[0:n]), f(x[n:])...) //
// f(x) == append(f(x[0:n]), f(x[n:])...)
// //
// References: https://unicode.org/reports/tr15/ and // References: https://unicode.org/reports/tr15/ and
// https://unicode.org/notes/tn5/. // https://unicode.org/notes/tn5/.
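
An illustrative sketch (not part of this change) of the normalization forms described above, comparing a composed and a decomposed "é":

package main

import (
    "fmt"

    "golang.org/x/text/unicode/norm"
)

func main() {
    composed := "\u00e9"    // "é" as a single code point
    decomposed := "e\u0301" // "e" followed by a combining acute accent

    fmt.Println(composed == decomposed)                  // false
    fmt.Println(norm.NFC.String(decomposed) == composed) // true
    fmt.Println(norm.NFD.String(composed) == decomposed) // true
}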

View File

@ -7315,7 +7315,7 @@ const recompMapPacked = "" +
"\x00V\x03\x03\x00\x00\x1e|" + // 0x00560303: 0x00001E7C "\x00V\x03\x03\x00\x00\x1e|" + // 0x00560303: 0x00001E7C
"\x00v\x03\x03\x00\x00\x1e}" + // 0x00760303: 0x00001E7D "\x00v\x03\x03\x00\x00\x1e}" + // 0x00760303: 0x00001E7D
"\x00V\x03#\x00\x00\x1e~" + // 0x00560323: 0x00001E7E "\x00V\x03#\x00\x00\x1e~" + // 0x00560323: 0x00001E7E
"\x00v\x03#\x00\x00\x1e\u007f" + // 0x00760323: 0x00001E7F "\x00v\x03#\x00\x00\x1e\x7f" + // 0x00760323: 0x00001E7F
"\x00W\x03\x00\x00\x00\x1e\x80" + // 0x00570300: 0x00001E80 "\x00W\x03\x00\x00\x00\x1e\x80" + // 0x00570300: 0x00001E80
"\x00w\x03\x00\x00\x00\x1e\x81" + // 0x00770300: 0x00001E81 "\x00w\x03\x00\x00\x00\x1e\x81" + // 0x00770300: 0x00001E81
"\x00W\x03\x01\x00\x00\x1e\x82" + // 0x00570301: 0x00001E82 "\x00W\x03\x01\x00\x00\x1e\x82" + // 0x00570301: 0x00001E82
@ -7342,7 +7342,7 @@ const recompMapPacked = "" +
"\x00t\x03\b\x00\x00\x1e\x97" + // 0x00740308: 0x00001E97 "\x00t\x03\b\x00\x00\x1e\x97" + // 0x00740308: 0x00001E97
"\x00w\x03\n\x00\x00\x1e\x98" + // 0x0077030A: 0x00001E98 "\x00w\x03\n\x00\x00\x1e\x98" + // 0x0077030A: 0x00001E98
"\x00y\x03\n\x00\x00\x1e\x99" + // 0x0079030A: 0x00001E99 "\x00y\x03\n\x00\x00\x1e\x99" + // 0x0079030A: 0x00001E99
"\x01\u007f\x03\a\x00\x00\x1e\x9b" + // 0x017F0307: 0x00001E9B "\x01\x7f\x03\a\x00\x00\x1e\x9b" + // 0x017F0307: 0x00001E9B
"\x00A\x03#\x00\x00\x1e\xa0" + // 0x00410323: 0x00001EA0 "\x00A\x03#\x00\x00\x1e\xa0" + // 0x00410323: 0x00001EA0
"\x00a\x03#\x00\x00\x1e\xa1" + // 0x00610323: 0x00001EA1 "\x00a\x03#\x00\x00\x1e\xa1" + // 0x00610323: 0x00001EA1
"\x00A\x03\t\x00\x00\x1e\xa2" + // 0x00410309: 0x00001EA2 "\x00A\x03\t\x00\x00\x1e\xa2" + // 0x00410309: 0x00001EA2

View File

@ -1146,21 +1146,31 @@ var widthIndex = [1408]uint8{
} }
// inverseData contains 4-byte entries of the following format: // inverseData contains 4-byte entries of the following format:
// <length> <modified UTF-8-encoded rune> <0 padding> //
// <length> <modified UTF-8-encoded rune> <0 padding>
//
// The last byte of the UTF-8-encoded rune is xor-ed with the last byte of the // The last byte of the UTF-8-encoded rune is xor-ed with the last byte of the
// UTF-8 encoding of the original rune. Mappings often have the following // UTF-8 encoding of the original rune. Mappings often have the following
// pattern: // pattern:
// -> A (U+FF21 -> U+0041) //
// -> B (U+FF22 -> U+0042) // -> A (U+FF21 -> U+0041)
// ... // -> B (U+FF22 -> U+0042)
// ...
//
// By xor-ing the last byte the same entry can be shared by many mappings. This // By xor-ing the last byte the same entry can be shared by many mappings. This
// reduces the total number of distinct entries by about two thirds. // reduces the total number of distinct entries by about two thirds.
// The resulting entry for the aforementioned mappings is // The resulting entry for the aforementioned mappings is
// { 0x01, 0xE0, 0x00, 0x00 } //
// { 0x01, 0xE0, 0x00, 0x00 }
//
// Using this entry to map U+FF21 (UTF-8 [EF BC A1]), we get // Using this entry to map U+FF21 (UTF-8 [EF BC A1]), we get
// E0 ^ A1 = 41. //
// E0 ^ A1 = 41.
//
// Similarly, for U+FF22 (UTF-8 [EF BC A2]), we get // Similarly, for U+FF22 (UTF-8 [EF BC A2]), we get
// E0 ^ A2 = 42. //
// E0 ^ A2 = 42.
//
// Note that because of the xor-ing, the byte sequence stored in the entry is // Note that because of the xor-ing, the byte sequence stored in the entry is
// not valid UTF-8. // not valid UTF-8.
var inverseData = [150][4]byte{ var inverseData = [150][4]byte{
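A worked version of the xor arithmetic in the comment above; the entry value is taken from the comment's own example, and the rest of the snippet is purely illustrative:

package main

import (
	"fmt"
	"unicode/utf8"
)

func main() {
	// Entry from the comment: length 1, payload byte 0xE0.
	entry := [4]byte{0x01, 0xE0, 0x00, 0x00}

	// UTF-8 encoding of U+FF21 (FULLWIDTH LATIN CAPITAL LETTER A) is EF BC A1.
	var buf [4]byte
	n := utf8.EncodeRune(buf[:], '\uFF21')
	last := buf[n-1] // 0xA1

	// XOR the entry's payload with the last byte of the original encoding
	// to recover the mapped rune: 0xE0 ^ 0xA1 = 0x41 = 'A'.
	mapped := rune(entry[1] ^ last)
	fmt.Printf("%c\n", mapped) // A
}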

View File

@ -1158,21 +1158,31 @@ var widthIndex = [1408]uint8{
} }
// inverseData contains 4-byte entries of the following format: // inverseData contains 4-byte entries of the following format:
// <length> <modified UTF-8-encoded rune> <0 padding> //
// <length> <modified UTF-8-encoded rune> <0 padding>
//
// The last byte of the UTF-8-encoded rune is xor-ed with the last byte of the // The last byte of the UTF-8-encoded rune is xor-ed with the last byte of the
// UTF-8 encoding of the original rune. Mappings often have the following // UTF-8 encoding of the original rune. Mappings often have the following
// pattern: // pattern:
// -> A (U+FF21 -> U+0041) //
// -> B (U+FF22 -> U+0042) // -> A (U+FF21 -> U+0041)
// ... // -> B (U+FF22 -> U+0042)
// ...
//
// By xor-ing the last byte the same entry can be shared by many mappings. This // By xor-ing the last byte the same entry can be shared by many mappings. This
// reduces the total number of distinct entries by about two thirds. // reduces the total number of distinct entries by about two thirds.
// The resulting entry for the aforementioned mappings is // The resulting entry for the aforementioned mappings is
// { 0x01, 0xE0, 0x00, 0x00 } //
// { 0x01, 0xE0, 0x00, 0x00 }
//
// Using this entry to map U+FF21 (UTF-8 [EF BC A1]), we get // Using this entry to map U+FF21 (UTF-8 [EF BC A1]), we get
// E0 ^ A1 = 41. //
// E0 ^ A1 = 41.
//
// Similarly, for U+FF22 (UTF-8 [EF BC A2]), we get // Similarly, for U+FF22 (UTF-8 [EF BC A2]), we get
// E0 ^ A2 = 42. //
// E0 ^ A2 = 42.
//
// Note that because of the xor-ing, the byte sequence stored in the entry is // Note that because of the xor-ing, the byte sequence stored in the entry is
// not valid UTF-8. // not valid UTF-8.
var inverseData = [150][4]byte{ var inverseData = [150][4]byte{

View File

@ -1178,21 +1178,31 @@ var widthIndex = [1408]uint8{
} }
// inverseData contains 4-byte entries of the following format: // inverseData contains 4-byte entries of the following format:
// <length> <modified UTF-8-encoded rune> <0 padding> //
// <length> <modified UTF-8-encoded rune> <0 padding>
//
// The last byte of the UTF-8-encoded rune is xor-ed with the last byte of the // The last byte of the UTF-8-encoded rune is xor-ed with the last byte of the
// UTF-8 encoding of the original rune. Mappings often have the following // UTF-8 encoding of the original rune. Mappings often have the following
// pattern: // pattern:
// -> A (U+FF21 -> U+0041) //
// -> B (U+FF22 -> U+0042) // -> A (U+FF21 -> U+0041)
// ... // -> B (U+FF22 -> U+0042)
// ...
//
// By xor-ing the last byte the same entry can be shared by many mappings. This // By xor-ing the last byte the same entry can be shared by many mappings. This
// reduces the total number of distinct entries by about two thirds. // reduces the total number of distinct entries by about two thirds.
// The resulting entry for the aforementioned mappings is // The resulting entry for the aforementioned mappings is
// { 0x01, 0xE0, 0x00, 0x00 } //
// { 0x01, 0xE0, 0x00, 0x00 }
//
// Using this entry to map U+FF21 (UTF-8 [EF BC A1]), we get // Using this entry to map U+FF21 (UTF-8 [EF BC A1]), we get
// E0 ^ A1 = 41. //
// E0 ^ A1 = 41.
//
// Similarly, for U+FF22 (UTF-8 [EF BC A2]), we get // Similarly, for U+FF22 (UTF-8 [EF BC A2]), we get
// E0 ^ A2 = 42. //
// E0 ^ A2 = 42.
//
// Note that because of the xor-ing, the byte sequence stored in the entry is // Note that because of the xor-ing, the byte sequence stored in the entry is
// not valid UTF-8. // not valid UTF-8.
var inverseData = [150][4]byte{ var inverseData = [150][4]byte{

View File

@ -1179,21 +1179,31 @@ var widthIndex = [1408]uint8{
} }
// inverseData contains 4-byte entries of the following format: // inverseData contains 4-byte entries of the following format:
// <length> <modified UTF-8-encoded rune> <0 padding> //
// <length> <modified UTF-8-encoded rune> <0 padding>
//
// The last byte of the UTF-8-encoded rune is xor-ed with the last byte of the // The last byte of the UTF-8-encoded rune is xor-ed with the last byte of the
// UTF-8 encoding of the original rune. Mappings often have the following // UTF-8 encoding of the original rune. Mappings often have the following
// pattern: // pattern:
// -> A (U+FF21 -> U+0041) //
// -> B (U+FF22 -> U+0042) // -> A (U+FF21 -> U+0041)
// ... // -> B (U+FF22 -> U+0042)
// ...
//
// By xor-ing the last byte the same entry can be shared by many mappings. This // By xor-ing the last byte the same entry can be shared by many mappings. This
// reduces the total number of distinct entries by about two thirds. // reduces the total number of distinct entries by about two thirds.
// The resulting entry for the aforementioned mappings is // The resulting entry for the aforementioned mappings is
// { 0x01, 0xE0, 0x00, 0x00 } //
// { 0x01, 0xE0, 0x00, 0x00 }
//
// Using this entry to map U+FF21 (UTF-8 [EF BC A1]), we get // Using this entry to map U+FF21 (UTF-8 [EF BC A1]), we get
// E0 ^ A1 = 41. //
// E0 ^ A1 = 41.
//
// Similarly, for U+FF22 (UTF-8 [EF BC A2]), we get // Similarly, for U+FF22 (UTF-8 [EF BC A2]), we get
// E0 ^ A2 = 42. //
// E0 ^ A2 = 42.
//
// Note that because of the xor-ing, the byte sequence stored in the entry is // Note that because of the xor-ing, the byte sequence stored in the entry is
// not valid UTF-8. // not valid UTF-8.
var inverseData = [150][4]byte{ var inverseData = [150][4]byte{

View File

@ -1114,21 +1114,31 @@ var widthIndex = [1408]uint8{
} }
// inverseData contains 4-byte entries of the following format: // inverseData contains 4-byte entries of the following format:
// <length> <modified UTF-8-encoded rune> <0 padding> //
// <length> <modified UTF-8-encoded rune> <0 padding>
//
// The last byte of the UTF-8-encoded rune is xor-ed with the last byte of the // The last byte of the UTF-8-encoded rune is xor-ed with the last byte of the
// UTF-8 encoding of the original rune. Mappings often have the following // UTF-8 encoding of the original rune. Mappings often have the following
// pattern: // pattern:
// -> A (U+FF21 -> U+0041) //
// -> B (U+FF22 -> U+0042) // -> A (U+FF21 -> U+0041)
// ... // -> B (U+FF22 -> U+0042)
// ...
//
// By xor-ing the last byte the same entry can be shared by many mappings. This // By xor-ing the last byte the same entry can be shared by many mappings. This
// reduces the total number of distinct entries by about two thirds. // reduces the total number of distinct entries by about two thirds.
// The resulting entry for the aforementioned mappings is // The resulting entry for the aforementioned mappings is
// { 0x01, 0xE0, 0x00, 0x00 } //
// { 0x01, 0xE0, 0x00, 0x00 }
//
// Using this entry to map U+FF21 (UTF-8 [EF BC A1]), we get // Using this entry to map U+FF21 (UTF-8 [EF BC A1]), we get
// E0 ^ A1 = 41. //
// E0 ^ A1 = 41.
//
// Similarly, for U+FF22 (UTF-8 [EF BC A2]), we get // Similarly, for U+FF22 (UTF-8 [EF BC A2]), we get
// E0 ^ A2 = 42. //
// E0 ^ A2 = 42.
//
// Note that because of the xor-ing, the byte sequence stored in the entry is // Note that because of the xor-ing, the byte sequence stored in the entry is
// not valid UTF-8. // not valid UTF-8.
var inverseData = [150][4]byte{ var inverseData = [150][4]byte{

vendor/modules.txt vendored
View File

@ -230,6 +230,15 @@ github.com/golang/protobuf/ptypes/wrappers
# github.com/golang/snappy v0.0.4 # github.com/golang/snappy v0.0.4
## explicit ## explicit
github.com/golang/snappy github.com/golang/snappy
# github.com/google/fscrypt v0.3.3
## explicit; go 1.11
github.com/google/fscrypt/actions
github.com/google/fscrypt/crypto
github.com/google/fscrypt/filesystem
github.com/google/fscrypt/keyring
github.com/google/fscrypt/metadata
github.com/google/fscrypt/security
github.com/google/fscrypt/util
# github.com/google/gnostic v0.5.7-v3refs # github.com/google/gnostic v0.5.7-v3refs
## explicit; go 1.12 ## explicit; go 1.12
github.com/google/gnostic/compiler github.com/google/gnostic/compiler
@ -476,6 +485,9 @@ github.com/pierrec/lz4/internal/xxh32
# github.com/pkg/errors v0.9.1 # github.com/pkg/errors v0.9.1
## explicit ## explicit
github.com/pkg/errors github.com/pkg/errors
# github.com/pkg/xattr v0.4.7
## explicit; go 1.14
github.com/pkg/xattr
# github.com/pmezard/go-difflib v1.0.0 # github.com/pmezard/go-difflib v1.0.0
## explicit ## explicit
github.com/pmezard/go-difflib/difflib github.com/pmezard/go-difflib/difflib
@ -594,6 +606,7 @@ go.uber.org/zap/internal/exit
go.uber.org/zap/zapcore go.uber.org/zap/zapcore
# golang.org/x/crypto v0.0.0-20220315160706-3147a52a75dd # golang.org/x/crypto v0.0.0-20220315160706-3147a52a75dd
## explicit; go 1.17 ## explicit; go 1.17
golang.org/x/crypto/argon2
golang.org/x/crypto/blake2b golang.org/x/crypto/blake2b
golang.org/x/crypto/blowfish golang.org/x/crypto/blowfish
golang.org/x/crypto/chacha20 golang.org/x/crypto/chacha20
@ -602,6 +615,7 @@ golang.org/x/crypto/cryptobyte/asn1
golang.org/x/crypto/curve25519 golang.org/x/crypto/curve25519
golang.org/x/crypto/curve25519/internal/field golang.org/x/crypto/curve25519/internal/field
golang.org/x/crypto/ed25519 golang.org/x/crypto/ed25519
golang.org/x/crypto/hkdf
golang.org/x/crypto/internal/poly1305 golang.org/x/crypto/internal/poly1305
golang.org/x/crypto/internal/subtle golang.org/x/crypto/internal/subtle
golang.org/x/crypto/pbkdf2 golang.org/x/crypto/pbkdf2
@ -637,7 +651,7 @@ golang.org/x/sys/windows
# golang.org/x/term v0.0.0-20210927222741-03fcf44c2211 # golang.org/x/term v0.0.0-20210927222741-03fcf44c2211
## explicit; go 1.17 ## explicit; go 1.17
golang.org/x/term golang.org/x/term
# golang.org/x/text v0.3.7 # golang.org/x/text v0.3.8
## explicit; go 1.17 ## explicit; go 1.17
golang.org/x/text/cases golang.org/x/text/cases
golang.org/x/text/encoding golang.org/x/text/encoding