diff --git a/e2e/e2e_test.go b/e2e/e2e_test.go index 338a776b6..40745d8b5 100644 --- a/e2e/e2e_test.go +++ b/e2e/e2e_test.go @@ -39,6 +39,7 @@ func init() { flag.BoolVar(&deployNFS, "deploy-nfs", false, "deploy nfs csi driver") flag.BoolVar(&testCephFS, "test-cephfs", true, "test cephFS csi driver") flag.BoolVar(&testRBD, "test-rbd", true, "test rbd csi driver") + flag.BoolVar(&testRBDFSCrypt, "test-rbd-fscrypt", false, "test rbd csi driver fscrypt support") flag.BoolVar(&testNBD, "test-nbd", false, "test rbd csi driver with rbd-nbd mounter") flag.BoolVar(&testNFS, "test-nfs", false, "test nfs csi driver") flag.BoolVar(&helmTest, "helm-test", false, "tests running on deployment via helm") diff --git a/e2e/rbd.go b/e2e/rbd.go index 4383d1774..649fd5d93 100644 --- a/e2e/rbd.go +++ b/e2e/rbd.go @@ -232,6 +232,24 @@ func checkClusternameInMetadata(f *framework.Framework, ns, pool, image string) } } +// ByFileAndBlockEncryption wraps ginkgo's By to run the test body using file and block encryption specific validators. +func ByFileAndBlockEncryption( + text string, + callback func(validator encryptionValidateFunc, pvcValidator validateFunc, encryptionType util.EncryptionType), +) { + By(text+" (block)", func() { + callback(validateEncryptedPVCAndAppBinding, isBlockEncryptedPVC, util.EncryptionTypeBlock) + }) + By(text+" (file)", func() { + if !testRBDFSCrypt { + e2elog.Logf("skipping RBD fscrypt file encryption test") + + return + } + callback(validateEncryptedFilesystemAndAppBinding, isFileEncryptedPVC, util.EncryptionTypeFile) + }) +} + var _ = Describe("RBD", func() { f := framework.NewDefaultFramework(rbdType) var c clientset.Interface @@ -989,7 +1007,8 @@ var _ = Describe("RBD", func() { noKMS, noKMS, defaultSCName, erasureCodedPool, - f) + f, + noPVCValidation) }) By("create an erasure coded PVC and validate PVC-PVC clone", func() { @@ -1871,13 +1890,14 @@ var _ = Describe("RBD", func() { } }) - By("create a PVC and bind it to an app using rbd-nbd mounter with encryption", func() { + ByFileAndBlockEncryption("create a PVC and bind it to an app using rbd-nbd mounter with encryption", func( + validator encryptionValidateFunc, _ validateFunc, encType util.EncryptionType, + ) { if !testNBD { e2elog.Logf("skipping NBD test") return } - err := deleteResource(rbdExamplePath + "storageclass.yaml") if err != nil { e2elog.Failf("failed to delete storageclass: %v", err) @@ -1893,12 +1913,13 @@ var _ = Describe("RBD", func() { "mapOptions": nbdMapOptions, "cephLogStrategy": e2eDefaultCephLogStrategy, "encrypted": "true", + "encryptionType": util.EncryptionTypeString(encType), }, deletePolicy) if err != nil { e2elog.Failf("failed to create storageclass: %v", err) } - err = validateEncryptedPVCAndAppBinding(pvcPath, appPath, noKMS, f) + err = validator(pvcPath, appPath, noKMS, f) if err != nil { e2elog.Failf("failed to validate encrypted pvc: %v", err) } @@ -1915,7 +1936,9 @@ var _ = Describe("RBD", func() { } }) - By("create a PVC and bind it to an app with encrypted RBD volume", func() { + ByFileAndBlockEncryption("create a PVC and bind it to an app with encrypted RBD volume", func( + validator encryptionValidateFunc, _ validateFunc, encType util.EncryptionType, + ) { err := deleteResource(rbdExamplePath + "storageclass.yaml") if err != nil { e2elog.Failf("failed to delete storageclass: %v", err) @@ -1925,12 +1948,12 @@ var _ = Describe("RBD", func() { f, defaultSCName, nil, - map[string]string{"encrypted": "true"}, + map[string]string{"encrypted": "true", "encryptionType": 
util.EncryptionTypeString(encType)}, deletePolicy) if err != nil { e2elog.Failf("failed to create storageclass: %v", err) } - err = validateEncryptedPVCAndAppBinding(pvcPath, appPath, noKMS, f) + err = validator(pvcPath, appPath, noKMS, f) if err != nil { e2elog.Failf("failed to validate encrypted pvc: %v", err) } @@ -1947,7 +1970,9 @@ var _ = Describe("RBD", func() { } }) - By("Resize Encrypted Block PVC and check Device size", func() { + ByFileAndBlockEncryption("Resize Encrypted Block PVC and check Device size", func( + validator encryptionValidateFunc, _ validateFunc, encType util.EncryptionType, + ) { err := deleteResource(rbdExamplePath + "storageclass.yaml") if err != nil { e2elog.Failf("failed to delete storageclass: %v", err) @@ -1957,7 +1982,7 @@ var _ = Describe("RBD", func() { f, defaultSCName, nil, - map[string]string{"encrypted": "true"}, + map[string]string{"encrypted": "true", "encryptionType": util.EncryptionTypeString(encType)}, deletePolicy) if err != nil { e2elog.Failf("failed to create storageclass: %v", err) @@ -1972,15 +1997,16 @@ var _ = Describe("RBD", func() { validateRBDImageCount(f, 0, defaultRBDPool) validateOmapCount(f, 0, rbdType, defaultRBDPool, volumesType) - // Block PVC resize - err = resizePVCAndValidateSize(rawPvcPath, rawAppPath, f) - if err != nil { - e2elog.Failf("failed to resize block PVC: %v", err) + if encType != util.EncryptionTypeFile { + // Block PVC resize + err = resizePVCAndValidateSize(rawPvcPath, rawAppPath, f) + if err != nil { + e2elog.Failf("failed to resize block PVC: %v", err) + } + // validate created backend rbd images + validateRBDImageCount(f, 0, defaultRBDPool) + validateOmapCount(f, 0, rbdType, defaultRBDPool, volumesType) } - // validate created backend rbd images - validateRBDImageCount(f, 0, defaultRBDPool) - validateOmapCount(f, 0, rbdType, defaultRBDPool, volumesType) - err = deleteResource(rbdExamplePath + "storageclass.yaml") if err != nil { e2elog.Failf("failed to delete storageclass: %v", err) @@ -1991,7 +2017,9 @@ var _ = Describe("RBD", func() { } }) - By("create a PVC and bind it to an app with encrypted RBD volume with VaultKMS", func() { + ByFileAndBlockEncryption("create a PVC and bind it to an app with encrypted RBD volume with VaultKMS", func( + validator encryptionValidateFunc, _ validateFunc, encType util.EncryptionType, + ) { err := deleteResource(rbdExamplePath + "storageclass.yaml") if err != nil { e2elog.Failf("failed to delete storageclass: %v", err) @@ -1999,12 +2027,13 @@ var _ = Describe("RBD", func() { scOpts := map[string]string{ "encrypted": "true", "encryptionKMSID": "vault-test", + "encryptionType": util.EncryptionTypeString(encType), } err = createRBDStorageClass(f.ClientSet, f, defaultSCName, nil, scOpts, deletePolicy) if err != nil { e2elog.Failf("failed to create storageclass: %v", err) } - err = validateEncryptedPVCAndAppBinding(pvcPath, appPath, vaultKMS, f) + err = validator(pvcPath, appPath, vaultKMS, f) if err != nil { e2elog.Failf("failed to validate encrypted pvc: %v", err) } @@ -2021,7 +2050,9 @@ var _ = Describe("RBD", func() { } }) - By("create a PVC and bind it to an app with encrypted RBD volume with VaultTokensKMS", func() { + ByFileAndBlockEncryption("create a PVC and bind it to an app with encrypted RBD volume with VaultTokensKMS", func( + validator encryptionValidateFunc, _ validateFunc, encType util.EncryptionType, + ) { err := deleteResource(rbdExamplePath + "storageclass.yaml") if err != nil { e2elog.Failf("failed to delete storageclass: %v", err) @@ -2029,6 +2060,7 @@ var 
_ = Describe("RBD", func() { scOpts := map[string]string{ "encrypted": "true", "encryptionKMSID": "vault-tokens-test", + "encryptionType": util.EncryptionTypeString(encType), } err = createRBDStorageClass(f.ClientSet, f, defaultSCName, nil, scOpts, deletePolicy) if err != nil { @@ -2048,7 +2080,7 @@ var _ = Describe("RBD", func() { e2elog.Failf("failed to create Secret with tenant token: %v", err) } - err = validateEncryptedPVCAndAppBinding(pvcPath, appPath, vaultTokensKMS, f) + err = validator(pvcPath, appPath, vaultTokensKMS, f) if err != nil { e2elog.Failf("failed to validate encrypted pvc: %v", err) } @@ -2072,7 +2104,9 @@ var _ = Describe("RBD", func() { } }) - By("create a PVC and bind it to an app with encrypted RBD volume with VaultTenantSA KMS", func() { + ByFileAndBlockEncryption("create a PVC and bind it to an app with encrypted RBD volume with VaultTenantSA KMS", func( + validator encryptionValidateFunc, _ validateFunc, encType util.EncryptionType, + ) { err := deleteResource(rbdExamplePath + "storageclass.yaml") if err != nil { e2elog.Failf("failed to delete storageclass: %v", err) @@ -2080,6 +2114,7 @@ var _ = Describe("RBD", func() { scOpts := map[string]string{ "encrypted": "true", "encryptionKMSID": "vault-tenant-sa-test", + "encryptionType": util.EncryptionTypeString(encType), } err = createRBDStorageClass(f.ClientSet, f, defaultSCName, nil, scOpts, deletePolicy) if err != nil { @@ -2092,7 +2127,7 @@ var _ = Describe("RBD", func() { } defer deleteTenantServiceAccount(f.UniqueName) - err = validateEncryptedPVCAndAppBinding(pvcPath, appPath, vaultTenantSAKMS, f) + err = validator(pvcPath, appPath, vaultTenantSAKMS, f) if err != nil { e2elog.Failf("failed to validate encrypted pvc: %v", err) } @@ -2109,37 +2144,41 @@ var _ = Describe("RBD", func() { } }) - By("create a PVC and bind it to an app with encrypted RBD volume with SecretsMetadataKMS", func() { - err := deleteResource(rbdExamplePath + "storageclass.yaml") - if err != nil { - e2elog.Failf("failed to delete storageclass: %v", err) - } - scOpts := map[string]string{ - "encrypted": "true", - "encryptionKMSID": "secrets-metadata-test", - } - err = createRBDStorageClass(f.ClientSet, f, defaultSCName, nil, scOpts, deletePolicy) - if err != nil { - e2elog.Failf("failed to create storageclass: %v", err) - } - err = validateEncryptedPVCAndAppBinding(pvcPath, appPath, noKMS, f) - if err != nil { - e2elog.Failf("failed to validate encrypted pvc: %v", err) - } - // validate created backend rbd images - validateRBDImageCount(f, 0, defaultRBDPool) - validateOmapCount(f, 0, rbdType, defaultRBDPool, volumesType) - err = deleteResource(rbdExamplePath + "storageclass.yaml") - if err != nil { - e2elog.Failf("failed to delete storageclass: %v", err) - } - err = createRBDStorageClass(f.ClientSet, f, defaultSCName, nil, nil, deletePolicy) - if err != nil { - e2elog.Failf("failed to create storageclass: %v", err) - } - }) + ByFileAndBlockEncryption("create a PVC and bind it to an app with encrypted RBD volume with SecretsMetadataKMS", + func(validator encryptionValidateFunc, _ validateFunc, encType util.EncryptionType) { + err := deleteResource(rbdExamplePath + "storageclass.yaml") + if err != nil { + e2elog.Failf("failed to delete storageclass: %v", err) + } + scOpts := map[string]string{ + "encrypted": "true", + "encryptionKMSID": "secrets-metadata-test", + "encryptionType": util.EncryptionTypeString(encType), + } + err = createRBDStorageClass(f.ClientSet, f, defaultSCName, nil, scOpts, deletePolicy) + if err != nil { + 
e2elog.Failf("failed to create storageclass: %v", err) + } + err = validator(pvcPath, appPath, noKMS, f) + if err != nil { + e2elog.Failf("failed to validate encrypted pvc: %v", err) + } + // validate created backend rbd images + validateRBDImageCount(f, 0, defaultRBDPool) + validateOmapCount(f, 0, rbdType, defaultRBDPool, volumesType) + err = deleteResource(rbdExamplePath + "storageclass.yaml") + if err != nil { + e2elog.Failf("failed to delete storageclass: %v", err) + } + err = createRBDStorageClass(f.ClientSet, f, defaultSCName, nil, nil, deletePolicy) + if err != nil { + e2elog.Failf("failed to create storageclass: %v", err) + } + }) - By("test RBD volume encryption with user secrets based SecretsMetadataKMS", func() { + ByFileAndBlockEncryption("test RBD volume encryption with user secrets based SecretsMetadataKMS", func( + validator encryptionValidateFunc, _ validateFunc, encType util.EncryptionType, + ) { err := deleteResource(rbdExamplePath + "storageclass.yaml") if err != nil { e2elog.Failf("failed to delete storageclass: %v", err) @@ -2147,6 +2186,7 @@ var _ = Describe("RBD", func() { scOpts := map[string]string{ "encrypted": "true", "encryptionKMSID": "user-ns-secrets-metadata-test", + "encryptionType": util.EncryptionTypeString(encType), } err = createRBDStorageClass(f.ClientSet, f, defaultSCName, nil, scOpts, deletePolicy) if err != nil { @@ -2162,7 +2202,7 @@ var _ = Describe("RBD", func() { e2elog.Failf("failed to create user Secret: %v", err) } - err = validateEncryptedPVCAndAppBinding(pvcPath, appPath, noKMS, f) + err = validator(pvcPath, appPath, noKMS, f) if err != nil { e2elog.Failf("failed to validate encrypted pvc: %v", err) } @@ -2190,9 +2230,9 @@ var _ = Describe("RBD", func() { } }) - By( + ByFileAndBlockEncryption( "test RBD volume encryption with user secrets based SecretsMetadataKMS with tenant namespace", - func() { + func(validator encryptionValidateFunc, isEncryptedPVC validateFunc, encType util.EncryptionType) { err := deleteResource(rbdExamplePath + "storageclass.yaml") if err != nil { e2elog.Failf("failed to delete storageclass: %v", err) @@ -2200,6 +2240,7 @@ var _ = Describe("RBD", func() { scOpts := map[string]string{ "encrypted": "true", "encryptionKMSID": "user-secrets-metadata-test", + "encryptionType": util.EncryptionTypeString(encType), } err = createRBDStorageClass(f.ClientSet, f, defaultSCName, nil, scOpts, deletePolicy) if err != nil { @@ -2215,7 +2256,7 @@ var _ = Describe("RBD", func() { e2elog.Failf("failed to create user Secret: %v", err) } - err = validateEncryptedPVCAndAppBinding(pvcPath, appPath, noKMS, f) + err = validator(pvcPath, appPath, noKMS, f) if err != nil { e2elog.Failf("failed to validate encrypted pvc: %v", err) } @@ -2298,7 +2339,8 @@ var _ = Describe("RBD", func() { noKMS, noKMS, defaultSCName, noDataPool, - f) + f, + noPVCValidation) }) By("create a PVC-PVC clone and bind it to an app", func() { @@ -2315,7 +2357,9 @@ var _ = Describe("RBD", func() { f) }) - By("create an encrypted PVC snapshot and restore it for an app with VaultKMS", func() { + ByFileAndBlockEncryption("create an encrypted PVC snapshot and restore it for an app with VaultKMS", func( + validator encryptionValidateFunc, isEncryptedPVC validateFunc, encType util.EncryptionType, + ) { err := deleteResource(rbdExamplePath + "storageclass.yaml") if err != nil { e2elog.Failf("failed to delete storageclass: %v", err) @@ -2323,6 +2367,7 @@ var _ = Describe("RBD", func() { scOpts := map[string]string{ "encrypted": "true", "encryptionKMSID": "vault-test", + 
"encryptionType": util.EncryptionTypeString(encType), } err = createRBDStorageClass(f.ClientSet, f, defaultSCName, nil, scOpts, deletePolicy) if err != nil { @@ -2333,7 +2378,7 @@ var _ = Describe("RBD", func() { pvcPath, appPath, snapshotPath, pvcClonePath, appClonePath, vaultKMS, vaultKMS, defaultSCName, noDataPool, - f) + f, isEncryptedPVC) err = deleteResource(rbdExamplePath + "storageclass.yaml") if err != nil { @@ -2345,7 +2390,9 @@ var _ = Describe("RBD", func() { } }) - By("Validate PVC restore from vaultKMS to vaultTenantSAKMS", func() { + ByFileAndBlockEncryption("Validate PVC restore from vaultKMS to vaultTenantSAKMS", func( + validator encryptionValidateFunc, isEncryptedPVC validateFunc, encType util.EncryptionType, + ) { restoreSCName := "restore-sc" err := deleteResource(rbdExamplePath + "storageclass.yaml") if err != nil { @@ -2354,6 +2401,7 @@ var _ = Describe("RBD", func() { scOpts := map[string]string{ "encrypted": "true", "encryptionKMSID": "vault-test", + "encryptionType": util.EncryptionTypeString(encType), } err = createRBDStorageClass(f.ClientSet, f, defaultSCName, nil, scOpts, deletePolicy) if err != nil { @@ -2363,6 +2411,7 @@ var _ = Describe("RBD", func() { scOpts = map[string]string{ "encrypted": "true", "encryptionKMSID": "vault-tenant-sa-test", + "encryptionType": util.EncryptionTypeString(encType), } err = createRBDStorageClass(f.ClientSet, f, restoreSCName, nil, scOpts, deletePolicy) if err != nil { @@ -2378,7 +2427,8 @@ var _ = Describe("RBD", func() { validatePVCSnapshot(1, pvcPath, appPath, snapshotPath, pvcClonePath, appClonePath, vaultKMS, vaultTenantSAKMS, - restoreSCName, noDataPool, f) + restoreSCName, noDataPool, f, + isEncryptedPVC) err = retryKubectlArgs(cephCSINamespace, kubectlDelete, deployTimeout, "storageclass", restoreSCName) if err != nil { @@ -2400,7 +2450,9 @@ var _ = Describe("RBD", func() { } }) - By("Validate PVC-PVC clone with different SC from vaultKMS to vaultTenantSAKMS", func() { + ByFileAndBlockEncryption("Validate PVC-PVC clone with different SC from vaultKMS to vaultTenantSAKMS", func( + validator encryptionValidateFunc, isValidPVC validateFunc, encType util.EncryptionType, + ) { restoreSCName := "restore-sc" err := deleteResource(rbdExamplePath + "storageclass.yaml") if err != nil { @@ -2409,6 +2461,7 @@ var _ = Describe("RBD", func() { scOpts := map[string]string{ "encrypted": "true", "encryptionKMSID": "vault-test", + "encryptionType": util.EncryptionTypeString(encType), } err = createRBDStorageClass(f.ClientSet, f, defaultSCName, nil, scOpts, deletePolicy) if err != nil { @@ -2418,6 +2471,7 @@ var _ = Describe("RBD", func() { scOpts = map[string]string{ "encrypted": "true", "encryptionKMSID": "vault-tenant-sa-test", + "encryptionType": util.EncryptionTypeString(encType), } err = createRBDStorageClass(f.ClientSet, f, restoreSCName, nil, scOpts, deletePolicy) if err != nil { @@ -2438,7 +2492,7 @@ var _ = Describe("RBD", func() { restoreSCName, noDataPool, secretsMetadataKMS, - isEncryptedPVC, + isValidPVC, f) err = retryKubectlArgs(cephCSINamespace, kubectlDelete, deployTimeout, "storageclass", restoreSCName) @@ -2460,7 +2514,9 @@ var _ = Describe("RBD", func() { } }) - By("create an encrypted PVC-PVC clone and bind it to an app", func() { + ByFileAndBlockEncryption("create an encrypted PVC-PVC clone and bind it to an app", func( + validator encryptionValidateFunc, isValidPVC validateFunc, encType util.EncryptionType, + ) { err := deleteResource(rbdExamplePath + "storageclass.yaml") if err != nil { e2elog.Failf("failed to 
delete storageclass: %v", err) @@ -2468,6 +2524,7 @@ var _ = Describe("RBD", func() { scOpts := map[string]string{ "encrypted": "true", "encryptionKMSID": "secrets-metadata-test", + "encryptionType": util.EncryptionTypeString(encType), } err = createRBDStorageClass(f.ClientSet, f, defaultSCName, nil, scOpts, deletePolicy) if err != nil { @@ -2482,7 +2539,7 @@ var _ = Describe("RBD", func() { defaultSCName, noDataPool, secretsMetadataKMS, - isEncryptedPVC, + isValidPVC, f) err = deleteResource(rbdExamplePath + "storageclass.yaml") @@ -2495,7 +2552,9 @@ var _ = Describe("RBD", func() { } }) - By("create an encrypted PVC-PVC clone and bind it to an app with VaultKMS", func() { + ByFileAndBlockEncryption("create an encrypted PVC-PVC clone and bind it to an app with VaultKMS", func( + validator encryptionValidateFunc, isValidPVC validateFunc, encType util.EncryptionType, + ) { err := deleteResource(rbdExamplePath + "storageclass.yaml") if err != nil { e2elog.Failf("failed to delete storageclass: %v", err) @@ -2503,6 +2562,7 @@ var _ = Describe("RBD", func() { scOpts := map[string]string{ "encrypted": "true", "encryptionKMSID": "vault-test", + "encryptionType": util.EncryptionTypeString(encType), } err = createRBDStorageClass(f.ClientSet, f, defaultSCName, nil, scOpts, deletePolicy) if err != nil { @@ -2517,7 +2577,7 @@ var _ = Describe("RBD", func() { defaultSCName, noDataPool, vaultKMS, - isEncryptedPVC, + isValidPVC, f) err = deleteResource(rbdExamplePath + "storageclass.yaml") @@ -3992,10 +4052,13 @@ var _ = Describe("RBD", func() { } }) - By("restore snapshot to bigger size encrypted PVC with VaultKMS", func() { + ByFileAndBlockEncryption("restore snapshot to bigger size encrypted PVC with VaultKMS", func( + _ encryptionValidateFunc, _ validateFunc, encType util.EncryptionType, + ) { scOpts := map[string]string{ "encrypted": "true", "encryptionKMSID": "vault-test", + "encryptionType": util.EncryptionTypeString(encType), } err := createRBDStorageClass(f.ClientSet, f, defaultSCName, nil, scOpts, deletePolicy) if err != nil { @@ -4027,15 +4090,17 @@ var _ = Describe("RBD", func() { if err != nil { e2elog.Failf("failed to validate restore bigger size clone: %v", err) } - // validate block mode PVC - err = validateBiggerPVCFromSnapshot(f, - rawPvcPath, - rawAppPath, - snapshotPath, - pvcBlockRestorePath, - appBlockRestorePath) - if err != nil { - e2elog.Failf("failed to validate restore bigger size clone: %v", err) + if encType != util.EncryptionTypeFile { + // validate block mode PVC + err = validateBiggerPVCFromSnapshot(f, + rawPvcPath, + rawAppPath, + snapshotPath, + pvcBlockRestorePath, + appBlockRestorePath) + if err != nil { + e2elog.Failf("failed to validate restore bigger size clone: %v", err) + } } }) @@ -4050,9 +4115,12 @@ var _ = Describe("RBD", func() { }) By("clone PVC to a bigger size PVC", func() { - By("clone PVC to bigger size encrypted PVC with VaultKMS", func() { + ByFileAndBlockEncryption("clone PVC to bigger size encrypted PVC with VaultKMS", func( + validator encryptionValidateFunc, _ validateFunc, encType util.EncryptionType, + ) { scOpts := map[string]string{ "encrypted": "true", + "encryptionType": util.EncryptionTypeString(encType), "encryptionKMSID": "vault-test", } err := createRBDStorageClass(f.ClientSet, f, defaultSCName, nil, scOpts, deletePolicy) @@ -4075,14 +4143,16 @@ var _ = Describe("RBD", func() { if err != nil { e2elog.Failf("failed to validate bigger size clone: %v", err) } - // validate block mode PVC - err = validateBiggerCloneFromPVC(f, - rawPvcPath, - 
rawAppPath, - pvcBlockSmartClonePath, - appBlockSmartClonePath) - if err != nil { - e2elog.Failf("failed to validate bigger size clone: %v", err) + if encType != util.EncryptionTypeFile { + // validate block mode PVC + err = validateBiggerCloneFromPVC(f, + rawPvcPath, + rawAppPath, + pvcBlockSmartClonePath, + appBlockSmartClonePath) + if err != nil { + e2elog.Failf("failed to validate bigger size clone: %v", err) + } } }) @@ -4268,14 +4338,38 @@ var _ = Describe("RBD", func() { validateOmapCount(f, 0, rbdType, defaultRBDPool, volumesType) }) - // Make sure this should be last testcase in this file, because - // it deletes pool - By("Create a PVC and delete PVC when backend pool deleted", func() { - err := pvcDeleteWhenPoolNotFound(pvcPath, false, f) + By("create a PVC and bind it to an app with encrypted RBD volume (default type setting)", func() { + err := deleteResource(rbdExamplePath + "storageclass.yaml") if err != nil { - e2elog.Failf("failed to delete PVC when pool not found: %v", err) + e2elog.Failf("failed to delete storageclass: %v", err) + } + err = createRBDStorageClass( + f.ClientSet, + f, + defaultSCName, + nil, + map[string]string{"encrypted": "true"}, + deletePolicy) + if err != nil { + e2elog.Failf("failed to create storageclass: %v", err) + } + err = validateEncryptedPVCAndAppBinding(pvcPath, appPath, noKMS, f) + if err != nil { + e2elog.Failf("failed to validate encrypted pvc: %v", err) + } + // validate created backend rbd images + validateRBDImageCount(f, 0, defaultRBDPool) + validateOmapCount(f, 0, rbdType, defaultRBDPool, volumesType) + err = deleteResource(rbdExamplePath + "storageclass.yaml") + if err != nil { + e2elog.Failf("failed to delete storageclass: %v", err) + } + err = createRBDStorageClass(f.ClientSet, f, defaultSCName, nil, nil, deletePolicy) + if err != nil { + e2elog.Failf("failed to create storageclass: %v", err) } }) + // delete RBD provisioner secret err := deleteCephUser(f, keyringRBDProvisionerUsername) if err != nil { @@ -4286,6 +4380,15 @@ var _ = Describe("RBD", func() { if err != nil { e2elog.Failf("failed to delete user %s: %v", keyringRBDNodePluginUsername, err) } + + // Make sure this should be last testcase in this file, because + // it deletes pool + By("Create a PVC and delete PVC when backend pool deleted", func() { + err := pvcDeleteWhenPoolNotFound(pvcPath, false, f) + if err != nil { + e2elog.Failf("failed to delete PVC when pool not found: %v", err) + } + }) }) }) }) diff --git a/e2e/rbd_helper.go b/e2e/rbd_helper.go index ab1a11fc7..498a2f052 100644 --- a/e2e/rbd_helper.go +++ b/e2e/rbd_helper.go @@ -482,6 +482,8 @@ func validateCloneInDifferentPool(f *framework.Framework, snapshotPool, cloneSc, return nil } +type encryptionValidateFunc func(pvcPath, appPath string, kms kmsConfig, f *framework.Framework) error + func validateEncryptedPVCAndAppBinding(pvcPath, appPath string, kms kmsConfig, f *framework.Framework) error { pvc, app, err := createPVCAndAppBinding(pvcPath, appPath, f, deployTimeout) if err != nil { @@ -531,20 +533,84 @@ func validateEncryptedPVCAndAppBinding(pvcPath, appPath string, kms kmsConfig, f return nil } +func validateEncryptedFilesystemAndAppBinding(pvcPath, appPath string, kms kmsConfig, f *framework.Framework) error { + pvc, app, err := createPVCAndAppBinding(pvcPath, appPath, f, deployTimeout) + if err != nil { + return err + } + imageData, err := getImageInfoFromPVC(pvc.Namespace, pvc.Name, f) + if err != nil { + return err + } + + rbdImageSpec := imageSpec(defaultRBDPool, imageData.imageName) + err = 
validateEncryptedFilesystem(f, rbdImageSpec, imageData.pvName, app.Name) + if err != nil { + return err + } + + if kms != noKMS && kms.canGetPassphrase() { + // check new passphrase created + _, stdErr := kms.getPassphrase(f, imageData.csiVolumeHandle) + if stdErr != "" { + return fmt.Errorf("failed to read passphrase from vault: %s", stdErr) + } + } + + err = deletePVCAndApp("", f, pvc, app) + if err != nil { + return err + } + + if kms != noKMS && kms.canGetPassphrase() { + // check passphrase was deleted + stdOut, _ := kms.getPassphrase(f, imageData.csiVolumeHandle) + if stdOut != "" { + return fmt.Errorf("passphrase found in vault while it should have been deleted: %s", stdOut) + } + } + + if kms != noKMS && kms.canVerifyKeyDestroyed() { + destroyed, msg := kms.verifyKeyDestroyed(f, imageData.csiVolumeHandle) + if !destroyed { + return fmt.Errorf("passphrase was not destroyed: %s", msg) + } else if msg != "" { + e2elog.Logf("passphrase destroyed, but message returned: %s", msg) + } + } + + return nil +} + type validateFunc func(f *framework.Framework, pvc *v1.PersistentVolumeClaim, app *v1.Pod) error // noPVCValidation can be used to pass to validatePVCClone when no extra // validation of the PVC is needed. var noPVCValidation validateFunc -func isEncryptedPVC(f *framework.Framework, pvc *v1.PersistentVolumeClaim, app *v1.Pod) error { +type imageValidateFunc func(f *framework.Framework, rbdImageSpec, pvName, appName string) error + +func isEncryptedPVC( + f *framework.Framework, + pvc *v1.PersistentVolumeClaim, + app *v1.Pod, + validateFunc imageValidateFunc, +) error { imageData, err := getImageInfoFromPVC(pvc.Namespace, pvc.Name, f) if err != nil { return err } rbdImageSpec := imageSpec(defaultRBDPool, imageData.imageName) - return validateEncryptedImage(f, rbdImageSpec, imageData.pvName, app.Name) + return validateFunc(f, rbdImageSpec, imageData.pvName, app.Name) +} + +func isBlockEncryptedPVC(f *framework.Framework, pvc *v1.PersistentVolumeClaim, app *v1.Pod) error { + return isEncryptedPVC(f, pvc, app, validateEncryptedImage) +} + +func isFileEncryptedPVC(f *framework.Framework, pvc *v1.PersistentVolumeClaim, app *v1.Pod) error { + return isEncryptedPVC(f, pvc, app, validateEncryptedFilesystem) } // validateEncryptedImage verifies that the RBD image is encrypted. The @@ -583,6 +649,48 @@ func validateEncryptedImage(f *framework.Framework, rbdImageSpec, pvName, appNam return nil } +func validateEncryptedFilesystem(f *framework.Framework, rbdImageSpec, pvName, appName string) error { + pod, err := f.ClientSet.CoreV1().Pods(f.UniqueName).Get(context.TODO(), appName, metav1.GetOptions{}) + if err != nil { + return fmt.Errorf("failed to get pod %q in namespace %q: %w", appName, f.UniqueName, err) + } + volumeMountPath := fmt.Sprintf( + "/var/lib/kubelet/pods/%s/volumes/kubernetes.io~csi/%s/mount", + pod.UID, + pvName) + + selector, err := getDaemonSetLabelSelector(f, cephCSINamespace, rbdDaemonsetName) + if err != nil { + return fmt.Errorf("failed to get labels: %w", err) + } + opt := metav1.ListOptions{ + LabelSelector: selector, + } + cmd := fmt.Sprintf("lsattr -la %s | grep -E '%s/.\\s+Encrypted'", volumeMountPath, volumeMountPath) + _, _, err = execCommandInContainer(f, cmd, cephCSINamespace, "csi-rbdplugin", &opt) + if err != nil { + cmd = fmt.Sprintf("lsattr -lRa %s", volumeMountPath) + stdOut, stdErr, listErr := execCommandInContainer(f, cmd, cephCSINamespace, "csi-rbdplugin", &opt) + if listErr == nil { + return fmt.Errorf("error checking file encrypted attribute of %q.
listing filesystem+attrs: %s %s", + volumeMountPath, stdOut, stdErr) + } + + return fmt.Errorf("error checking file encrypted attribute: %w", err) + } + + mountType, err := getMountType(selector, volumeMountPath, f) + if err != nil { + return err + } + if mountType == "crypt" { + return fmt.Errorf("mount type of %q is %v suggesting that the block device was encrypted,"+ + " when it must not have been", volumeMountPath, mountType) + } + + return nil +} + func listRBDImages(f *framework.Framework, pool string) ([]string, error) { var imgInfos []string diff --git a/e2e/utils.go b/e2e/utils.go index 58ed04efa..4e259805f 100644 --- a/e2e/utils.go +++ b/e2e/utils.go @@ -85,6 +85,7 @@ var ( deployNFS bool testCephFS bool testRBD bool + testRBDFSCrypt bool testNBD bool testNFS bool helmTest bool @@ -1025,6 +1026,7 @@ func validatePVCSnapshot( pvcPath, appPath, snapshotPath, pvcClonePath, appClonePath string, kms, restoreKMS kmsConfig, restoreSCName, dataPool string, f *framework.Framework, + isEncryptedPVC validateFunc, ) { var wg sync.WaitGroup wgErrs := make([]error, totalCount) @@ -1448,7 +1450,7 @@ func validateController( } if scParams["encrypted"] == strconv.FormatBool(true) { // check encryption - err = isEncryptedPVC(f, resizePvc, app) + err = isBlockEncryptedPVC(f, resizePvc, app) if err != nil { return err } diff --git a/examples/rbd/storageclass.yaml b/examples/rbd/storageclass.yaml index a30114667..7270b642b 100644 --- a/examples/rbd/storageclass.yaml +++ b/examples/rbd/storageclass.yaml @@ -108,6 +108,14 @@ parameters: # A string is expected here, i.e. "true", not true. # encrypted: "true" + # (optional) Select the encryption type when encrypted: "true" above. + # Valid values are: + # "file": Enable file encryption on the mounted filesystem + # "block": Encrypt RBD block device + # When unspecified, type "block" is assumed. "file" and "block" are + # mutually exclusive. + # encryptionType: "block" + # (optional) Use external key management system for encryption passphrases by # specifying a unique ID matching KMS ConfigMap. The ID is only used for # correlation to configmap entry.
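Illustrative only, not part of this patch: a StorageClass that opts in to the new file-based encryption could look like the sketch below. The clusterID, pool, KMS ID and secret names are placeholders that must match the cluster's configuration; encryptionType falls back to "block" when omitted.

apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: csi-rbd-sc-fscrypt
provisioner: rbd.csi.ceph.com
parameters:
  clusterID: <cluster-id>
  pool: rbd
  imageFeatures: layering
  csi.storage.k8s.io/fstype: ext4
  encrypted: "true"
  # new parameter added by this change; "file" enables fscrypt on the mounted filesystem
  encryptionType: "file"
  encryptionKMSID: vault-test
  csi.storage.k8s.io/provisioner-secret-name: csi-rbd-secret
  csi.storage.k8s.io/provisioner-secret-namespace: ceph-csi
  csi.storage.k8s.io/node-stage-secret-name: csi-rbd-secret
  csi.storage.k8s.io/node-stage-secret-namespace: ceph-csi
reclaimPolicy: Delete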
diff --git a/go.mod b/go.mod index e47f1b7d1..f33313795 100644 --- a/go.mod +++ b/go.mod @@ -14,6 +14,7 @@ require ( github.com/csi-addons/spec v0.1.2-0.20220906123848-52ce69f90900 github.com/gemalto/kmip-go v0.0.8-0.20220721195433-3fe83e2d3f26 github.com/golang/protobuf v1.5.2 + github.com/google/fscrypt v0.3.3 github.com/google/uuid v1.3.0 github.com/grpc-ecosystem/go-grpc-middleware v1.3.0 github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 @@ -23,6 +24,7 @@ require ( github.com/libopenstorage/secrets v0.0.0-20210908194121-a1d19aa9713a github.com/onsi/ginkgo/v2 v2.1.6 github.com/onsi/gomega v1.20.1 + github.com/pkg/xattr v0.4.7 github.com/prometheus/client_golang v1.12.2 github.com/stretchr/testify v1.8.0 golang.org/x/crypto v0.0.0-20220315160706-3147a52a75dd @@ -149,7 +151,7 @@ require ( go.uber.org/zap v1.21.0 // indirect golang.org/x/oauth2 v0.0.0-20211104180415-d3ed0bb246c8 // indirect golang.org/x/term v0.0.0-20210927222741-03fcf44c2211 // indirect - golang.org/x/text v0.3.7 // indirect + golang.org/x/text v0.3.8 // indirect golang.org/x/time v0.0.0-20220210224613-90d013bbcef8 // indirect gomodules.xyz/jsonpatch/v2 v2.2.0 // indirect google.golang.org/appengine v1.6.7 // indirect diff --git a/go.sum b/go.sum index 2552be644..137b8e2db 100644 --- a/go.sum +++ b/go.sum @@ -485,6 +485,8 @@ github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ github.com/google/btree v1.0.1/go.mod h1:xXMiIv4Fb/0kKde4SpL7qlzvu5cMJDRkFDxJfI9uaxA= github.com/google/cadvisor v0.45.0/go.mod h1:vsMT3Uv2XjQ8M7WUtKARV74mU/HN64C4XtM1bJhUKcU= github.com/google/cel-go v0.12.4/go.mod h1:Av7CU6r6X3YmcHR9GXqVDaEJYfEtSxl6wvIjUQTriCw= +github.com/google/fscrypt v0.3.3 h1:qwx9OCR/xZE68VGr/r0/yugFhlGpIOGsH9JHrttP7vc= +github.com/google/fscrypt v0.3.3/go.mod h1:H1JHtH8BVe0dYNhzx1Ztkn3azQ0OBdoOmM828vEWAXc= github.com/google/gnostic v0.5.7-v3refs h1:FhTMOKj2VhjpouxvWJAV1TL304uMlb9zcDqkl6cEI54= github.com/google/gnostic v0.5.7-v3refs/go.mod h1:73MKFl6jIHelAJNaBGFzt3SPtZULs9dYrGFt8OiIsHQ= github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= @@ -991,6 +993,8 @@ github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINE github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/sftp v1.10.1/go.mod h1:lYOWFsE0bwd1+KfKJaKeuokY15vzFx25BLbzYYoAxZI= +github.com/pkg/xattr v0.4.7 h1:XoA3KzmFvyPlH4RwX5eMcgtzcaGBaSvgt3IoFQfbrmQ= +github.com/pkg/xattr v0.4.7/go.mod h1:di8WF84zAKk8jzR1UBTEWh9AUlIZZ7M/JNt8e9B6ktU= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/portworx/dcos-secrets v0.0.0-20180616013705-8e8ec3f66611/go.mod h1:4hklRW/4DQpLqkcXcjtNprbH2tz/sJaNtqinfPWl/LA= @@ -1143,6 +1147,7 @@ github.com/vishvananda/netlink v1.1.0/go.mod h1:cTgwzPIzzgDAYoQrMm0EdrjRUBkTqKYp github.com/vishvananda/netns v0.0.0-20191106174202-0a2b9b5464df/go.mod h1:JP3t17pCcGlemwknint6hfoeCVQrEMVwxRLRjXpq+BU= github.com/vishvananda/netns v0.0.0-20200728191858-db3c7e526aae/go.mod h1:DD4vA1DwXk04H54A1oHXtwZmA0grkVMdPxx/VGLCah0= github.com/vmware/govmomi v0.20.3/go.mod h1:URlwyTFZX72RmxtxuaFL2Uj3fD1JTvZdx59bHWk6aFU= +github.com/wadey/gocovmerge v0.0.0-20160331181800-b5bfa59ec0ad/go.mod h1:Hy8o65+MXnS6EwGElrSRjUzQDLXreJlzYLlWiHtt8hM= github.com/xdg/scram v0.0.0-20180814205039-7eeb5667e42c/go.mod 
h1:lB8K/P019DLNhemzwFU4jHLhdvlE6uDZjXFejJXr49I= github.com/xdg/stringprep v1.0.0/go.mod h1:Jhud4/sHMO4oL310DaZAKk9ZaJ08SJfe+sJh0HrGL1Y= github.com/xi2/xz v0.0.0-20171230120015-48954b6210f8/go.mod h1:HUYIGzjTL3rfEspMxjDjgmT5uz5wzYJKVo23qUhYTos= @@ -1497,12 +1502,14 @@ golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20220114195835-da31bd327af9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220319134239-a9b59b0215f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220328115105-d36c6a25d886/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220408201424-a24fb2fb8a0f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220422013727-9388b58f7150/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f h1:v4INt8xihDGvnrfjMDVXGxw9wrfxYyCjk0KbXjhR55s= golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= +golang.org/x/term v0.0.0-20210422114643-f5beecf764ed/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210615171337-6886f2dfbf5b/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211 h1:JGgROgKl9N8DuW20oFS5gxc+lE67/N3FcwmBPMe7ArY= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= @@ -1515,8 +1522,9 @@ golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.3.7 h1:olpwvP2KacW1ZWvsR7uQhoyTYvKAupfQrRGBFM352Gk= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= +golang.org/x/text v0.3.8 h1:nAL+RVCQ9uMn3vJZbV+MRnydTJFPf8qqY42YiA6MrqY= +golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ= golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= @@ -1551,6 +1559,7 @@ golang.org/x/tools v0.0.0-20190718200317-82a3ea8a504c/go.mod h1:jcCCGcm9btYwXyDq golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191025023517-2077df36852e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191108193012-7d206e10da11/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191112195655-aa38f8e97acc/go.mod 
h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= diff --git a/internal/cephfs/core/metadata.go b/internal/cephfs/core/metadata.go index d5fb8a73a..9e2b90d5f 100644 --- a/internal/cephfs/core/metadata.go +++ b/internal/cephfs/core/metadata.go @@ -40,7 +40,7 @@ func (s *subVolumeClient) supportsSubVolMetadata() bool { func (s *subVolumeClient) isUnsupportedSubVolMetadata(err error) bool { var invalid fsAdmin.NotImplementedError - if err != nil && errors.Is(err, &invalid) { + if err != nil && errors.As(err, &invalid) { // In case the error is other than invalid command return error to the caller. clusterAdditionalInfo[s.clusterID].subVolMetadataState = unsupported diff --git a/internal/cephfs/core/snapshot_metadata.go b/internal/cephfs/core/snapshot_metadata.go index 5cf67ddc0..f168fbf8c 100644 --- a/internal/cephfs/core/snapshot_metadata.go +++ b/internal/cephfs/core/snapshot_metadata.go @@ -36,7 +36,7 @@ func (s *snapshotClient) supportsSubVolSnapMetadata() bool { func (s *snapshotClient) isUnsupportedSubVolSnapMetadata(err error) bool { var invalid fsAdmin.NotImplementedError - if err != nil && errors.Is(err, &invalid) { + if err != nil && errors.As(err, &invalid) { // In case the error is other than invalid command return error to // the caller. clusterAdditionalInfo[s.clusterID].subVolSnapshotMetadataState = unsupported diff --git a/internal/cephfs/store/fsjournal.go b/internal/cephfs/store/fsjournal.go index d2a7e0678..656304efe 100644 --- a/internal/cephfs/store/fsjournal.go +++ b/internal/cephfs/store/fsjournal.go @@ -90,7 +90,7 @@ func CheckVolExists(ctx context.Context, defer j.Destroy() imageData, err := j.CheckReservation( - ctx, volOptions.MetadataPool, volOptions.RequestName, volOptions.NamePrefix, "", "") + ctx, volOptions.MetadataPool, volOptions.RequestName, volOptions.NamePrefix, "", "", util.EncryptionTypeNone) if err != nil { return nil, err } @@ -278,7 +278,7 @@ func ReserveVol(ctx context.Context, volOptions *VolumeOptions, secret map[strin imageUUID, vid.FsSubvolName, err = j.ReserveName( ctx, volOptions.MetadataPool, util.InvalidPoolID, volOptions.MetadataPool, util.InvalidPoolID, volOptions.RequestName, - volOptions.NamePrefix, "", "", volOptions.ReservedID, "", volOptions.BackingSnapshotID) + volOptions.NamePrefix, "", "", volOptions.ReservedID, "", volOptions.BackingSnapshotID, util.EncryptionTypeNone) if err != nil { return nil, err } @@ -321,7 +321,7 @@ func ReserveSnap( imageUUID, vid.FsSnapshotName, err = j.ReserveName( ctx, volOptions.MetadataPool, util.InvalidPoolID, volOptions.MetadataPool, util.InvalidPoolID, snap.RequestName, - snap.NamePrefix, parentSubVolName, "", snap.ReservedID, "", "") + snap.NamePrefix, parentSubVolName, "", snap.ReservedID, "", "", util.EncryptionTypeNone) if err != nil { return nil, err } @@ -390,7 +390,7 @@ func CheckSnapExists( defer j.Destroy() snapData, err := j.CheckReservation( - ctx, volOptions.MetadataPool, snap.RequestName, snap.NamePrefix, volOptions.VolID, "") + ctx, volOptions.MetadataPool, snap.RequestName, snap.NamePrefix, volOptions.VolID, "", util.EncryptionTypeNone) if err != nil { return nil, nil, err } diff --git a/internal/journal/voljournal.go b/internal/journal/voljournal.go index 174bb72b4..286d84658 100644 --- a/internal/journal/voljournal.go +++ b/internal/journal/voljournal.go @@ -149,6 +149,9 @@ type Config struct { // encryptKMS in which encryption passphrase was saved, default is no encryption 
encryptKMSKey string + // encryptionType is the type of encryption (block or file) used on the volume, default is no encryption + encryptionType string + // ownerKey is used to identify the owner of the volume, can be used with some KMS configurations ownerKey string @@ -172,6 +175,7 @@ func NewCSIVolumeJournal(suffix string) *Config { namespace: "", csiImageIDKey: "csi.imageid", encryptKMSKey: "csi.volume.encryptKMS", + encryptionType: "csi.volume.encryptionType", ownerKey: "csi.volume.owner", backingSnapshotIDKey: "csi.volume.backingsnapshotid", commonPrefix: "csi.", @@ -191,6 +195,7 @@ func NewCSISnapshotJournal(suffix string) *Config { namespace: "", csiImageIDKey: "csi.imageid", encryptKMSKey: "csi.volume.encryptKMS", + encryptionType: "csi.volume.encryptionType", ownerKey: "csi.volume.owner", commonPrefix: "csi.", } @@ -280,6 +285,7 @@ Return values: */ func (conn *Connection) CheckReservation(ctx context.Context, journalPool, reqName, namePrefix, snapParentName, kmsConfig string, + encryptionType util.EncryptionType, ) (*ImageData, error) { var ( snapSource bool @@ -377,6 +383,16 @@ func (conn *Connection) CheckReservation(ctx context.Context, } } + if encryptionType != util.EncryptionTypeNone { + if savedImageAttributes.EncryptionType != encryptionType { + return nil, fmt.Errorf("internal state inconsistent, omap encryption type"+ + " mismatch, request type %q(%d) volume UUID (%s) volume omap encryption type %q (%d)", + util.EncryptionTypeString(encryptionType), encryptionType, + objUUID, util.EncryptionTypeString(savedImageAttributes.EncryptionType), + savedImageAttributes.EncryptionType) + } + } + // TODO: skipping due to excessive poolID to poolname call, also this should never happen! // check if journal pool points back to the passed in journal pool // if savedJournalPoolID != journalPoolID { @@ -530,6 +546,7 @@ Input arguments: - namePrefix: Prefix to use when generating the image/subvolume name (suffix is an auto-generated UUID) - parentName: Name of the parent image/subvolume if reservation is for a snapshot (optional) - kmsConf: Name of the key management service used to encrypt the image (optional) + - encryptionType: Type of encryption used when kmsConf is set (optional) - volUUID: UUID need to be reserved instead of auto-generating one (this is useful for mirroring and metro-DR) - owner: the owner of the volume (optional) - backingSnapshotID: ID of the snapshot on which the CephFS snapshot-backed volume is based (optional) @@ -544,6 +561,7 @@ func (conn *Connection) ReserveName(ctx context.Context, imagePool string, imagePoolID int64, reqName, namePrefix, parentName, kmsConf, volUUID, owner, backingSnapshotID string, + encryptionType util.EncryptionType, ) (string, string, error) { // TODO: Take in-arg as ImageAttributes? var ( @@ -624,6 +642,7 @@ func (conn *Connection) ReserveName(ctx context.Context, // Update UUID directory to store encryption values if kmsConf != "" { omapValues[cj.encryptKMSKey] = kmsConf + omapValues[cj.encryptionType] = util.EncryptionTypeString(encryptionType) } // if owner is passed, set it in the UUID directory too @@ -660,14 +679,15 @@ func (conn *Connection) ReserveName(ctx context.Context, // ImageAttributes contains all CSI stored image attributes, typically as OMap keys.
type ImageAttributes struct { - RequestName string // Contains the request name for the passed in UUID - SourceName string // Contains the parent image name for the passed in UUID, if it is a snapshot - ImageName string // Contains the image or subvolume name for the passed in UUID - KmsID string // Contains encryption KMS, if it is an encrypted image - Owner string // Contains the owner to be used in combination with KmsID (for some KMS) - ImageID string // Contains the image id - JournalPoolID int64 // Pool ID of the CSI journal pool, stored in big endian format (on-disk data) - BackingSnapshotID string // ID of the snapshot on which the CephFS snapshot-backed volume is based + RequestName string // Contains the request name for the passed in UUID + SourceName string // Contains the parent image name for the passed in UUID, if it is a snapshot + ImageName string // Contains the image or subvolume name for the passed in UUID + KmsID string // Contains encryption KMS, if it is an encrypted image + EncryptionType util.EncryptionType // Type of encryption used, if image encrypted + Owner string // Contains the owner to be used in combination with KmsID (for some KMS) + ImageID string // Contains the image id + JournalPoolID int64 // Pool ID of the CSI journal pool, stored in big endian format (on-disk data) + BackingSnapshotID string // ID of the snapshot on which the CephFS snapshot-backed volume is based } // GetImageAttributes fetches all keys and their values, from a UUID directory, returning ImageAttributes structure. @@ -692,6 +712,7 @@ func (conn *Connection) GetImageAttributes( cj.csiNameKey, cj.csiImageKey, cj.encryptKMSKey, + cj.encryptionType, cj.csiJournalPool, cj.cephSnapSourceKey, cj.csiImageIDKey, @@ -711,6 +732,7 @@ func (conn *Connection) GetImageAttributes( var found bool imageAttributes.RequestName = values[cj.csiNameKey] imageAttributes.KmsID = values[cj.encryptKMSKey] + imageAttributes.EncryptionType = util.ParseEncryptionType(values[cj.encryptionType]) imageAttributes.Owner = values[cj.ownerKey] imageAttributes.ImageID = values[cj.csiImageIDKey] imageAttributes.BackingSnapshotID = values[cj.backingSnapshotIDKey] diff --git a/internal/kms/aws_metadata.go b/internal/kms/aws_metadata.go index f77bce6f3..f74a5f581 100644 --- a/internal/kms/aws_metadata.go +++ b/internal/kms/aws_metadata.go @@ -226,3 +226,7 @@ func (kms *awsMetadataKMS) DecryptDEK(volumeID, encryptedDEK string) (string, er return string(result.Plaintext), nil } + +func (kms *awsMetadataKMS) GetSecret(volumeID string) (string, error) { + return "", ErrGetSecretUnsupported +} diff --git a/internal/kms/dummy.go b/internal/kms/dummy.go new file mode 100644 index 000000000..fdf7ac459 --- /dev/null +++ b/internal/kms/dummy.go @@ -0,0 +1,71 @@ +/* +Copyright 2022 The Ceph-CSI Authors. +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + http://www.apache.org/licenses/LICENSE-2.0 +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package kms + +import "encoding/base64" + +type TestDummyFunc func() EncryptionKMS + +type ProviderTest struct { + UniqueID string + CreateTestDummy TestDummyFunc +} + +type kmsTestProviderList struct { + providers map[string]ProviderTest +} + +var kmsTestManager = kmsTestProviderList{providers: map[string]ProviderTest{}} + +func RegisterTestProvider(provider ProviderTest) bool { + kmsTestManager.providers[provider.UniqueID] = provider + + return true +} + +func GetKMSTestDummy(kmsID string) EncryptionKMS { + provider, ok := kmsTestManager.providers[kmsID] + if !ok { + return nil + } + + return provider.CreateTestDummy() +} + +func GetKMSTestProvider() map[string]ProviderTest { + return kmsTestManager.providers +} + +func newDefaultTestDummy() EncryptionKMS { + return secretsKMS{passphrase: base64.URLEncoding.EncodeToString( + []byte("test dummy passphrase"))} +} + +func newSecretsMetadataTestDummy() EncryptionKMS { + smKMS := secretsMetadataKMS{} + smKMS.secretsKMS = secretsKMS{passphrase: base64.URLEncoding.EncodeToString( + []byte("test dummy passphrase"))} + + return smKMS +} + +var _ = RegisterTestProvider(ProviderTest{ + UniqueID: kmsTypeSecretsMetadata, + CreateTestDummy: newSecretsMetadataTestDummy, +}) + +var _ = RegisterTestProvider(ProviderTest{ + UniqueID: DefaultKMSType, + CreateTestDummy: newDefaultTestDummy, +}) diff --git a/internal/kms/keyprotect.go b/internal/kms/keyprotect.go index fdc795053..d020c75b5 100644 --- a/internal/kms/keyprotect.go +++ b/internal/kms/keyprotect.go @@ -242,3 +242,7 @@ func (kms *keyProtectKMS) DecryptDEK(volumeID, encryptedDEK string) (string, err return string(result), nil } + +func (kms *keyProtectKMS) GetSecret(volumeID string) (string, error) { + return "", ErrGetSecretUnsupported +} diff --git a/internal/kms/kmip.go b/internal/kms/kmip.go index 250d8f1db..f0e1fac2b 100644 --- a/internal/kms/kmip.go +++ b/internal/kms/kmip.go @@ -500,6 +500,10 @@ func (kms *kmipKMS) verifyResponse( return &batchItem, nil } +func (kms *kmipKMS) GetSecret(volumeID string) (string, error) { + return "", ErrGetSecretUnsupported +} + // TODO: use the following structs from https://github.com/gemalto/kmip-go // when https://github.com/ThalesGroup/kmip-go/issues/21 is resolved. // refer: https://docs.oasis-open.org/kmip/spec/v1.4/kmip-spec-v1.4.html. diff --git a/internal/kms/kms.go b/internal/kms/kms.go index a42e6d0df..82eadfd3e 100644 --- a/internal/kms/kms.go +++ b/internal/kms/kms.go @@ -19,6 +19,7 @@ package kms import ( "context" "encoding/json" + "errors" "fmt" "os" @@ -53,6 +54,11 @@ const ( DefaultKMSType = "default" ) +var ( + ErrGetSecretUnsupported = errors.New("KMS does not support access to user provided secret") + ErrGetSecretIntegrated = errors.New("integrated DEK stores do not allow GetSecret") +) + // GetKMS returns an instance of Key Management System. // // - tenant is the owner of the Volume, used to fetch the Vault Token from the @@ -332,6 +338,11 @@ type EncryptionKMS interface { // function does not need to do anything except return the encyptedDEK // as it was received. DecryptDEK(volumeID, encyptedDEK string) (string, error) + + // GetSecret allows external key management systems to + // retrieve keys used in EncryptDEK / DecryptDEK to use them + // directly. 
Example: fscrypt uses this to unlock raw protectors + GetSecret(volumeID string) (string, error) } // DEKStoreType describes what DEKStore needs to be configured when using a @@ -377,6 +388,10 @@ func (i integratedDEK) DecryptDEK(volumeID, encyptedDEK string) (string, error) return encyptedDEK, nil } +func (i integratedDEK) GetSecret(volumeID string) (string, error) { + return "", ErrGetSecretIntegrated +} + // getKeys takes a map that uses strings for keys and returns a slice with the // keys. func getKeys(m map[string]interface{}) []string { diff --git a/internal/kms/secretskms.go b/internal/kms/secretskms.go index 6fb479310..4b4866c79 100644 --- a/internal/kms/secretskms.go +++ b/internal/kms/secretskms.go @@ -263,6 +263,11 @@ func (kms secretsMetadataKMS) DecryptDEK(volumeID, encryptedDEK string) (string, return string(dek), nil } +func (kms secretsMetadataKMS) GetSecret(volumeID string) (string, error) { + // use the passphrase from the secretKMS + return kms.secretsKMS.FetchDEK(volumeID) +} + // generateCipher returns a AEAD cipher based on a passphrase and salt // (volumeID). The cipher can then be used to encrypt/decrypt the DEK. func generateCipher(passphrase, salt string) (cipher.AEAD, error) { diff --git a/internal/rbd/controllerserver.go b/internal/rbd/controllerserver.go index 2814813d1..0ed109ae4 100644 --- a/internal/rbd/controllerserver.go +++ b/internal/rbd/controllerserver.go @@ -384,7 +384,11 @@ func (cs *ControllerServer) CreateVolume( metadata := k8s.GetVolumeMetadata(req.GetParameters()) err = rbdVol.setAllMetadata(metadata) if err != nil { - return nil, err + if deleteErr := rbdVol.deleteImage(ctx); deleteErr != nil { + log.ErrorLog(ctx, "failed to delete rbd image: %s with error: %v", rbdVol, deleteErr) + } + + return nil, status.Error(codes.Internal, err.Error()) } return buildCreateVolumeResponse(req, rbdVol), nil @@ -1556,7 +1560,7 @@ func (cs *ControllerServer) ControllerExpandVolume( // 2. Block VolumeMode with Encryption // Hence set nodeExpansion flag based on VolumeMode and Encryption status nodeExpansion := true - if req.GetVolumeCapability().GetBlock() != nil && !rbdVol.isEncrypted() { + if req.GetVolumeCapability().GetBlock() != nil && !rbdVol.isBlockEncrypted() { nodeExpansion = false } diff --git a/internal/rbd/encryption.go b/internal/rbd/encryption.go index 0b632aa99..ea65f14aa 100644 --- a/internal/rbd/encryption.go +++ b/internal/rbd/encryption.go @@ -61,6 +61,13 @@ const ( // DEK is stored. metadataDEK = "rbd.csi.ceph.com/dek" oldMetadataDEK = ".rbd.csi.ceph.com/dek" + + encryptionPassphraseSize = 20 + + // rbdDefaultEncryptionType is the default to use when the + // user did not specify an "encryptionType", but set + // "encryption": true. + rbdDefaultEncryptionType = util.EncryptionTypeBlock ) // checkRbdImageEncrypted verifies if rbd image was encrypted when created. @@ -91,16 +98,30 @@ func (ri *rbdImage) ensureEncryptionMetadataSet(status rbdEncryptionState) error return nil } -// isEncrypted returns `true` if the rbdImage is (or needs to be) encrypted. -func (ri *rbdImage) isEncrypted() bool { - return ri.encryption != nil +// isBlockEncrypted returns `true` if the rbdImage is (or needs to be) encrypted. +func (ri *rbdImage) isBlockEncrypted() bool { + return ri.blockEncryption != nil } -// setupEncryption configures the metadata of the RBD image for encryption: +// isFileEncrypted returns `true` if the filesystem on the rbdImage is (or needs to be) encrypted. 
+func (ri *rbdImage) isFileEncrypted() bool { + return ri.fileEncryption != nil +} + +func IsFileEncrypted(ctx context.Context, volOptions map[string]string) (bool, error) { + _, encType, err := ParseEncryptionOpts(ctx, volOptions, util.EncryptionTypeInvalid) + if err != nil { + return false, err + } + + return encType == util.EncryptionTypeFile, nil +} + +// setupBlockEncryption configures the metadata of the RBD image for encryption: // - the Data-Encryption-Key (DEK) will be generated stored for use by the KMS; // - the RBD image will be marked to support encryption in its metadata. -func (ri *rbdImage) setupEncryption(ctx context.Context) error { - err := ri.encryption.StoreNewCryptoPassphrase(ri.VolID) +func (ri *rbdImage) setupBlockEncryption(ctx context.Context) error { + err := ri.blockEncryption.StoreNewCryptoPassphrase(ri.VolID, encryptionPassphraseSize) if err != nil { log.ErrorLog(ctx, "failed to save encryption passphrase for "+ "image %s: %s", ri, err) @@ -130,7 +151,7 @@ func (ri *rbdImage) setupEncryption(ctx context.Context) error { // (Usecase: Restoring snapshot into a storageclass with different encryption config). func (ri *rbdImage) copyEncryptionConfig(cp *rbdImage, copyOnlyPassphrase bool) error { // nothing to do if parent image is not encrypted. - if !ri.isEncrypted() { + if !ri.isBlockEncrypted() && !ri.isFileEncrypted() { return nil } @@ -139,25 +160,54 @@ func (ri *rbdImage) copyEncryptionConfig(cp *rbdImage, copyOnlyPassphrase bool) "set!? Call stack: %s", ri, cp, ri.VolID, util.CallStack()) } - // get the unencrypted passphrase - passphrase, err := ri.encryption.GetCryptoPassphrase(ri.VolID) - if err != nil { - return fmt.Errorf("failed to fetch passphrase for %q: %w", - ri, err) - } + if ri.isBlockEncrypted() { + // get the unencrypted passphrase + passphrase, err := ri.blockEncryption.GetCryptoPassphrase(ri.VolID) + if err != nil { + return fmt.Errorf("failed to fetch passphrase for %q: %w", + ri, err) + } - if !copyOnlyPassphrase { - cp.encryption, err = util.NewVolumeEncryption(ri.encryption.GetID(), ri.encryption.KMS) - if errors.Is(err, util.ErrDEKStoreNeeded) { - cp.encryption.SetDEKStore(cp) + if !copyOnlyPassphrase { + cp.blockEncryption, err = util.NewVolumeEncryption(ri.blockEncryption.GetID(), ri.blockEncryption.KMS) + if errors.Is(err, util.ErrDEKStoreNeeded) { + cp.blockEncryption.SetDEKStore(cp) + } + } + + // re-encrypt the plain passphrase for the cloned volume + err = cp.blockEncryption.StoreCryptoPassphrase(cp.VolID, passphrase) + if err != nil { + return fmt.Errorf("failed to store passphrase for %q: %w", + cp, err) } } - // re-encrypt the plain passphrase for the cloned volume - err = cp.encryption.StoreCryptoPassphrase(cp.VolID, passphrase) - if err != nil { - return fmt.Errorf("failed to store passphrase for %q: %w", - cp, err) + if ri.isFileEncrypted() && !copyOnlyPassphrase { + var err error + cp.fileEncryption, err = util.NewVolumeEncryption(ri.fileEncryption.GetID(), ri.fileEncryption.KMS) + if errors.Is(err, util.ErrDEKStoreNeeded) { + _, err := ri.fileEncryption.KMS.GetSecret("") + if errors.Is(err, kmsapi.ErrGetSecretUnsupported) { + return err + } + } + } + + if ri.isFileEncrypted() && ri.fileEncryption.KMS.RequiresDEKStore() == kmsapi.DEKStoreIntegrated { + // get the unencrypted passphrase + passphrase, err := ri.fileEncryption.GetCryptoPassphrase(ri.VolID) + if err != nil { + return fmt.Errorf("failed to fetch passphrase for %q: %w", + ri, err) + } + + // re-encrypt the plain passphrase for the cloned volume + err = 
cp.fileEncryption.StoreCryptoPassphrase(cp.VolID, passphrase) + if err != nil { + return fmt.Errorf("failed to store passphrase for %q: %w", + cp, err) + } } // copy encryption status for the original volume @@ -166,6 +216,7 @@ func (ri *rbdImage) copyEncryptionConfig(cp *rbdImage, copyOnlyPassphrase bool) return fmt.Errorf("failed to get encryption status for %q: %w", ri, err) } + err = cp.ensureEncryptionMetadataSet(status) if err != nil { return fmt.Errorf("failed to store encryption status for %q: "+ @@ -178,12 +229,12 @@ func (ri *rbdImage) copyEncryptionConfig(cp *rbdImage, copyOnlyPassphrase bool) // repairEncryptionConfig checks the encryption state of the current rbdImage, // and makes sure that the destination rbdImage has the same configuration. func (ri *rbdImage) repairEncryptionConfig(dest *rbdImage) error { - if !ri.isEncrypted() { + if !ri.isBlockEncrypted() && !ri.isFileEncrypted() { return nil } // if ri is encrypted, copy its configuration in case it is missing - if !dest.isEncrypted() { + if !dest.isBlockEncrypted() && !dest.isFileEncrypted() { // dest needs to be connected to the cluster, otherwise it will // not be possible to write any metadata if dest.conn == nil { @@ -197,7 +248,7 @@ func (ri *rbdImage) repairEncryptionConfig(dest *rbdImage) error { } func (ri *rbdImage) encryptDevice(ctx context.Context, devicePath string) error { - passphrase, err := ri.encryption.GetCryptoPassphrase(ri.VolID) + passphrase, err := ri.blockEncryption.GetCryptoPassphrase(ri.VolID) if err != nil { log.ErrorLog(ctx, "failed to get crypto passphrase for %s: %v", ri, err) @@ -223,7 +274,7 @@ func (ri *rbdImage) encryptDevice(ctx context.Context, devicePath string) error } func (rv *rbdVolume) openEncryptedDevice(ctx context.Context, devicePath string) (string, error) { - passphrase, err := rv.encryption.GetCryptoPassphrase(rv.VolID) + passphrase, err := rv.blockEncryption.GetCryptoPassphrase(rv.VolID) if err != nil { log.ErrorLog(ctx, "failed to get passphrase for encrypted device %s: %v", rv, err) @@ -255,14 +306,22 @@ func (rv *rbdVolume) openEncryptedDevice(ctx context.Context, devicePath string) } func (ri *rbdImage) initKMS(ctx context.Context, volOptions, credentials map[string]string) error { - kmsID, err := ri.ParseEncryptionOpts(ctx, volOptions) + kmsID, encType, err := ParseEncryptionOpts(ctx, volOptions, rbdDefaultEncryptionType) if err != nil { return err - } else if kmsID == "" { + } + + switch encType { + case util.EncryptionTypeBlock: + err = ri.configureBlockEncryption(kmsID, credentials) + case util.EncryptionTypeFile: + err = ri.configureFileEncryption(kmsID, credentials) + case util.EncryptionTypeInvalid: + return fmt.Errorf("invalid encryption type") + case util.EncryptionTypeNone: return nil } - err = ri.configureEncryption(kmsID, credentials) if err != nil { return fmt.Errorf("invalid encryption kms configuration: %w", err) } @@ -271,10 +330,11 @@ func (ri *rbdImage) initKMS(ctx context.Context, volOptions, credentials map[str } // ParseEncryptionOpts returns kmsID and sets Owner attribute. 
-func (ri *rbdImage) ParseEncryptionOpts( +func ParseEncryptionOpts( ctx context.Context, volOptions map[string]string, -) (string, error) { + fallbackEncType util.EncryptionType, +) (string, util.EncryptionType, error) { var ( err error ok bool @@ -282,30 +342,57 @@ func (ri *rbdImage) ParseEncryptionOpts( ) encrypted, ok = volOptions["encrypted"] if !ok { - return "", nil + return "", util.EncryptionTypeNone, nil } kmsID, err = util.FetchEncryptionKMSID(encrypted, volOptions["encryptionKMSID"]) if err != nil { - return "", err + return "", util.EncryptionTypeInvalid, err } - return kmsID, nil + encType := util.FetchEncryptionType(volOptions, fallbackEncType) + + return kmsID, encType, nil } -// configureEncryption sets up the VolumeEncryption for this rbdImage. Once -// configured, use isEncrypted() to see if the volume supports encryption. -func (ri *rbdImage) configureEncryption(kmsID string, credentials map[string]string) error { +// configureBlockEncryption sets up the VolumeEncryption for this rbdImage. Once +// configured, use isBlockEncrypted() to see if the volume supports block encryption. +func (ri *rbdImage) configureBlockEncryption(kmsID string, credentials map[string]string) error { kms, err := kmsapi.GetKMS(ri.Owner, kmsID, credentials) if err != nil { return err } - ri.encryption, err = util.NewVolumeEncryption(kmsID, kms) + ri.blockEncryption, err = util.NewVolumeEncryption(kmsID, kms) // if the KMS can not store the DEK itself, we'll store it in the // metadata of the RBD image itself if errors.Is(err, util.ErrDEKStoreNeeded) { - ri.encryption.SetDEKStore(ri) + ri.blockEncryption.SetDEKStore(ri) + } + + return nil +} + +// configureFileEncryption sets up the VolumeEncryption for this rbdImage. Once +// configured, use isFileEncrypted() to see if the volume supports file encryption. +func (ri *rbdImage) configureFileEncryption(kmsID string, credentials map[string]string) error { + kms, err := kmsapi.GetKMS(ri.Owner, kmsID, credentials) + if err != nil { return err + } + + ri.fileEncryption, err = util.NewVolumeEncryption(kmsID, kms) + + if errors.Is(err, util.ErrDEKStoreNeeded) { + // fscrypt uses secrets directly from the KMS. + // Therefore we do not support an additional DEK + // store. Since not all "metadata" KMS support + // GetSecret, test for support here.
Postpone any + // other error handling + _, err := ri.fileEncryption.KMS.GetSecret("") + if errors.Is(err, kmsapi.ErrGetSecretUnsupported) { + return err + } } return nil diff --git a/internal/rbd/nodeserver.go b/internal/rbd/nodeserver.go index 9b8c17fa3..96d7989a5 100644 --- a/internal/rbd/nodeserver.go +++ b/internal/rbd/nodeserver.go @@ -27,6 +27,7 @@ import ( csicommon "github.com/ceph/ceph-csi/internal/csi-common" "github.com/ceph/ceph-csi/internal/journal" "github.com/ceph/ceph-csi/internal/util" + "github.com/ceph/ceph-csi/internal/util/fscrypt" "github.com/ceph/ceph-csi/internal/util/log" librbd "github.com/ceph/go-ceph/rbd" @@ -55,8 +56,8 @@ type stageTransaction struct { isStagePathCreated bool // isMounted represents if the volume was mounted or not isMounted bool - // isEncrypted represents if the volume was encrypted or not - isEncrypted bool + // isBlockEncrypted represents if the volume was encrypted or not + isBlockEncrypted bool // devicePath represents the path where rbd device is mapped devicePath string } @@ -425,12 +426,18 @@ func (ns *NodeServer) stageTransaction( } } - if volOptions.isEncrypted() { + if volOptions.isBlockEncrypted() { devicePath, err = ns.processEncryptedDevice(ctx, volOptions, devicePath) if err != nil { return transaction, err } - transaction.isEncrypted = true + transaction.isBlockEncrypted = true + } + + if volOptions.isFileEncrypted() { + if err = fscrypt.InitializeNode(ctx); err != nil { + return transaction, fmt.Errorf("file encryption setup for %s failed: %w", volOptions.VolID, err) + } } stagingTargetPath := getStagingTargetPath(req) @@ -444,12 +451,21 @@ func (ns *NodeServer) stageTransaction( transaction.isStagePathCreated = true // nodeStage Path - err = ns.mountVolumeToStagePath(ctx, req, staticVol, stagingTargetPath, devicePath) + err = ns.mountVolumeToStagePath(ctx, req, staticVol, stagingTargetPath, devicePath, volOptions.isFileEncrypted()) if err != nil { return transaction, err } transaction.isMounted = true + if volOptions.isFileEncrypted() { + log.DebugLog(ctx, "rbd fscrypt: trying to unlock filesystem on %s image %s", stagingTargetPath, volOptions.VolID) + err = fscrypt.Unlock(ctx, volOptions.fileEncryption, stagingTargetPath, volOptions.VolID) + if err != nil { + return transaction, fmt.Errorf("file system encryption unlock in %s image %s failed: %w", + stagingTargetPath, volOptions.VolID, err) + } + } + // As we are supporting the restore of a volume to a bigger size and // creating bigger size clone from a volume, we need to check filesystem // resize is required, if required resize filesystem. 
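For a file-encrypted volume the node-stage path now has three extra steps around the existing mount; a condensed sketch of that order, with error handling and the stage-transaction bookkeeping trimmed (the wrapper function itself is illustrative, the calls are the ones introduced above):

// stageFileEncrypted condenses the new stageTransaction code path; it is not
// itself a function in the driver.
func stageFileEncrypted(ctx context.Context, ns *NodeServer,
	req *csi.NodeStageVolumeRequest, volOptions *rbdVolume, devicePath string) error {
	// 1. once-per-node setup: makes sure /etc/fscrypt.conf exists
	if err := fscrypt.InitializeNode(ctx); err != nil {
		return fmt.Errorf("file encryption setup for %s failed: %w", volOptions.VolID, err)
	}

	// 2. create the filesystem (ext4 additionally gets the "encrypt" feature)
	//    and mount it onto the staging path
	stagingTargetPath := getStagingTargetPath(req)
	if err := ns.mountVolumeToStagePath(ctx, req, false, stagingTargetPath, devicePath, true); err != nil {
		return err
	}

	// 3. create or unlock the fscrypt protector and policy inside the mounted filesystem
	return fscrypt.Unlock(ctx, volOptions.fileEncryption, stagingTargetPath, volOptions.VolID)
}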
@@ -475,13 +491,13 @@ func resizeNodeStagePath(ctx context.Context, var ok bool // if its a non encrypted block device we dont need any expansion - if isBlock && !transaction.isEncrypted { + if isBlock && !transaction.isBlockEncrypted { return nil } resizer := mount.NewResizeFs(utilexec.New()) - if transaction.isEncrypted { + if transaction.isBlockEncrypted { devicePath, err = resizeEncryptedDevice(ctx, volID, stagingTargetPath, devicePath) if err != nil { return status.Error(codes.Internal, err.Error()) @@ -611,7 +627,7 @@ func (ns *NodeServer) undoStagingTransaction( // Unmapping rbd device if transaction.devicePath != "" { - err = detachRBDDevice(ctx, transaction.devicePath, volID, volOptions.UnmapOptions, transaction.isEncrypted) + err = detachRBDDevice(ctx, transaction.devicePath, volID, volOptions.UnmapOptions, transaction.isBlockEncrypted) if err != nil { log.ErrorLog( ctx, @@ -691,6 +707,17 @@ func (ns *NodeServer) NodePublishVolume( return &csi.NodePublishVolumeResponse{}, nil } + fileEncrypted, err := IsFileEncrypted(ctx, req.GetVolumeContext()) + if err != nil { + return nil, status.Error(codes.Internal, err.Error()) + } + if fileEncrypted { + stagingPath = fscrypt.AppendEncyptedSubdirectory(stagingPath) + if err = fscrypt.IsDirectoryUnlocked(stagingPath, req.GetVolumeCapability().GetMount().GetFsType()); err != nil { + return nil, status.Error(codes.Internal, err.Error()) + } + } + // Publish Path err = ns.mountVolume(ctx, stagingPath, req) if err != nil { @@ -707,6 +734,7 @@ func (ns *NodeServer) mountVolumeToStagePath( req *csi.NodeStageVolumeRequest, staticVol bool, stagingPath, devicePath string, + fileEncryption bool, ) error { readOnly := false fsType := req.GetVolumeCapability().GetMount().GetFsType() @@ -751,7 +779,11 @@ func (ns *NodeServer) mountVolumeToStagePath( args := []string{} switch fsType { case "ext4": - args = []string{"-m0", "-Enodiscard,lazy_itable_init=1,lazy_journal_init=1", devicePath} + args = []string{"-m0", "-Enodiscard,lazy_itable_init=1,lazy_journal_init=1"} + if fileEncryption { + args = append(args, "-Oencrypt") + } + args = append(args, devicePath) case "xfs": args = []string{"-K", devicePath} // always disable reflink @@ -1146,7 +1178,7 @@ func (ns *NodeServer) processEncryptedDevice( // CreateVolume. // Use the same setupEncryption() as CreateVolume does, and // continue with the common process to crypt-format the device. - err = volOptions.setupEncryption(ctx) + err = volOptions.setupBlockEncryption(ctx) if err != nil { log.ErrorLog(ctx, "failed to setup encryption for rbd"+ "image %s: %v", imageSpec, err) diff --git a/internal/rbd/rbd_attach.go b/internal/rbd/rbd_attach.go index ad326ac81..1af3f065b 100644 --- a/internal/rbd/rbd_attach.go +++ b/internal/rbd/rbd_attach.go @@ -473,7 +473,7 @@ func createPath(ctx context.Context, volOpt *rbdVolume, device string, cr *util. 
imageOrDeviceSpec: imagePath, isImageSpec: true, isNbd: isNbd, - encrypted: volOpt.isEncrypted(), + encrypted: volOpt.isBlockEncrypted(), volumeID: volOpt.VolID, unmapOptions: volOpt.UnmapOptions, logDir: volOpt.LogDir, diff --git a/internal/rbd/rbd_journal.go b/internal/rbd/rbd_journal.go index eadfa99e7..42e504d71 100644 --- a/internal/rbd/rbd_journal.go +++ b/internal/rbd/rbd_journal.go @@ -87,6 +87,17 @@ func validateRbdVol(rbdVol *rbdVolume) error { return err } +func getEncryptionConfig(rbdVol *rbdVolume) (string, util.EncryptionType) { + switch { + case rbdVol.isBlockEncrypted(): + return rbdVol.blockEncryption.GetID(), util.EncryptionTypeBlock + case rbdVol.isFileEncrypted(): + return rbdVol.fileEncryption.GetID(), util.EncryptionTypeFile + default: + return "", util.EncryptionTypeNone + } +} + /* checkSnapCloneExists, and its counterpart checkVolExists, function checks if the passed in rbdSnapshot or rbdVolume exists on the backend. @@ -130,7 +141,7 @@ func checkSnapCloneExists( defer j.Destroy() snapData, err := j.CheckReservation(ctx, rbdSnap.JournalPool, - rbdSnap.RequestName, rbdSnap.NamePrefix, rbdSnap.RbdImageName, "") + rbdSnap.RequestName, rbdSnap.NamePrefix, rbdSnap.RbdImageName, "", util.EncryptionTypeNone) if err != nil { return false, err } @@ -245,10 +256,7 @@ func (rv *rbdVolume) Exists(ctx context.Context, parentVol *rbdVolume) (bool, er return false, err } - kmsID := "" - if rv.isEncrypted() { - kmsID = rv.encryption.GetID() - } + kmsID, encryptionType := getEncryptionConfig(rv) j, err := volJournal.Connect(rv.Monitors, rv.RadosNamespace, rv.conn.Creds) if err != nil { @@ -257,7 +265,7 @@ func (rv *rbdVolume) Exists(ctx context.Context, parentVol *rbdVolume) (bool, er defer j.Destroy() imageData, err := j.CheckReservation( - ctx, rv.JournalPool, rv.RequestName, rv.NamePrefix, "", kmsID) + ctx, rv.JournalPool, rv.RequestName, rv.NamePrefix, "", kmsID, encryptionType) if err != nil { return false, err } @@ -386,14 +394,12 @@ func reserveSnap(ctx context.Context, rbdSnap *rbdSnapshot, rbdVol *rbdVolume, c } defer j.Destroy() - kmsID := "" - if rbdVol.isEncrypted() { - kmsID = rbdVol.encryption.GetID() - } + kmsID, encryptionType := getEncryptionConfig(rbdVol) rbdSnap.ReservedID, rbdSnap.RbdSnapName, err = j.ReserveName( ctx, rbdSnap.JournalPool, journalPoolID, rbdSnap.Pool, imagePoolID, - rbdSnap.RequestName, rbdSnap.NamePrefix, rbdVol.RbdImageName, kmsID, rbdSnap.ReservedID, rbdVol.Owner, "") + rbdSnap.RequestName, rbdSnap.NamePrefix, rbdVol.RbdImageName, kmsID, rbdSnap.ReservedID, rbdVol.Owner, + "", encryptionType) if err != nil { return err } @@ -460,10 +466,7 @@ func reserveVol(ctx context.Context, rbdVol *rbdVolume, rbdSnap *rbdSnapshot, cr return err } - kmsID := "" - if rbdVol.isEncrypted() { - kmsID = rbdVol.encryption.GetID() - } + kmsID, encryptionType := getEncryptionConfig(rbdVol) j, err := volJournal.Connect(rbdVol.Monitors, rbdVol.RadosNamespace, cr) if err != nil { @@ -473,7 +476,7 @@ func reserveVol(ctx context.Context, rbdVol *rbdVolume, rbdSnap *rbdSnapshot, cr rbdVol.ReservedID, rbdVol.RbdImageName, err = j.ReserveName( ctx, rbdVol.JournalPool, journalPoolID, rbdVol.Pool, imagePoolID, - rbdVol.RequestName, rbdVol.NamePrefix, "", kmsID, rbdVol.ReservedID, rbdVol.Owner, "") + rbdVol.RequestName, rbdVol.NamePrefix, "", kmsID, rbdVol.ReservedID, rbdVol.Owner, "", encryptionType) if err != nil { return err } @@ -548,11 +551,12 @@ func RegenerateJournal( ) (string, error) { ctx := context.Background() var ( - vi util.CSIIdentifier - rbdVol *rbdVolume - 
kmsID string - err error - ok bool + vi util.CSIIdentifier + rbdVol *rbdVolume + kmsID string + encryptionType util.EncryptionType + err error + ok bool ) rbdVol = &rbdVolume{} @@ -568,7 +572,7 @@ func RegenerateJournal( rbdVol.Owner = owner - kmsID, err = rbdVol.ParseEncryptionOpts(ctx, volumeAttributes) + kmsID, encryptionType, err = ParseEncryptionOpts(ctx, volumeAttributes, util.EncryptionTypeNone) if err != nil { return "", err } @@ -605,7 +609,7 @@ func RegenerateJournal( rbdVol.NamePrefix = volumeAttributes["volumeNamePrefix"] imageData, err := j.CheckReservation( - ctx, rbdVol.JournalPool, rbdVol.RequestName, rbdVol.NamePrefix, "", kmsID) + ctx, rbdVol.JournalPool, rbdVol.RequestName, rbdVol.NamePrefix, "", kmsID, encryptionType) if err != nil { return "", err } @@ -639,7 +643,7 @@ func RegenerateJournal( rbdVol.ReservedID, rbdVol.RbdImageName, err = j.ReserveName( ctx, rbdVol.JournalPool, journalPoolID, rbdVol.Pool, imagePoolID, - rbdVol.RequestName, rbdVol.NamePrefix, "", kmsID, vi.ObjectUUID, rbdVol.Owner, "") + rbdVol.RequestName, rbdVol.NamePrefix, "", kmsID, vi.ObjectUUID, rbdVol.Owner, "", encryptionType) if err != nil { return "", err } diff --git a/internal/rbd/rbd_util.go b/internal/rbd/rbd_util.go index d32c4d391..edfe4e2b4 100644 --- a/internal/rbd/rbd_util.go +++ b/internal/rbd/rbd_util.go @@ -118,6 +118,7 @@ type rbdImage struct { ParentPool string // Cluster name ClusterName string + // Owner is the creator (tenant, Kubernetes Namespace) of the volume Owner string @@ -130,9 +131,14 @@ type rbdImage struct { ObjectSize uint64 ImageFeatureSet librbd.FeatureSet - // encryption provides access to optional VolumeEncryption functions - encryption *util.VolumeEncryption - CreatedAt *timestamp.Timestamp + + // blockEncryption provides access to optional VolumeEncryption functions (e.g LUKS) + blockEncryption *util.VolumeEncryption + // fileEncryption provides access to optional VolumeEncryption functions (e.g fscrypt) + fileEncryption *util.VolumeEncryption + + CreatedAt *timestamp.Timestamp + // conn is a connection to the Ceph cluster obtained from a ConnPool conn *util.ClusterConnection // an opened IOContext, call .openIoctx() before using @@ -384,8 +390,11 @@ func (ri *rbdImage) Destroy() { if ri.conn != nil { ri.conn.Destroy() } - if ri.isEncrypted() { - ri.encryption.Destroy() + if ri.isBlockEncrypted() { + ri.blockEncryption.Destroy() + } + if ri.isFileEncrypted() { + ri.fileEncryption.Destroy() } } @@ -438,8 +447,8 @@ func createImage(ctx context.Context, pOpts *rbdVolume, cr *util.Credentials) er return fmt.Errorf("failed to create rbd image: %w", err) } - if pOpts.isEncrypted() { - err = pOpts.setupEncryption(ctx) + if pOpts.isBlockEncrypted() { + err = pOpts.setupBlockEncryption(ctx) if err != nil { return fmt.Errorf("failed to setup encryption for image %s: %w", pOpts, err) } @@ -624,10 +633,17 @@ func (ri *rbdImage) deleteImage(ctx context.Context) error { return err } - if ri.isEncrypted() { - log.DebugLog(ctx, "rbd: going to remove DEK for %q", ri) - if err = ri.encryption.RemoveDEK(ri.VolID); err != nil { - log.WarningLog(ctx, "failed to clean the passphrase for volume %s: %s", ri.VolID, err) + if ri.isBlockEncrypted() { + log.DebugLog(ctx, "rbd: going to remove DEK for %q (block encryption)", ri) + if err = ri.blockEncryption.RemoveDEK(ri.VolID); err != nil { + log.WarningLog(ctx, "failed to clean the passphrase for volume %s (block encryption): %s", ri.VolID, err) + } + } + + if ri.isFileEncrypted() { + log.DebugLog(ctx, "rbd: going to remove DEK for %q 
(file encryption)", ri) + if err = ri.fileEncryption.RemoveDEK(ri.VolID); err != nil { + log.WarningLog(ctx, "failed to clean the passphrase for volume %s (file encryption): %s", ri.VolID, err) } } @@ -1008,10 +1024,17 @@ func genSnapFromSnapID( rbdSnap, err) } - if imageAttributes.KmsID != "" { - err = rbdSnap.configureEncryption(imageAttributes.KmsID, secrets) + if imageAttributes.KmsID != "" && imageAttributes.EncryptionType == util.EncryptionTypeBlock { + err = rbdSnap.configureBlockEncryption(imageAttributes.KmsID, secrets) if err != nil { - return fmt.Errorf("failed to configure encryption for "+ + return fmt.Errorf("failed to configure block encryption for "+ + "%q: %w", rbdSnap, err) + } + } + if imageAttributes.KmsID != "" && imageAttributes.EncryptionType == util.EncryptionTypeFile { + err = rbdSnap.configureFileEncryption(imageAttributes.KmsID, secrets) + if err != nil { + return fmt.Errorf("failed to configure file encryption for "+ "%q: %w", rbdSnap, err) } } @@ -1103,8 +1126,14 @@ func generateVolumeFromVolumeID( rbdVol.ImageID = imageAttributes.ImageID rbdVol.Owner = imageAttributes.Owner - if imageAttributes.KmsID != "" { - err = rbdVol.configureEncryption(imageAttributes.KmsID, secrets) + if imageAttributes.KmsID != "" && imageAttributes.EncryptionType == util.EncryptionTypeBlock { + err = rbdVol.configureBlockEncryption(imageAttributes.KmsID, secrets) + if err != nil { + return rbdVol, err + } + } + if imageAttributes.KmsID != "" && imageAttributes.EncryptionType == util.EncryptionTypeFile { + err = rbdVol.configureFileEncryption(imageAttributes.KmsID, secrets) if err != nil { return rbdVol, err } @@ -1681,7 +1710,7 @@ func stashRBDImageMetadata(volOptions *rbdVolume, metaDataPath string) error { Pool: volOptions.Pool, RadosNamespace: volOptions.RadosNamespace, ImageName: volOptions.RbdImageName, - Encrypted: volOptions.isEncrypted(), + Encrypted: volOptions.isBlockEncrypted(), UnmapOptions: volOptions.UnmapOptions, } @@ -1961,11 +1990,13 @@ func (ri *rbdImage) getOrigSnapName(snapID uint64) (string, error) { } func (ri *rbdImage) isCompatibleEncryption(dst *rbdImage) error { + riEncrypted := ri.isBlockEncrypted() || ri.isFileEncrypted() + dstEncrypted := dst.isBlockEncrypted() || dst.isFileEncrypted() switch { - case ri.isEncrypted() && !dst.isEncrypted(): + case riEncrypted && !dstEncrypted: return fmt.Errorf("cannot create unencrypted volume from encrypted volume %q", ri) - case !ri.isEncrypted() && dst.isEncrypted(): + case !riEncrypted && dstEncrypted: return fmt.Errorf("cannot create encrypted volume from unencrypted volume %q", ri) } diff --git a/internal/rbd/snapshot.go b/internal/rbd/snapshot.go index 089946757..e793d3e01 100644 --- a/internal/rbd/snapshot.go +++ b/internal/rbd/snapshot.go @@ -111,7 +111,8 @@ func generateVolFromSnap(rbdSnap *rbdSnapshot) *rbdVolume { // copyEncryptionConfig cannot be used here because the volume and the // snapshot will have the same volumeID which cases the panic in // copyEncryptionConfig function. 
- vol.encryption = rbdSnap.encryption + vol.blockEncryption = rbdSnap.blockEncryption + vol.fileEncryption = rbdSnap.fileEncryption return vol } diff --git a/internal/util/crypto.go b/internal/util/crypto.go index bec6b18a8..9eb67a1db 100644 --- a/internal/util/crypto.go +++ b/internal/util/crypto.go @@ -36,7 +36,7 @@ const ( // Passphrase size - 20 bytes is 160 bits to satisfy: // https://tools.ietf.org/html/rfc6749#section-10.10 - encryptionPassphraseSize = 20 + defaultEncryptionPassphraseSize = 20 ) var ( @@ -80,6 +80,68 @@ func FetchEncryptionKMSID(encrypted, kmsID string) (string, error) { return kmsID, nil } +type EncryptionType int + +const ( + // EncryptionTypeInvalid signals invalid or unsupported configuration. + EncryptionTypeInvalid EncryptionType = iota + // EncryptionTypeNone disables encryption. + EncryptionTypeNone + // EncryptionTypeBlock enables block encryption. + EncryptionTypeBlock + // EncryptionTypeFile enables file encryption (fscrypt). + EncryptionTypeFile +) + +const ( + encryptionTypeBlockString = "block" + encryptionTypeFileString = "file" +) + +func ParseEncryptionType(typeStr string) EncryptionType { + switch typeStr { + case encryptionTypeBlockString: + return EncryptionTypeBlock + case encryptionTypeFileString: + return EncryptionTypeFile + case "": + return EncryptionTypeNone + default: + return EncryptionTypeInvalid + } +} + +func EncryptionTypeString(encType EncryptionType) string { + switch encType { + case EncryptionTypeBlock: + return encryptionTypeBlockString + case EncryptionTypeFile: + return encryptionTypeFileString + case EncryptionTypeNone: + return "" + case EncryptionTypeInvalid: + return "INVALID" + default: + return "UNKNOWN" + } +} + +// FetchEncryptionType returns encryptionType specified in volOptions. +// If not specified, use fallback. If specified but invalid, return +// invalid. +func FetchEncryptionType(volOptions map[string]string, fallback EncryptionType) EncryptionType { + encType, ok := volOptions["encryptionType"] + if !ok { + return fallback + } + + if encType == "" { + return EncryptionTypeInvalid + } + + return ParseEncryptionType(encType) +} + // NewVolumeEncryption creates a new instance of VolumeEncryption and // configures the DEKStore. If the KMS does not provide a DEKStore interface, // the VolumeEncryption will be created *and* a ErrDEKStoreNeeded is returned. @@ -156,8 +218,8 @@ func (ve *VolumeEncryption) StoreCryptoPassphrase(volumeID, passphrase string) e } // StoreNewCryptoPassphrase generates a new passphrase and saves it in the KMS. -func (ve *VolumeEncryption) StoreNewCryptoPassphrase(volumeID string) error { - passphrase, err := generateNewEncryptionPassphrase() +func (ve *VolumeEncryption) StoreNewCryptoPassphrase(volumeID string, length int) error { + passphrase, err := generateNewEncryptionPassphrase(length) if err != nil { return fmt.Errorf("failed to generate passphrase for %s: %w", volumeID, err) } @@ -176,8 +238,8 @@ func (ve *VolumeEncryption) GetCryptoPassphrase(volumeID string) (string, error) } // generateNewEncryptionPassphrase generates a random passphrase for encryption.
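Callers see the new storage-class parameter only through FetchEncryptionType; a short usage sketch, written as if inside the util package (the parameter values are illustrative):

// resolveEncryptionType is illustrative only; in the driver this happens
// inside ParseEncryptionOpts, with rbdDefaultEncryptionType as the fallback.
func resolveEncryptionType(scParams map[string]string) EncryptionType {
	// e.g. scParams = {"encrypted": "true", "encryptionType": "file"}:
	// an omitted "encryptionType" falls back to the second argument (block),
	// an empty or unknown value yields EncryptionTypeInvalid
	return FetchEncryptionType(scParams, EncryptionTypeBlock)
}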
-func generateNewEncryptionPassphrase() (string, error) { - bytesPassphrase := make([]byte, encryptionPassphraseSize) +func generateNewEncryptionPassphrase(length int) (string, error) { + bytesPassphrase := make([]byte, length) _, err := rand.Read(bytesPassphrase) if err != nil { return "", err diff --git a/internal/util/crypto_test.go b/internal/util/crypto_test.go index 28b8fefea..f4f0f5716 100644 --- a/internal/util/crypto_test.go +++ b/internal/util/crypto_test.go @@ -28,14 +28,14 @@ import ( func TestGenerateNewEncryptionPassphrase(t *testing.T) { t.Parallel() - b64Passphrase, err := generateNewEncryptionPassphrase() + b64Passphrase, err := generateNewEncryptionPassphrase(defaultEncryptionPassphraseSize) require.NoError(t, err) // b64Passphrase is URL-encoded, decode to verify the length of the // passphrase passphrase, err := base64.URLEncoding.DecodeString(b64Passphrase) assert.NoError(t, err) - assert.Equal(t, encryptionPassphraseSize, len(passphrase)) + assert.Equal(t, defaultEncryptionPassphraseSize, len(passphrase)) } func TestKMSWorkflow(t *testing.T) { @@ -56,10 +56,41 @@ func TestKMSWorkflow(t *testing.T) { volumeID := "volume-id" - err = ve.StoreNewCryptoPassphrase(volumeID) + err = ve.StoreNewCryptoPassphrase(volumeID, defaultEncryptionPassphraseSize) assert.NoError(t, err) passphrase, err := ve.GetCryptoPassphrase(volumeID) assert.NoError(t, err) assert.Equal(t, secrets["encryptionPassphrase"], passphrase) } + +func TestEncryptionType(t *testing.T) { + t.Parallel() + assert.EqualValues(t, EncryptionTypeInvalid, ParseEncryptionType("wat?")) + assert.EqualValues(t, EncryptionTypeInvalid, ParseEncryptionType("both")) + assert.EqualValues(t, EncryptionTypeInvalid, ParseEncryptionType("file,block")) + assert.EqualValues(t, EncryptionTypeInvalid, ParseEncryptionType("block,file")) + assert.EqualValues(t, EncryptionTypeBlock, ParseEncryptionType("block")) + assert.EqualValues(t, EncryptionTypeFile, ParseEncryptionType("file")) + assert.EqualValues(t, EncryptionTypeNone, ParseEncryptionType("")) + + for _, s := range []string{"file", "block", ""} { + assert.EqualValues(t, s, EncryptionTypeString(ParseEncryptionType(s))) + } +} + +func TestFetchEncryptionType(t *testing.T) { + t.Parallel() + volOpts := map[string]string{} + assert.EqualValues(t, EncryptionTypeBlock, FetchEncryptionType(volOpts, EncryptionTypeBlock)) + assert.EqualValues(t, EncryptionTypeFile, FetchEncryptionType(volOpts, EncryptionTypeFile)) + assert.EqualValues(t, EncryptionTypeNone, FetchEncryptionType(volOpts, EncryptionTypeNone)) + volOpts["encryptionType"] = "" + assert.EqualValues(t, EncryptionTypeInvalid, FetchEncryptionType(volOpts, EncryptionTypeNone)) + volOpts["encryptionType"] = "block" + assert.EqualValues(t, EncryptionTypeBlock, FetchEncryptionType(volOpts, EncryptionTypeNone)) + volOpts["encryptionType"] = "file" + assert.EqualValues(t, EncryptionTypeFile, FetchEncryptionType(volOpts, EncryptionTypeNone)) + volOpts["encryptionType"] = "INVALID" + assert.EqualValues(t, EncryptionTypeInvalid, FetchEncryptionType(volOpts, EncryptionTypeNone)) +} diff --git a/internal/util/fscrypt/fscrypt.go b/internal/util/fscrypt/fscrypt.go new file mode 100644 index 000000000..6f87b69d6 --- /dev/null +++ b/internal/util/fscrypt/fscrypt.go @@ -0,0 +1,439 @@ +/* +Copyright 2022 The Ceph-CSI Authors. +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + http://www.apache.org/licenses/LICENSE-2.0 +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package fscrypt + +/* +#include +*/ +import "C" + +import ( + "context" + "errors" + "fmt" + "os" + "os/user" + "path" + "time" + "unsafe" + + fscryptactions "github.com/google/fscrypt/actions" + fscryptcrypto "github.com/google/fscrypt/crypto" + fscryptfilesystem "github.com/google/fscrypt/filesystem" + fscryptmetadata "github.com/google/fscrypt/metadata" + "github.com/pkg/xattr" + "golang.org/x/sys/unix" + + "github.com/ceph/ceph-csi/internal/kms" + "github.com/ceph/ceph-csi/internal/util" + "github.com/ceph/ceph-csi/internal/util/log" +) + +const ( + FscryptHashingTimeTarget = 1 * time.Second + FscryptProtectorPrefix = "ceph-csi" + FscryptSubdir = "ceph-csi-encrypted" + encryptionPassphraseSize = 64 +) + +var policyV2Support = []util.KernelVersion{ + { + Version: 5, + PatchLevel: 4, + SubLevel: 0, + ExtraVersion: 0, + Distribution: "", + Backport: false, + }, +} + +func AppendEncyptedSubdirectory(dir string) string { + return path.Join(dir, FscryptSubdir) +} + +// getPassphrase returns the passphrase from the configured Ceph CSI KMS to be used as a protector key in fscrypt. +func getPassphrase(ctx context.Context, encryption util.VolumeEncryption, volID string) (string, error) { + var ( + passphrase string + err error + ) + + switch encryption.KMS.RequiresDEKStore() { + case kms.DEKStoreIntegrated: + passphrase, err = encryption.GetCryptoPassphrase(volID) + if err != nil { + log.ErrorLog(ctx, "fscrypt: failed to get passphrase from KMS: %v", err) + + return "", err + } + case kms.DEKStoreMetadata: + passphrase, err = encryption.KMS.GetSecret(volID) + if err != nil { + log.ErrorLog(ctx, "fscrypt: failed to GetSecret: %v", err) + + return "", err + } + } + + return passphrase, nil +} + +// createKeyFuncFromVolumeEncryption returns an fscrypt key function returning +// encryption keys form a VolumeEncryption struct. +func createKeyFuncFromVolumeEncryption( + ctx context.Context, + encryption util.VolumeEncryption, + volID string, +) (func(fscryptactions.ProtectorInfo, bool) (*fscryptcrypto.Key, error), error) { + keyFunc := func(info fscryptactions.ProtectorInfo, retry bool) (*fscryptcrypto.Key, error) { + passphrase, err := getPassphrase(ctx, encryption, volID) + if err != nil { + return nil, err + } + + key, err := fscryptcrypto.NewBlankKey(encryptionPassphraseSize / 2) + copy(key.Data(), passphrase) + + return key, err + } + + return keyFunc, nil +} + +// fsyncEncryptedDirectory calls sync on dirPath. It is intended to +// work around the fscrypt library not syncing the directory it sets a +// policy on. +// TODO Remove when the fscrypt dependency has https://github.com/google/fscrypt/pull/359 +func fsyncEncryptedDirectory(dirPath string) error { + dir, err := os.Open(dirPath) + if err != nil { + return err + } + defer dir.Close() + + return dir.Sync() +} + +// unlockExisting tries to unlock an already set up fscrypt directory using keys from Ceph CSI. 
+func unlockExisting( + ctx context.Context, + fscryptContext *fscryptactions.Context, + encryptedPath string, protectorName string, + keyFn func(fscryptactions.ProtectorInfo, bool) (*fscryptcrypto.Key, error), +) error { + var err error + + policy, err := fscryptactions.GetPolicyFromPath(fscryptContext, encryptedPath) + if err != nil { + log.ErrorLog(ctx, "fscrypt: policy get failed %v", err) + + return err + } + + optionFn := func(policyDescriptor string, options []*fscryptactions.ProtectorOption) (int, error) { + for idx, option := range options { + if option.Name() == protectorName { + return idx, nil + } + } + + return 0, &fscryptactions.ErrNotProtected{PolicyDescriptor: policyDescriptor, ProtectorDescriptor: protectorName} + } + + if err = policy.Unlock(optionFn, keyFn); err != nil { + log.ErrorLog(ctx, "fscrypt: unlock with protector error: %v", err) + + return err + } + + defer func() { + err = policy.Lock() + if err != nil { + log.ErrorLog(ctx, "fscrypt: failed to lock policy after use: %v", err) + } + }() + + if err = policy.Provision(); err != nil { + log.ErrorLog(ctx, "fscrypt: provision fail %v", err) + + return err + } + + log.DebugLog(ctx, "fscrypt protector unlock: %s %+v", protectorName, policy) + + return nil +} + +func initializeAndUnlock( + ctx context.Context, + fscryptContext *fscryptactions.Context, + encryptedPath string, protectorName string, + keyFn func(fscryptactions.ProtectorInfo, bool) (*fscryptcrypto.Key, error), +) error { + var owner *user.User + var err error + + if err = os.Mkdir(encryptedPath, 0o755); err != nil { + return err + } + + protector, err := fscryptactions.CreateProtector(fscryptContext, protectorName, keyFn, owner) + if err != nil { + log.ErrorLog(ctx, "fscrypt: protector name=%s create failed: %v. reverting.", protectorName, err) + if revertErr := protector.Revert(); revertErr != nil { + return revertErr + } + + return err + } + + if err = protector.Unlock(keyFn); err != nil { + return err + } + log.DebugLog(ctx, "fscrypt protector unlock: %+v", protector) + + var policy *fscryptactions.Policy + if policy, err = fscryptactions.CreatePolicy(fscryptContext, protector); err != nil { + return err + } + defer func() { + err = policy.Lock() + if err != nil { + log.ErrorLog(ctx, "fscrypt: failed to lock policy after init: %w") + err = policy.Revert() + if err != nil { + log.ErrorLog(ctx, "fscrypt: failed to revert policy after failed lock: %w") + } + } + }() + + if err = policy.UnlockWithProtector(protector); err != nil { + log.ErrorLog(ctx, "fscrypt: Failed to unlock policy: %v", err) + + return err + } + + if err = policy.Provision(); err != nil { + log.ErrorLog(ctx, "fscrypt: Failed to provision policy: %v", err) + + return err + } + + if err = policy.Apply(encryptedPath); err != nil { + log.ErrorLog(ctx, "fscrypt: Failed to apply protector (see also kernel log): %w", err) + if err = policy.Deprovision(false); err != nil { + log.ErrorLog(ctx, "fscrypt: Policy cleanup response to failing apply failed: %w", err) + } + + return err + } + + if err = fsyncEncryptedDirectory(encryptedPath); err != nil { + log.ErrorLog(ctx, "fscrypt: fsync encrypted dir - to flush kernel policy to disk failed %v", err) + + return err + } + + return nil +} + +// getInodeEncryptedAttribute returns the inode's encrypt attribute similar to lsattr(1) +func getInodeEncryptedAttribute(p string) (bool, error) { + file, err := os.Open(p) + if err != nil { + return false, err + } + defer file.Close() + + var attr int + _, _, errno := unix.Syscall(unix.SYS_IOCTL, file.Fd(), 
unix.FS_IOC_GETFLAGS, + uintptr(unsafe.Pointer(&attr))) + if errno != 0 { + return false, fmt.Errorf("error calling ioctl_iflags: %w", errno) + } + + if attr&C.FS_ENCRYPT_FL != 0 { + return true, nil + } + + return false, nil +} + +// IsDirectoryUnlocked checks if a directory is an unlocked fscrypt-encrypted directory. +func IsDirectoryUnlocked(directoryPath, filesystem string) error { + if _, err := fscryptmetadata.GetPolicy(directoryPath); err != nil { + return fmt.Errorf("no fscrypt policy set on directory %q: %w", directoryPath, err) + } + + switch filesystem { + case "ceph": + _, err := xattr.Get(directoryPath, "ceph.fscrypt.auth") + if err != nil { + return fmt.Errorf("error reading ceph.fscrypt.auth xattr on %q: %w", directoryPath, err) + } + default: + encrypted, err := getInodeEncryptedAttribute(directoryPath) + if err != nil { + return err + } + + if !encrypted { + return fmt.Errorf("path %s does not have the encrypted inode flag set. Encryption init must have failed", + directoryPath) + } + } + + return nil +} + +func getBestPolicyVersion() (int64, error) { + // fetch the current running kernel info + release, err := util.GetKernelVersion() + if err != nil { + return 0, fmt.Errorf("fetching current kernel version failed: %w", err) + } + + switch { + case util.CheckKernelSupport(release, policyV2Support): + return 2, nil + default: + return 1, nil + } +} + +// InitializeNode performs the once-per-nodeserver initialization +// required by the fscrypt library. It creates /etc/fscrypt.conf. +func InitializeNode(ctx context.Context) error { + policyVersion, err := getBestPolicyVersion() + if err != nil { + return fmt.Errorf("fscrypt node init failed to determine best policy version: %w", err) + } + + err = fscryptactions.CreateConfigFile(FscryptHashingTimeTarget, policyVersion) + if err != nil { + existsError := &fscryptactions.ErrConfigFileExists{} + if errors.As(err, &existsError) { + log.ErrorLog(ctx, "fscrypt: config file %q already exists. Skipping fscrypt node setup", + existsError.Path) + + return nil + } + + return fmt.Errorf("fscrypt node init failed to create node configuration (/etc/fscrypt.conf): %w", + err) + } + + return nil +} + +// Unlock unlocks a directory, possibly creating fresh fscrypt metadata, +// iff the volume is encrypted; otherwise it returns immediately. Calling +// this function requires that InitializeNode ran once on this node. +func Unlock( + ctx context.Context, + volEncryption *util.VolumeEncryption, + stagingTargetPath string, volID string, +) error { + // Fetches keys from KMS. Do this first to catch KMS errors before setting up anything. + keyFn, err := createKeyFuncFromVolumeEncryption(ctx, *volEncryption, volID) + if err != nil { + log.ErrorLog(ctx, "fscrypt: could not create key function: %v", err) + + return err + } + + err = fscryptfilesystem.UpdateMountInfo() + if err != nil { + return err + } + + fscryptContext, err := fscryptactions.NewContextFromMountpoint(stagingTargetPath, nil) + if err != nil { + log.ErrorLog(ctx, "fscrypt: failed to create context from mountpoint %v: %w", stagingTargetPath, err) + + return err + } + + fscryptContext.Config.UseFsKeyringForV1Policies = true + + log.DebugLog(ctx, "fscrypt context: %+v", fscryptContext) + + if err = fscryptContext.Mount.CheckSupport(); err != nil { + log.ErrorLog(ctx, "fscrypt: filesystem mount %s does not support fscrypt", fscryptContext.Mount) + + return err + } + + // A properly set up fscrypt directory requires metadata and a kernel policy: + + // 1. Do we have a metadata directory (.fscrypt) set up?
+ metadataDirExists := false + if err = fscryptContext.Mount.Setup(fscryptfilesystem.SingleUserWritable); err != nil { + alreadySetupErr := &fscryptfilesystem.ErrAlreadySetup{} + if errors.As(err, &alreadySetupErr) { + log.DebugLog(ctx, "fscrypt: metadata directory in %q already set up", alreadySetupErr.Mount.Path) + metadataDirExists = true + } else { + log.ErrorLog(ctx, "fscrypt: mount setup failed: %v", err) + + return err + } + } + + encryptedPath := path.Join(stagingTargetPath, FscryptSubdir) + kernelPolicyExists := false + // 2. Ask the kernel if the directory has an fscrypt policy in place. + if _, err = fscryptmetadata.GetPolicy(encryptedPath); err == nil { // encrypted directory already set up + kernelPolicyExists = true + } + + if metadataDirExists != kernelPolicyExists { + return fmt.Errorf("fscrypt: unsupported state metadata=%t kernel_policy=%t", + metadataDirExists, kernelPolicyExists) + } + + protectorName := FscryptProtectorPrefix + + switch volEncryption.KMS.RequiresDEKStore() { + case kms.DEKStoreMetadata: + // Metadata style KMS use the KMS secret as a custom + // passphrase directly in fscrypt, circumenting key + // derivation on the CSI side to allow users to fall + // back on the fscrypt commandline tool easily + fscryptContext.Config.Source = fscryptmetadata.SourceType_custom_passphrase + case kms.DEKStoreIntegrated: + fscryptContext.Config.Source = fscryptmetadata.SourceType_raw_key + } + + if kernelPolicyExists && metadataDirExists { + log.DebugLog(ctx, "fscrypt: Encrypted directory already set up, policy exists") + + return unlockExisting(ctx, fscryptContext, encryptedPath, protectorName, keyFn) + } + + if !kernelPolicyExists && !metadataDirExists { + log.DebugLog(ctx, "fscrypt: Creating new protector and policy") + if volEncryption.KMS.RequiresDEKStore() == kms.DEKStoreIntegrated { + if err := volEncryption.StoreNewCryptoPassphrase(volID, encryptionPassphraseSize); err != nil { + log.ErrorLog(ctx, "fscrypt: store new crypto passphrase failed: %v", err) + + return err + } + } + + return initializeAndUnlock(ctx, fscryptContext, encryptedPath, protectorName, keyFn) + } + + return fmt.Errorf("unsupported") +} diff --git a/internal/util/getsecret_test.go b/internal/util/getsecret_test.go new file mode 100644 index 000000000..59e61e992 --- /dev/null +++ b/internal/util/getsecret_test.go @@ -0,0 +1,52 @@ +/* +Copyright 2022 The Ceph-CSI Authors. +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + http://www.apache.org/licenses/LICENSE-2.0 +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package util + +import ( + "errors" + "testing" + + kmsapi "github.com/ceph/ceph-csi/internal/kms" + + "github.com/stretchr/testify/assert" +) + +func TestGetPassphraseFromKMS(t *testing.T) { + t.Parallel() + + for _, provider := range kmsapi.GetKMSTestProvider() { + if provider.CreateTestDummy == nil { + continue + } + kms := kmsapi.GetKMSTestDummy(provider.UniqueID) + assert.NotNil(t, kms) + + volEnc, err := NewVolumeEncryption(provider.UniqueID, kms) + if errors.Is(err, ErrDEKStoreNeeded) { + _, err = volEnc.KMS.GetSecret("") + if errors.Is(err, kmsapi.ErrGetSecretUnsupported) { + continue // currently unsupported by fscrypt integration + } + } + assert.NotNil(t, volEnc) + + if kms.RequiresDEKStore() == kmsapi.DEKStoreIntegrated { + continue + } + + secret, err := kms.GetSecret("") + assert.NoError(t, err, provider.UniqueID) + assert.NotEmpty(t, secret, provider.UniqueID) + } +} diff --git a/scripts/mdl-style.rb b/scripts/mdl-style.rb index 3fdd9e00a..99a4f3d00 100644 --- a/scripts/mdl-style.rb +++ b/scripts/mdl-style.rb @@ -3,8 +3,13 @@ all #Refer below url for more information about the markdown rules. #https://github.com/markdownlint/markdownlint/blob/master/docs/RULES.md -rule 'MD013', :code_blocks => false, :tables => false, :line_length => 80 +rule 'MD013', :ignore_code_blocks => false, :tables => false, :line_length => 80 exclude_rule 'MD033' # In-line HTML: GitHub style markdown adds HTML tags exclude_rule 'MD040' # Fenced code blocks should have a language specified exclude_rule 'MD041' # First line in file should be a top level header +# TODO: Enable the rules after making required changes. +exclude_rule 'MD007' # Unordered list indentation +exclude_rule 'MD012' # Multiple consecutive blank lines +exclude_rule 'MD013' # Line length +exclude_rule 'MD047' # File should end with a single newline character \ No newline at end of file diff --git a/scripts/minikube.sh b/scripts/minikube.sh index 1def9a369..e864f95b7 100755 --- a/scripts/minikube.sh +++ b/scripts/minikube.sh @@ -180,6 +180,7 @@ function disable_storage_addons() { # configure minikube MINIKUBE_ARCH=${MINIKUBE_ARCH:-"amd64"} MINIKUBE_VERSION=${MINIKUBE_VERSION:-"latest"} +MINIKUBE_ISO_URL=${MINIKUBE_ISO_URL:-""} KUBE_VERSION=${KUBE_VERSION:-"latest"} CONTAINER_CMD=${CONTAINER_CMD:-"docker"} MEMORY=${MEMORY:-"4096"} @@ -206,6 +207,10 @@ else DISK_CONFIG="" fi +if [[ -n "${MINIKUBE_ISO_URL}" ]]; then + EXTRA_CONFIG="${EXTRA_CONFIG} --iso-url ${MINIKUBE_ISO_URL}" +fi + # configure csi image version CSI_IMAGE_VERSION=${CSI_IMAGE_VERSION:-"canary"} diff --git a/vendor/github.com/google/fscrypt/LICENSE b/vendor/github.com/google/fscrypt/LICENSE new file mode 100644 index 000000000..d64569567 --- /dev/null +++ b/vendor/github.com/google/fscrypt/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. 
For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
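The vendored actions package that follows is driven entirely through callbacks; the driver's createKeyFuncFromVolumeEncryption (earlier in this change) returns a raw-key KeyFunc of roughly this shape, sketched here with a hypothetical fetchPassphrase stand-in for the KMS lookup:

// rawKeyFunc is an illustration of the KeyFunc contract, not driver code.
func rawKeyFunc(fetchPassphrase func() (string, error)) fscryptactions.KeyFunc {
	return func(info fscryptactions.ProtectorInfo, retry bool) (*fscryptcrypto.Key, error) {
		passphrase, err := fetchPassphrase()
		if err != nil {
			return nil, err
		}

		// fscrypt expects a fixed-size raw key; the KMS passphrase is copied into it
		key, err := fscryptcrypto.NewBlankKey(32)
		if err != nil {
			return nil, err
		}
		copy(key.Data(), passphrase)

		return key, nil
	}
}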
diff --git a/vendor/github.com/google/fscrypt/actions/callback.go b/vendor/github.com/google/fscrypt/actions/callback.go new file mode 100644 index 000000000..f15893db8 --- /dev/null +++ b/vendor/github.com/google/fscrypt/actions/callback.go @@ -0,0 +1,132 @@ +/* + * callback.go - defines how the caller of an action function passes along a key + * to be used in this package. + * + * Copyright 2017 Google Inc. + * Author: Joe Richey (joerichey@google.com) + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not + * use this file except in compliance with the License. You may obtain a copy of + * the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. + */ + +package actions + +import ( + "log" + + "github.com/pkg/errors" + + "github.com/google/fscrypt/crypto" + "github.com/google/fscrypt/filesystem" + "github.com/google/fscrypt/metadata" +) + +// ProtectorInfo is the information a caller will receive about a Protector +// before they have to return the corresponding key. This is currently a +// read-only view of metadata.ProtectorData. +type ProtectorInfo struct { + data *metadata.ProtectorData +} + +// Descriptor is the Protector's descriptor used to uniquely identify it. +func (pi *ProtectorInfo) Descriptor() string { return pi.data.GetProtectorDescriptor() } + +// Source indicates the type of the descriptor (how it should be unlocked). +func (pi *ProtectorInfo) Source() metadata.SourceType { return pi.data.GetSource() } + +// Name is used to describe custom passphrase and raw key descriptors. +func (pi *ProtectorInfo) Name() string { return pi.data.GetName() } + +// UID is used to identify the user for login passphrases. +func (pi *ProtectorInfo) UID() int64 { return pi.data.GetUid() } + +// KeyFunc is passed to a function that will require some type of key. +// The info parameter is provided so the callback knows which key to provide. +// The retry parameter indicates that a previous key provided by this callback +// was incorrect (this allows for user feedback like "incorrect passphrase"). +// +// For passphrase sources, the returned key should be a passphrase. For raw +// sources, the returned key should be a 256-bit cryptographic key. Consumers +// of the callback will wipe the returned key. An error returned by the callback +// will be propagated back to the caller. +type KeyFunc func(info ProtectorInfo, retry bool) (*crypto.Key, error) + +// getWrappingKey uses the provided callback to get the wrapping key +// corresponding to the ProtectorInfo. This runs the passphrase hash for +// passphrase sources or just relays the callback for raw sources. +func getWrappingKey(info ProtectorInfo, keyFn KeyFunc, retry bool) (*crypto.Key, error) { + // For raw key sources, we can just use the key directly. + if info.Source() == metadata.SourceType_raw_key { + return keyFn(info, retry) + } + + // Run the passphrase hash for other sources. 
+ passphrase, err := keyFn(info, retry) + if err != nil { + return nil, err + } + defer passphrase.Wipe() + + log.Printf("running passphrase hash for protector %s", info.Descriptor()) + return crypto.PassphraseHash(passphrase, info.data.Salt, info.data.Costs) +} + +// unwrapProtectorKey uses the provided callback and ProtectorInfo to return +// the unwrapped protector key. This will repeatedly call keyFn to get the +// wrapping key until the correct key is returned by the callback or the +// callback returns an error. +func unwrapProtectorKey(info ProtectorInfo, keyFn KeyFunc) (*crypto.Key, error) { + retry := false + for { + wrappingKey, err := getWrappingKey(info, keyFn, retry) + if err != nil { + return nil, err + } + + protectorKey, err := crypto.Unwrap(wrappingKey, info.data.WrappedKey) + wrappingKey.Wipe() + + switch errors.Cause(err) { + case nil: + log.Printf("valid wrapping key for protector %s", info.Descriptor()) + return protectorKey, nil + case crypto.ErrBadAuth: + // After the first failure, we let the callback know we are retrying. + log.Printf("invalid wrapping key for protector %s", info.Descriptor()) + retry = true + continue + default: + return nil, err + } + } +} + +// ProtectorOption is information about a protector relative to a Policy. +type ProtectorOption struct { + ProtectorInfo + // LinkedMount is the mountpoint for a linked protector. It is nil if + // the protector is not a linked protector (or there is a LoadError). + LinkedMount *filesystem.Mount + // LoadError is non-nil if there was an error in getting the data for + // the protector. + LoadError error +} + +// OptionFunc is passed to a function that needs to unlock a Policy. +// The callback is used to specify which protector should be used to unlock a +// Policy. The descriptor indicates which Policy we are using, while the options +// correspond to the valid Protectors protecting the Policy. +// +// The OptionFunc should either return a valid index into options, which +// corresponds to the desired protector, or an error (which will be propagated +// back to the caller). +type OptionFunc func(policyDescriptor string, options []*ProtectorOption) (int, error) diff --git a/vendor/github.com/google/fscrypt/actions/config.go b/vendor/github.com/google/fscrypt/actions/config.go new file mode 100644 index 000000000..a8eb029db --- /dev/null +++ b/vendor/github.com/google/fscrypt/actions/config.go @@ -0,0 +1,293 @@ +/* + * config.go - Actions for creating a new config file, which includes new + * hashing costs and the config file's location. + * + * Copyright 2017 Google Inc. + * Author: Joe Richey (joerichey@google.com) + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not + * use this file except in compliance with the License. You may obtain a copy of + * the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. 
+ */ + +package actions + +import ( + "bytes" + "fmt" + "log" + "os" + "runtime" + "time" + + "golang.org/x/sys/unix" + + "github.com/google/fscrypt/crypto" + "github.com/google/fscrypt/filesystem" + "github.com/google/fscrypt/metadata" + "github.com/google/fscrypt/util" +) + +// ConfigFileLocation is the location of fscrypt's global settings. This can be +// overridden by the user of this package. +var ConfigFileLocation = "/etc/fscrypt.conf" + +// ErrBadConfig is an internal error that indicates that the config struct is invalid. +type ErrBadConfig struct { + Config *metadata.Config + UnderlyingError error +} + +func (err *ErrBadConfig) Error() string { + return fmt.Sprintf(`internal error: config is invalid: %s + + The invalid config is %s`, err.UnderlyingError, err.Config) +} + +// ErrBadConfigFile indicates that the config file is invalid. +type ErrBadConfigFile struct { + Path string + UnderlyingError error +} + +func (err *ErrBadConfigFile) Error() string { + return fmt.Sprintf("%q is invalid: %s", err.Path, err.UnderlyingError) +} + +// ErrConfigFileExists indicates that the config file already exists. +type ErrConfigFileExists struct { + Path string +} + +func (err *ErrConfigFileExists) Error() string { + return fmt.Sprintf("%q already exists", err.Path) +} + +// ErrNoConfigFile indicates that the config file doesn't exist. +type ErrNoConfigFile struct { + Path string +} + +func (err *ErrNoConfigFile) Error() string { + return fmt.Sprintf("%q doesn't exist", err.Path) +} + +const ( + // Permissions of the config file (global readable) + configPermissions = 0644 + // Config file should be created for writing and not already exist + createFlags = os.O_CREATE | os.O_WRONLY | os.O_EXCL + // 128 MiB is a large enough amount of memory to make the password hash + // very difficult to brute force on specialized hardware, but small + // enough to work on most GNU/Linux systems. + maxMemoryBytes = 128 * 1024 * 1024 +) + +var ( + timingPassphrase = []byte("I am a fake passphrase") + timingSalt = bytes.Repeat([]byte{42}, metadata.SaltLen) +) + +// CreateConfigFile creates a new config file at the appropriate location with +// the appropriate hashing costs and encryption parameters. The hashing will be +// configured to take as long as the specified time target. In addition, the +// version of encryption policy to use may be overridden from the default of v1. +func CreateConfigFile(target time.Duration, policyVersion int64) error { + // Create the config file before computing the hashing costs, so we fail + // immediately if the program has insufficient permissions. + configFile, err := filesystem.OpenFileOverridingUmask(ConfigFileLocation, + createFlags, configPermissions) + switch { + case os.IsExist(err): + return &ErrConfigFileExists{ConfigFileLocation} + case err != nil: + return err + } + defer configFile.Close() + + config := &metadata.Config{ + Source: metadata.DefaultSource, + Options: metadata.DefaultOptions, + } + + if policyVersion != 0 { + config.Options.PolicyVersion = policyVersion + } + + if config.HashCosts, err = getHashingCosts(target); err != nil { + return err + } + + log.Printf("Creating config at %q with %v\n", ConfigFileLocation, config) + return metadata.WriteConfig(config, configFile) +} + +// getConfig returns the current configuration struct. Any fields not specified +// in the config file use the system defaults. An error is returned if the +// config file hasn't been setup with CreateConfigFile yet or the config +// contains invalid data. 
+func getConfig() (*metadata.Config, error) { + configFile, err := os.Open(ConfigFileLocation) + switch { + case os.IsNotExist(err): + return nil, &ErrNoConfigFile{ConfigFileLocation} + case err != nil: + return nil, err + } + defer configFile.Close() + + log.Printf("Reading config from %q\n", ConfigFileLocation) + config, err := metadata.ReadConfig(configFile) + if err != nil { + return nil, &ErrBadConfigFile{ConfigFileLocation, err} + } + + // Use system defaults if not specified + if config.Source == metadata.SourceType_default { + config.Source = metadata.DefaultSource + log.Printf("Falling back to source of %q", config.Source.String()) + } + if config.Options.Padding == 0 { + config.Options.Padding = metadata.DefaultOptions.Padding + log.Printf("Falling back to padding of %d", config.Options.Padding) + } + if config.Options.Contents == metadata.EncryptionOptions_default { + config.Options.Contents = metadata.DefaultOptions.Contents + log.Printf("Falling back to contents mode of %q", config.Options.Contents) + } + if config.Options.Filenames == metadata.EncryptionOptions_default { + config.Options.Filenames = metadata.DefaultOptions.Filenames + log.Printf("Falling back to filenames mode of %q", config.Options.Filenames) + } + if config.Options.PolicyVersion == 0 { + config.Options.PolicyVersion = metadata.DefaultOptions.PolicyVersion + log.Printf("Falling back to policy version of %d", config.Options.PolicyVersion) + } + + if err := config.CheckValidity(); err != nil { + return nil, &ErrBadConfigFile{ConfigFileLocation, err} + } + + return config, nil +} + +// getHashingCosts returns hashing costs so that hashing a password will take +// approximately the target time. This is done using the total amount of RAM, +// the number of CPUs present, and by running the passphrase hash many times. +func getHashingCosts(target time.Duration) (*metadata.HashingCosts, error) { + log.Printf("Finding hashing costs that take %v\n", target) + + // Start out with the minimal possible costs that use all the CPUs. + nCPUs := int64(runtime.NumCPU()) + costs := &metadata.HashingCosts{ + Time: 1, + Memory: 8 * nCPUs, + Parallelism: nCPUs, + } + + // If even the minimal costs are not fast enough, just return the + // minimal costs and log a warning. + t, err := timeHashingCosts(costs) + if err != nil { + return nil, err + } + log.Printf("Min Costs={%v}\t-> %v\n", costs, t) + + if t > target { + log.Printf("time exceeded the target of %v.\n", target) + return costs, nil + } + + // Now we start doubling the costs until we reach the target. + memoryKiBLimit := memoryBytesLimit() / 1024 + for { + // Store a copy of the previous costs + costsPrev := *costs + tPrev := t + + // Double the memory up to the max, then double the time. + if costs.Memory < memoryKiBLimit { + costs.Memory = util.MinInt64(2*costs.Memory, memoryKiBLimit) + } else { + costs.Time *= 2 + } + + // If our hashing failed, return the last good set of costs. + if t, err = timeHashingCosts(costs); err != nil { + log.Printf("Hashing with costs={%v} failed: %v\n", costs, err) + return &costsPrev, nil + } + log.Printf("Costs={%v}\t-> %v\n", costs, t) + + // If we have reached the target time, we return a set of costs + // based on the linear interpolation between the last two times. 
+ if t >= target { + f := float64(target-tPrev) / float64(t-tPrev) + return &metadata.HashingCosts{ + Time: betweenCosts(costsPrev.Time, costs.Time, f), + Memory: betweenCosts(costsPrev.Memory, costs.Memory, f), + Parallelism: costs.Parallelism, + }, nil + } + } +} + +// memoryBytesLimit returns the maximum amount of memory we will use for +// passphrase hashing. This will never be more than a reasonable maximum (for +// compatibility) or an 8th the available system RAM. +func memoryBytesLimit() int64 { + // The sysinfo syscall only fails if given a bad address + var info unix.Sysinfo_t + err := unix.Sysinfo(&info) + util.NeverError(err) + + totalRAMBytes := int64(info.Totalram) + return util.MinInt64(totalRAMBytes/8, maxMemoryBytes) +} + +// betweenCosts returns a cost between a and b. Specifically, it returns the +// floor of a + f*(b-a). This way, f=0 returns a and f=1 returns b. +func betweenCosts(a, b int64, f float64) int64 { + return a + int64(f*float64(b-a)) +} + +// timeHashingCosts runs the passphrase hash with the specified costs and +// returns the time it takes to hash the passphrase. +func timeHashingCosts(costs *metadata.HashingCosts) (time.Duration, error) { + passphrase, err := crypto.NewKeyFromReader(bytes.NewReader(timingPassphrase)) + if err != nil { + return 0, err + } + defer passphrase.Wipe() + + // Be sure to measure CPU time, not wall time (time.Now) + begin := cpuTimeInNanoseconds() + hash, err := crypto.PassphraseHash(passphrase, timingSalt, costs) + if err == nil { + hash.Wipe() + } + end := cpuTimeInNanoseconds() + + // This uses a lot of memory, run the garbage collector + runtime.GC() + + return time.Duration((end - begin) / costs.Parallelism), nil +} + +// cpuTimeInNanoseconds returns the nanosecond count based on the process's CPU usage. +// This number has no absolute meaning, only relative meaning to other calls. +func cpuTimeInNanoseconds() int64 { + var ts unix.Timespec + err := unix.ClockGettime(unix.CLOCK_PROCESS_CPUTIME_ID, &ts) + // ClockGettime fails if given a bad address or on a VERY old system. + util.NeverError(err) + return unix.TimespecToNsec(ts) +} diff --git a/vendor/github.com/google/fscrypt/actions/context.go b/vendor/github.com/google/fscrypt/actions/context.go new file mode 100644 index 000000000..ac3f6d304 --- /dev/null +++ b/vendor/github.com/google/fscrypt/actions/context.go @@ -0,0 +1,184 @@ +/* + * context.go - top-level interface to fscrypt packages + * + * Copyright 2017 Google Inc. + * Author: Joe Richey (joerichey@google.com) + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not + * use this file except in compliance with the License. You may obtain a copy of + * the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. + */ + +// Package actions is the high-level interface to the fscrypt packages. The +// functions here roughly correspond with commands for the tool in cmd/fscrypt. +// All of the actions include a significant amount of logging, so that good +// output can be provided for cmd/fscrypt's verbose mode. 
+// The top-level actions currently include: +// - Creating a new config file +// - Creating a context on which to perform actions +// - Creating, unlocking, and modifying Protectors +// - Creating, unlocking, and modifying Policies +package actions + +import ( + "log" + "os/user" + + "github.com/pkg/errors" + + "github.com/google/fscrypt/filesystem" + "github.com/google/fscrypt/keyring" + "github.com/google/fscrypt/metadata" + "github.com/google/fscrypt/util" +) + +// ErrLocked indicates that the key hasn't been unwrapped yet. +var ErrLocked = errors.New("key needs to be unlocked first") + +// Context contains the necessary global state to perform most of fscrypt's +// actions. +type Context struct { + // Config is the struct loaded from the global config file. It can be + // modified after being loaded to customise parameters. + Config *metadata.Config + // Mount is the filesystem relative to which all Protectors and Policies + // are added, edited, removed, and applied, and to which policies using + // the filesystem keyring are provisioned. + Mount *filesystem.Mount + // TargetUser is the user for whom protectors are created, and to whose + // keyring policies using the user keyring are provisioned. It's also + // the user for whom the keys are claimed in the filesystem keyring when + // v2 policies are provisioned. + TargetUser *user.User + // TrustedUser is the user for whom policies and protectors are allowed + // to be read. Specifically, if TrustedUser is set, then only + // policies and protectors owned by TrustedUser or by root will be + // allowed to be read. If it's nil, then all policies and protectors + // the process has filesystem-level read access to will be allowed. + TrustedUser *user.User +} + +// NewContextFromPath makes a context for the filesystem containing the +// specified path and whose Config is loaded from the global config file. On +// success, the Context contains a valid Config and Mount. The target user +// defaults to the current effective user if none is specified. +func NewContextFromPath(path string, targetUser *user.User) (*Context, error) { + ctx, err := newContextFromUser(targetUser) + if err != nil { + return nil, err + } + if ctx.Mount, err = filesystem.FindMount(path); err != nil { + return nil, err + } + + log.Printf("%s is on %s filesystem %q (%s)", path, + ctx.Mount.FilesystemType, ctx.Mount.Path, ctx.Mount.Device) + return ctx, nil +} + +// NewContextFromMountpoint makes a context for the filesystem at the specified +// mountpoint and whose Config is loaded from the global config file. On +// success, the Context contains a valid Config and Mount. The target user +// defaults to the current effective user if none is specified. +func NewContextFromMountpoint(mountpoint string, targetUser *user.User) (*Context, error) { + ctx, err := newContextFromUser(targetUser) + if err != nil { + return nil, err + } + if ctx.Mount, err = filesystem.GetMount(mountpoint); err != nil { + return nil, err + } + + log.Printf("found %s filesystem %q (%s)", ctx.Mount.FilesystemType, + ctx.Mount.Path, ctx.Mount.Device) + return ctx, nil +} + +// newContextFromUser makes a context with the corresponding target user, and +// whose Config is loaded from the global config file. If the target user is +// nil, the effective user is used. 
+func newContextFromUser(targetUser *user.User) (*Context, error) { + var err error + if targetUser == nil { + if targetUser, err = util.EffectiveUser(); err != nil { + return nil, err + } + } + + ctx := &Context{TargetUser: targetUser} + if ctx.Config, err = getConfig(); err != nil { + return nil, err + } + + // By default, when running as a non-root user we only read policies and + // protectors owned by the user or root. When running as root, we allow + // reading all policies and protectors. + if !ctx.Config.GetAllowCrossUserMetadata() && !util.IsUserRoot() { + ctx.TrustedUser, err = util.EffectiveUser() + if err != nil { + return nil, err + } + } + + log.Printf("creating context for user %q", targetUser.Username) + return ctx, nil +} + +// checkContext verifies that the context contains a valid config and a mount +// which is being used with fscrypt. +func (ctx *Context) checkContext() error { + if err := ctx.Config.CheckValidity(); err != nil { + return &ErrBadConfig{ctx.Config, err} + } + return ctx.Mount.CheckSetup(ctx.TrustedUser) +} + +func (ctx *Context) getKeyringOptions() *keyring.Options { + return &keyring.Options{ + Mount: ctx.Mount, + User: ctx.TargetUser, + UseFsKeyringForV1Policies: ctx.Config.GetUseFsKeyringForV1Policies(), + } +} + +// getProtectorOption returns the ProtectorOption for the protector on the +// context's mountpoint with the specified descriptor. +func (ctx *Context) getProtectorOption(protectorDescriptor string) *ProtectorOption { + mnt, data, err := ctx.Mount.GetProtector(protectorDescriptor, ctx.TrustedUser) + if err != nil { + return &ProtectorOption{ProtectorInfo{}, nil, err} + } + + info := ProtectorInfo{data} + // No linked path if on the same mountpoint + if mnt == ctx.Mount { + return &ProtectorOption{info, nil, nil} + } + return &ProtectorOption{info, mnt, nil} +} + +// ProtectorOptions creates a slice of all the options for all of the Protectors +// on the Context's mountpoint. +func (ctx *Context) ProtectorOptions() ([]*ProtectorOption, error) { + if err := ctx.checkContext(); err != nil { + return nil, err + } + descriptors, err := ctx.Mount.ListProtectors(ctx.TrustedUser) + if err != nil { + return nil, err + } + + options := make([]*ProtectorOption, len(descriptors)) + for i, descriptor := range descriptors { + options[i] = ctx.getProtectorOption(descriptor) + } + return options, nil +} diff --git a/vendor/github.com/google/fscrypt/actions/policy.go b/vendor/github.com/google/fscrypt/actions/policy.go new file mode 100644 index 000000000..3b2017693 --- /dev/null +++ b/vendor/github.com/google/fscrypt/actions/policy.go @@ -0,0 +1,622 @@ +/* + * policy.go - functions for dealing with policies + * + * Copyright 2017 Google Inc. + * Author: Joe Richey (joerichey@google.com) + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not + * use this file except in compliance with the License. You may obtain a copy of + * the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. 
+ */ + +package actions + +import ( + "fmt" + "log" + "os" + "os/user" + + "github.com/golang/protobuf/proto" + "github.com/pkg/errors" + + "github.com/google/fscrypt/crypto" + "github.com/google/fscrypt/filesystem" + "github.com/google/fscrypt/keyring" + "github.com/google/fscrypt/metadata" + "github.com/google/fscrypt/util" +) + +// ErrAccessDeniedPossiblyV2 indicates that a directory's encryption policy +// couldn't be retrieved due to "permission denied", but it looks like it's due +// to the directory using a v2 policy but the kernel not supporting it. +type ErrAccessDeniedPossiblyV2 struct { + DirPath string +} + +func (err *ErrAccessDeniedPossiblyV2) Error() string { + return fmt.Sprintf(` + failed to get encryption policy of %s: permission denied + + This may be caused by the directory using a v2 encryption policy and the + current kernel not supporting it. If indeed the case, then this + directory can only be used on kernel v5.4 and later. You can create + directories accessible on older kernels by changing policy_version to 1 + in %s.`, + err.DirPath, ConfigFileLocation) +} + +// ErrAlreadyProtected indicates that a policy is already protected by the given +// protector. +type ErrAlreadyProtected struct { + Policy *Policy + Protector *Protector +} + +func (err *ErrAlreadyProtected) Error() string { + return fmt.Sprintf("policy %s is already protected by protector %s", + err.Policy.Descriptor(), err.Protector.Descriptor()) +} + +// ErrDifferentFilesystem indicates that a policy can't be applied to a +// directory on a different filesystem. +type ErrDifferentFilesystem struct { + PolicyMount *filesystem.Mount + PathMount *filesystem.Mount +} + +func (err *ErrDifferentFilesystem) Error() string { + return fmt.Sprintf(`cannot apply policy from filesystem %q to a + directory on filesystem %q. Policies may only protect files on the same + filesystem.`, err.PolicyMount.Path, err.PathMount.Path) +} + +// ErrMissingPolicyMetadata indicates that a directory is encrypted but its +// policy metadata cannot be found. +type ErrMissingPolicyMetadata struct { + Mount *filesystem.Mount + DirPath string + Descriptor string +} + +func (err *ErrMissingPolicyMetadata) Error() string { + return fmt.Sprintf(`filesystem %q does not contain the policy metadata + for %q. This directory has either been encrypted with another tool (such + as e4crypt), or the file %q has been deleted.`, + err.Mount.Path, err.DirPath, + err.Mount.PolicyPath(err.Descriptor)) +} + +// ErrNotProtected indicates that the given policy is not protected by the given +// protector. +type ErrNotProtected struct { + PolicyDescriptor string + ProtectorDescriptor string +} + +func (err *ErrNotProtected) Error() string { + return fmt.Sprintf(`policy %s is not protected by protector %s`, + err.PolicyDescriptor, err.ProtectorDescriptor) +} + +// ErrOnlyProtector indicates that the last protector can't be removed from a +// policy. +type ErrOnlyProtector struct { + Policy *Policy +} + +func (err *ErrOnlyProtector) Error() string { + return fmt.Sprintf(`cannot remove the only protector from policy %s. A + policy must have at least one protector.`, err.Policy.Descriptor()) +} + +// ErrPolicyMetadataMismatch indicates that the policy metadata for an encrypted +// directory is inconsistent with that directory. 
+type ErrPolicyMetadataMismatch struct { + DirPath string + Mount *filesystem.Mount + PathData *metadata.PolicyData + MountData *metadata.PolicyData +} + +func (err *ErrPolicyMetadataMismatch) Error() string { + return fmt.Sprintf(`inconsistent metadata between encrypted directory %q + and its corresponding metadata file %q. + + Directory has descriptor:%s %s + + Metadata file has descriptor:%s %s`, + err.DirPath, err.Mount.PolicyPath(err.PathData.KeyDescriptor), + err.PathData.KeyDescriptor, err.PathData.Options, + err.MountData.KeyDescriptor, err.MountData.Options) +} + +// PurgeAllPolicies removes all policy keys on the filesystem from the kernel +// keyring. In order for this to fully take effect, the filesystem may also need +// to be unmounted or caches dropped. +func PurgeAllPolicies(ctx *Context) error { + if err := ctx.checkContext(); err != nil { + return err + } + policies, err := ctx.Mount.ListPolicies(nil) + if err != nil { + return err + } + + for _, policyDescriptor := range policies { + err = keyring.RemoveEncryptionKey(policyDescriptor, ctx.getKeyringOptions(), false) + switch errors.Cause(err) { + case nil, keyring.ErrKeyNotPresent: + // We don't care if the key has already been removed + case keyring.ErrKeyFilesOpen: + log.Printf("Key for policy %s couldn't be fully removed because some files are still in-use", + policyDescriptor) + case keyring.ErrKeyAddedByOtherUsers: + log.Printf("Key for policy %s couldn't be fully removed because other user(s) have added it too", + policyDescriptor) + default: + return err + } + } + return nil +} + +// Policy represents an unlocked policy, so it contains the PolicyData as well +// as the actual protector key. These unlocked Polices can then be applied to a +// directory, or have their key material inserted into the keyring (which will +// allow encrypted files to be accessed). As with the key struct, a Policy +// should be wiped after use. +type Policy struct { + Context *Context + data *metadata.PolicyData + key *crypto.Key + created bool + ownerIfCreating *user.User + newLinkedProtectors []string +} + +// CreatePolicy creates a Policy protected by given Protector and stores the +// appropriate data on the filesystem. On error, no data is changed on the +// filesystem. +func CreatePolicy(ctx *Context, protector *Protector) (*Policy, error) { + if err := ctx.checkContext(); err != nil { + return nil, err + } + // Randomly create the underlying policy key (and wipe if we fail) + key, err := crypto.NewRandomKey(metadata.PolicyKeyLen) + if err != nil { + return nil, err + } + + keyDescriptor, err := crypto.ComputeKeyDescriptor(key, ctx.Config.Options.PolicyVersion) + if err != nil { + key.Wipe() + return nil, err + } + + policy := &Policy{ + Context: ctx, + data: &metadata.PolicyData{ + Options: ctx.Config.Options, + KeyDescriptor: keyDescriptor, + }, + key: key, + created: true, + } + + policy.ownerIfCreating, err = getOwnerOfMetadataForProtector(protector) + if err != nil { + policy.Lock() + return nil, err + } + + if err = policy.AddProtector(protector); err != nil { + policy.Lock() + return nil, err + } + + return policy, nil +} + +// GetPolicy retrieves a locked policy with a specific descriptor. The Policy is +// still locked in this case, so it must be unlocked before using certain +// methods. 
+func GetPolicy(ctx *Context, descriptor string) (*Policy, error) { + if err := ctx.checkContext(); err != nil { + return nil, err + } + data, err := ctx.Mount.GetPolicy(descriptor, ctx.TrustedUser) + if err != nil { + return nil, err + } + log.Printf("got data for %s from %q", descriptor, ctx.Mount.Path) + + return &Policy{Context: ctx, data: data}, nil +} + +// GetPolicyFromPath returns the locked policy descriptor for a file on the +// filesystem. The Policy is still locked in this case, so it must be unlocked +// before using certain methods. An error is returned if the metadata is +// inconsistent or the path is not encrypted. +func GetPolicyFromPath(ctx *Context, path string) (*Policy, error) { + if err := ctx.checkContext(); err != nil { + return nil, err + } + + // We double check that the options agree for both the data we get from + // the path, and the data we get from the mountpoint. + pathData, err := metadata.GetPolicy(path) + err = ctx.Mount.EncryptionSupportError(err) + if err != nil { + // On kernels that don't support v2 encryption policies, trying + // to open a directory with a v2 policy simply gave EACCES. This + // is ambiguous with other errors, but try to detect this case + // and show a better error message. + if os.IsPermission(err) && + filesystem.HaveReadAccessTo(path) && + !keyring.IsFsKeyringSupported(ctx.Mount) { + return nil, &ErrAccessDeniedPossiblyV2{path} + } + return nil, err + } + descriptor := pathData.KeyDescriptor + log.Printf("found policy %s for %q", descriptor, path) + + mountData, err := ctx.Mount.GetPolicy(descriptor, ctx.TrustedUser) + if err != nil { + log.Printf("getting policy metadata: %v", err) + if _, ok := err.(*filesystem.ErrPolicyNotFound); ok { + return nil, &ErrMissingPolicyMetadata{ctx.Mount, path, descriptor} + } + return nil, err + } + log.Printf("found data for policy %s on %q", descriptor, ctx.Mount.Path) + + if !proto.Equal(pathData.Options, mountData.Options) || + pathData.KeyDescriptor != mountData.KeyDescriptor { + return nil, &ErrPolicyMetadataMismatch{path, ctx.Mount, pathData, mountData} + } + log.Print("data from filesystem and path agree") + + return &Policy{Context: ctx, data: mountData}, nil +} + +// ProtectorOptions creates a slice of ProtectorOptions for the protectors +// protecting this policy. +func (policy *Policy) ProtectorOptions() []*ProtectorOption { + options := make([]*ProtectorOption, len(policy.data.WrappedPolicyKeys)) + for i, wrappedPolicyKey := range policy.data.WrappedPolicyKeys { + options[i] = policy.Context.getProtectorOption(wrappedPolicyKey.ProtectorDescriptor) + } + return options +} + +// ProtectorDescriptors creates a slice of the Protector descriptors for the +// protectors protecting this policy. +func (policy *Policy) ProtectorDescriptors() []string { + descriptors := make([]string, len(policy.data.WrappedPolicyKeys)) + for i, wrappedPolicyKey := range policy.data.WrappedPolicyKeys { + descriptors[i] = wrappedPolicyKey.ProtectorDescriptor + } + return descriptors +} + +// Descriptor returns the key descriptor for this policy. +func (policy *Policy) Descriptor() string { + return policy.data.KeyDescriptor +} + +// Options returns the encryption options of this policy. +func (policy *Policy) Options() *metadata.EncryptionOptions { + return policy.data.Options +} + +// Version returns the version of this policy. +func (policy *Policy) Version() int64 { + return policy.data.Options.PolicyVersion +} + +// Destroy removes a policy from the filesystem. 
It also removes any new +// protector links that were created for the policy. This does *not* wipe the +// policy's internal key from memory; use Lock() to do that. +func (policy *Policy) Destroy() error { + for _, protectorDescriptor := range policy.newLinkedProtectors { + policy.Context.Mount.RemoveProtector(protectorDescriptor) + } + return policy.Context.Mount.RemovePolicy(policy.Descriptor()) +} + +// Revert destroys a policy if it was created, but does nothing if it was just +// queried from the filesystem. +func (policy *Policy) Revert() error { + if !policy.created { + return nil + } + return policy.Destroy() +} + +func (policy *Policy) String() string { + return fmt.Sprintf("Policy: %s\nMountpoint: %s\nOptions: %v\nProtectors:%+v", + policy.Descriptor(), policy.Context.Mount, policy.data.Options, + policy.ProtectorDescriptors()) +} + +// Unlock unwraps the Policy's internal key. As a Protector is needed to unlock +// the Policy, callbacks to select the Policy and get the key are needed. This +// method will retry the keyFn as necessary to get the correct key for the +// selected protector. Does nothing if policy is already unlocked. +func (policy *Policy) Unlock(optionFn OptionFunc, keyFn KeyFunc) error { + if policy.key != nil { + return nil + } + options := policy.ProtectorOptions() + + // The OptionFunc indicates which option and wrapped key we should use. + idx, err := optionFn(policy.Descriptor(), options) + if err != nil { + return err + } + option := options[idx] + if option.LoadError != nil { + return option.LoadError + } + + log.Printf("protector %s selected in callback", option.Descriptor()) + protectorKey, err := unwrapProtectorKey(option.ProtectorInfo, keyFn) + if err != nil { + return err + } + defer protectorKey.Wipe() + + log.Printf("unwrapping policy %s with protector", policy.Descriptor()) + wrappedPolicyKey := policy.data.WrappedPolicyKeys[idx].WrappedKey + policy.key, err = crypto.Unwrap(protectorKey, wrappedPolicyKey) + return err +} + +// UnlockWithProtector uses an unlocked Protector to unlock a policy. An error +// is returned if the Protector is not yet unlocked or does not protect the +// policy. Does nothing if policy is already unlocked. +func (policy *Policy) UnlockWithProtector(protector *Protector) error { + if policy.key != nil { + return nil + } + if protector.key == nil { + return ErrLocked + } + idx, ok := policy.findWrappedKeyIndex(protector.Descriptor()) + if !ok { + return &ErrNotProtected{policy.Descriptor(), protector.Descriptor()} + } + + var err error + wrappedPolicyKey := policy.data.WrappedPolicyKeys[idx].WrappedKey + policy.key, err = crypto.Unwrap(protector.key, wrappedPolicyKey) + return err +} + +// Lock wipes a Policy's internal Key. It should always be called after using a +// Policy. This is often done with a defer statement. There is no effect if +// called multiple times. +func (policy *Policy) Lock() error { + err := policy.key.Wipe() + policy.key = nil + return err +} + +// UsesProtector returns if the policy is protected with the protector +func (policy *Policy) UsesProtector(protector *Protector) bool { + _, ok := policy.findWrappedKeyIndex(protector.Descriptor()) + return ok +} + +// getOwnerOfMetadataForProtector returns the User to whom the owner of any new +// policies or protector links for the given protector should be set. +// +// This will return a non-nil value only when the protector is a login protector +// and the process is running as root. 
In this scenario, root is setting up +// encryption on the user's behalf, so we need to make new policies and +// protector links owned by the user (rather than root) to allow them to be read +// by the user, just like the login protector itself which is handled elsewhere. +func getOwnerOfMetadataForProtector(protector *Protector) (*user.User, error) { + if protector.data.Source == metadata.SourceType_pam_passphrase && util.IsUserRoot() { + owner, err := util.UserFromUID(protector.data.Uid) + if err != nil { + return nil, err + } + return owner, nil + } + return nil, nil +} + +// AddProtector updates the data that is wrapping the Policy Key so that the +// provided Protector is now protecting the specified Policy. If an error is +// returned, no data has been changed. If the policy and protector are on +// different filesystems, a link will be created between them. The policy and +// protector must both be unlocked. +func (policy *Policy) AddProtector(protector *Protector) error { + if policy.UsesProtector(protector) { + return &ErrAlreadyProtected{policy, protector} + } + if policy.key == nil || protector.key == nil { + return ErrLocked + } + + // If the protector is on a different filesystem, we need to add a link + // to it on the policy's filesystem. + if policy.Context.Mount != protector.Context.Mount { + log.Printf("policy on %s\n protector on %s\n", policy.Context.Mount, protector.Context.Mount) + ownerIfCreating, err := getOwnerOfMetadataForProtector(protector) + if err != nil { + return err + } + isNewLink, err := policy.Context.Mount.AddLinkedProtector( + protector.Descriptor(), protector.Context.Mount, + protector.Context.TrustedUser, ownerIfCreating) + if err != nil { + return err + } + if isNewLink { + policy.newLinkedProtectors = append(policy.newLinkedProtectors, + protector.Descriptor()) + } + } else { + log.Printf("policy and protector both on %q", policy.Context.Mount) + } + + // Create the wrapped policy key + wrappedKey, err := crypto.Wrap(protector.key, policy.key) + if err != nil { + return err + } + + // Append the wrapped key to the data + policy.addKey(&metadata.WrappedPolicyKey{ + ProtectorDescriptor: protector.Descriptor(), + WrappedKey: wrappedKey, + }) + + if err := policy.commitData(); err != nil { + // revert the addition on failure + policy.removeKey(len(policy.data.WrappedPolicyKeys) - 1) + return err + } + return nil +} + +// RemoveProtector updates the data that is wrapping the Policy Key so that the +// protector with the given descriptor is no longer protecting the specified +// Policy. If an error is returned, no data has been changed. Note that the +// protector itself won't be removed, nor will a link to the protector be +// removed (in the case where the protector and policy are on different +// filesystems). The policy can be locked or unlocked. +func (policy *Policy) RemoveProtector(protectorDescriptor string) error { + idx, ok := policy.findWrappedKeyIndex(protectorDescriptor) + if !ok { + return &ErrNotProtected{policy.Descriptor(), protectorDescriptor} + } + + if len(policy.data.WrappedPolicyKeys) == 1 { + return &ErrOnlyProtector{policy} + } + + // Remove the wrapped key from the data + toRemove := policy.removeKey(idx) + + if err := policy.commitData(); err != nil { + // revert the removal on failure (order is irrelevant) + policy.addKey(toRemove) + return err + } + return nil +} + +// Apply sets the Policy on a specified directory. 
Currently we impose the +// additional constraint that policies and the directories they are applied to +// must reside on the same filesystem. +func (policy *Policy) Apply(path string) error { + if pathMount, err := filesystem.FindMount(path); err != nil { + return err + } else if pathMount != policy.Context.Mount { + return &ErrDifferentFilesystem{policy.Context.Mount, pathMount} + } + + err := metadata.SetPolicy(path, policy.data) + return policy.Context.Mount.EncryptionSupportError(err) +} + +// GetProvisioningStatus returns the status of this policy's key in the keyring. +func (policy *Policy) GetProvisioningStatus() keyring.KeyStatus { + status, _ := keyring.GetEncryptionKeyStatus(policy.Descriptor(), + policy.Context.getKeyringOptions()) + return status +} + +// IsProvisionedByTargetUser returns true if the policy's key is present in the +// target kernel keyring, but not if that keyring is a filesystem keyring and +// the key only been added by users other than Context.TargetUser. +func (policy *Policy) IsProvisionedByTargetUser() bool { + return policy.GetProvisioningStatus() == keyring.KeyPresent +} + +// Provision inserts the Policy key into the kernel keyring. This allows reading +// and writing of files encrypted with this directory. Requires unlocked Policy. +func (policy *Policy) Provision() error { + if policy.key == nil { + return ErrLocked + } + return keyring.AddEncryptionKey(policy.key, policy.Descriptor(), + policy.Context.getKeyringOptions()) +} + +// Deprovision removes the Policy key from the kernel keyring. This prevents +// reading and writing to the directory --- unless the target keyring is a user +// keyring, in which case caches must be dropped too. If the Policy key was +// already removed, returns keyring.ErrKeyNotPresent. +func (policy *Policy) Deprovision(allUsers bool) error { + return keyring.RemoveEncryptionKey(policy.Descriptor(), + policy.Context.getKeyringOptions(), allUsers) +} + +// NeedsUserKeyring returns true if Provision and Deprovision for this policy +// will use a user keyring (deprecated), not a filesystem keyring. +func (policy *Policy) NeedsUserKeyring() bool { + return policy.Version() == 1 && !policy.Context.Config.GetUseFsKeyringForV1Policies() +} + +// NeedsRootToProvision returns true if Provision and Deprovision will require +// root for this policy in the current configuration. +func (policy *Policy) NeedsRootToProvision() bool { + return policy.Version() == 1 && policy.Context.Config.GetUseFsKeyringForV1Policies() +} + +// CanBeAppliedWithoutProvisioning returns true if this process can apply this +// policy to a directory without first calling Provision. +func (policy *Policy) CanBeAppliedWithoutProvisioning() bool { + return policy.Version() == 1 || util.IsUserRoot() +} + +// commitData writes the Policy's current data to the filesystem. +func (policy *Policy) commitData() error { + return policy.Context.Mount.AddPolicy(policy.data, policy.ownerIfCreating) +} + +// findWrappedPolicyKey returns the index of the wrapped policy key +// corresponding to this policy and protector. The returned bool is false if no +// wrapped policy key corresponds to the specified protector, true otherwise. +func (policy *Policy) findWrappedKeyIndex(protectorDescriptor string) (int, bool) { + for idx, wrappedPolicyKey := range policy.data.WrappedPolicyKeys { + if wrappedPolicyKey.ProtectorDescriptor == protectorDescriptor { + return idx, true + } + } + return 0, false +} + +// addKey adds the wrapped policy key to end of the wrapped key data. 
+func (policy *Policy) addKey(toAdd *metadata.WrappedPolicyKey) { + policy.data.WrappedPolicyKeys = append(policy.data.WrappedPolicyKeys, toAdd) +} + +// removeKey removes the wrapped policy key at the specified index. This +// does not preserve the order of the wrapped policy key array. If no index is +// specified the last key is removed. +func (policy *Policy) removeKey(index int) *metadata.WrappedPolicyKey { + lastIdx := len(policy.data.WrappedPolicyKeys) - 1 + toRemove := policy.data.WrappedPolicyKeys[index] + + // See https://github.com/golang/go/wiki/SliceTricks + policy.data.WrappedPolicyKeys[index] = policy.data.WrappedPolicyKeys[lastIdx] + policy.data.WrappedPolicyKeys[lastIdx] = nil + policy.data.WrappedPolicyKeys = policy.data.WrappedPolicyKeys[:lastIdx] + + return toRemove +} diff --git a/vendor/github.com/google/fscrypt/actions/protector.go b/vendor/github.com/google/fscrypt/actions/protector.go new file mode 100644 index 000000000..b986eb020 --- /dev/null +++ b/vendor/github.com/google/fscrypt/actions/protector.go @@ -0,0 +1,300 @@ +/* + * protector.go - functions for dealing with protectors + * + * Copyright 2017 Google Inc. + * Author: Joe Richey (joerichey@google.com) + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not + * use this file except in compliance with the License. You may obtain a copy of + * the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. + */ + +package actions + +import ( + "fmt" + "log" + "os/user" + + "github.com/google/fscrypt/crypto" + "github.com/google/fscrypt/metadata" + "github.com/google/fscrypt/util" +) + +// LoginProtectorMountpoint is the mountpoint where login protectors are stored. +// This can be overridden by the user of this package. +var LoginProtectorMountpoint = "/" + +// ErrLoginProtectorExists indicates that a user already has a login protector. +type ErrLoginProtectorExists struct { + User *user.User +} + +func (err *ErrLoginProtectorExists) Error() string { + return fmt.Sprintf("user %q already has a login protector", err.User.Username) +} + +// ErrLoginProtectorName indicates that a name was given for a login protector. +type ErrLoginProtectorName struct { + Name string + User *user.User +} + +func (err *ErrLoginProtectorName) Error() string { + return fmt.Sprintf(`cannot assign name %q to new login protector for + user %q because login protectors are identified by user, not by name.`, + err.Name, err.User.Username) +} + +// ErrMissingProtectorName indicates that a protector name is needed. +type ErrMissingProtectorName struct { + Source metadata.SourceType +} + +func (err *ErrMissingProtectorName) Error() string { + return fmt.Sprintf("%s protectors must be named", err.Source) +} + +// ErrProtectorNameExists indicates that a protector name already exists. +type ErrProtectorNameExists struct { + Name string +} + +func (err *ErrProtectorNameExists) Error() string { + return fmt.Sprintf("there is already a protector named %q", err.Name) +} + +// checkForProtectorWithName returns an error if there is already a protector +// on the filesystem with a specific name (or if we cannot read the necessary +// data). 
+func checkForProtectorWithName(ctx *Context, name string) error { + options, err := ctx.ProtectorOptions() + if err != nil { + return err + } + for _, option := range options { + if option.Name() == name { + return &ErrProtectorNameExists{name} + } + } + return nil +} + +// checkIfUserHasLoginProtector returns an error if there is already a login +// protector on the filesystem for a specific user (or if we cannot read the +// necessary data). +func checkIfUserHasLoginProtector(ctx *Context, uid int64) error { + options, err := ctx.ProtectorOptions() + if err != nil { + return err + } + for _, option := range options { + if option.Source() == metadata.SourceType_pam_passphrase && option.UID() == uid { + return &ErrLoginProtectorExists{ctx.TargetUser} + } + } + return nil +} + +// Protector represents an unlocked protector, so it contains the ProtectorData +// as well as the actual protector key. These unlocked Protectors are necessary +// to unlock policies and create new polices. As with the key struct, a +// Protector should be wiped after use. +type Protector struct { + Context *Context + data *metadata.ProtectorData + key *crypto.Key + created bool + ownerIfCreating *user.User +} + +// CreateProtector creates an unlocked protector with a given name (name only +// needed for custom and raw protector types). The keyFn provided to create the +// Protector key will only be called once. If an error is returned, no data has +// been changed on the filesystem. +func CreateProtector(ctx *Context, name string, keyFn KeyFunc, owner *user.User) (*Protector, error) { + if err := ctx.checkContext(); err != nil { + return nil, err + } + // Sanity checks for names + if ctx.Config.Source == metadata.SourceType_pam_passphrase { + // login protectors don't need a name (we use the username instead) + if name != "" { + return nil, &ErrLoginProtectorName{name, ctx.TargetUser} + } + } else { + // non-login protectors need a name (so we can distinguish between them) + if name == "" { + return nil, &ErrMissingProtectorName{ctx.Config.Source} + } + // we don't want to duplicate naming + if err := checkForProtectorWithName(ctx, name); err != nil { + return nil, err + } + } + + var err error + protector := &Protector{ + Context: ctx, + data: &metadata.ProtectorData{ + Name: name, + Source: ctx.Config.Source, + }, + created: true, + ownerIfCreating: owner, + } + + // Extra data is needed for some SourceTypes + switch protector.data.Source { + case metadata.SourceType_pam_passphrase: + // As the pam passphrases are user specific, we also store the + // UID for this kind of source. + protector.data.Uid = int64(util.AtoiOrPanic(ctx.TargetUser.Uid)) + // Make sure we aren't duplicating protectors + if err = checkIfUserHasLoginProtector(ctx, protector.data.Uid); err != nil { + return nil, err + } + fallthrough + case metadata.SourceType_custom_passphrase: + // Our passphrase sources need costs and a random salt. 
+ if protector.data.Salt, err = crypto.NewRandomBuffer(metadata.SaltLen); err != nil { + return nil, err + } + + protector.data.Costs = ctx.Config.HashCosts + } + + // Randomly create the underlying protector key (and wipe if we fail) + if protector.key, err = crypto.NewRandomKey(metadata.InternalKeyLen); err != nil { + return nil, err + } + protector.data.ProtectorDescriptor, err = crypto.ComputeKeyDescriptor(protector.key, 1) + if err != nil { + protector.Lock() + return nil, err + } + + if err = protector.Rewrap(keyFn); err != nil { + protector.Lock() + return nil, err + } + + return protector, nil +} + +// GetProtector retrieves a Protector with a specific descriptor. The Protector +// is still locked in this case, so it must be unlocked before using certain +// methods. +func GetProtector(ctx *Context, descriptor string) (*Protector, error) { + log.Printf("Getting protector %s", descriptor) + err := ctx.checkContext() + if err != nil { + return nil, err + } + + protector := &Protector{Context: ctx} + protector.data, err = ctx.Mount.GetRegularProtector(descriptor, ctx.TrustedUser) + return protector, err +} + +// GetProtectorFromOption retrieves a protector based on a protector option. +// If the option had a load error, this function returns that error. The +// Protector is still locked in this case, so it must be unlocked before using +// certain methods. +func GetProtectorFromOption(ctx *Context, option *ProtectorOption) (*Protector, error) { + log.Printf("Getting protector %s from option", option.Descriptor()) + if err := ctx.checkContext(); err != nil { + return nil, err + } + if option.LoadError != nil { + return nil, option.LoadError + } + + // Replace the context if this is a linked protector + if option.LinkedMount != nil { + ctx = &Context{ctx.Config, option.LinkedMount, ctx.TargetUser, ctx.TrustedUser} + } + return &Protector{Context: ctx, data: option.data}, nil +} + +// Descriptor returns the protector descriptor. +func (protector *Protector) Descriptor() string { + return protector.data.ProtectorDescriptor +} + +// Destroy removes a protector from the filesystem. The internal key should +// still be wiped with Lock(). +func (protector *Protector) Destroy() error { + return protector.Context.Mount.RemoveProtector(protector.Descriptor()) +} + +// Revert destroys a protector if it was created, but does nothing if it was +// just queried from the filesystem. +func (protector *Protector) Revert() error { + if !protector.created { + return nil + } + return protector.Destroy() +} + +func (protector *Protector) String() string { + return fmt.Sprintf("Protector: %s\nMountpoint: %s\nSource: %s\nName: %s\nCosts: %v\nUID: %d", + protector.Descriptor(), protector.Context.Mount, protector.data.Source, + protector.data.Name, protector.data.Costs, protector.data.Uid) +} + +// Unlock unwraps the Protector's internal key. The keyFn provided to unwrap the +// Protector key will be retried as necessary to get the correct key. Lock() +// should be called after use. Does nothing if protector is already unlocked. +func (protector *Protector) Unlock(keyFn KeyFunc) (err error) { + if protector.key != nil { + return + } + protector.key, err = unwrapProtectorKey(ProtectorInfo{protector.data}, keyFn) + return +} + +// Lock wipes a Protector's internal Key. It should always be called after using +// an unlocked Protector. This is often done with a defer statement. There is +// no effect if called multiple times. 
+func (protector *Protector) Lock() error { + err := protector.key.Wipe() + protector.key = nil + return err +} + +// Rewrap updates the data that is wrapping the Protector Key. This is useful if +// a user's password has changed, for example. The keyFn provided to rewrap +// the Protector key will only be called once. Requires unlocked Protector. +func (protector *Protector) Rewrap(keyFn KeyFunc) error { + if protector.key == nil { + return ErrLocked + } + wrappingKey, err := getWrappingKey(ProtectorInfo{protector.data}, keyFn, false) + if err != nil { + return err + } + + // Revert change to wrapped key on failure + oldWrappedKey := protector.data.WrappedKey + defer func() { + wrappingKey.Wipe() + if err != nil { + protector.data.WrappedKey = oldWrappedKey + } + }() + + if protector.data.WrappedKey, err = crypto.Wrap(wrappingKey, protector.key); err != nil { + return err + } + + return protector.Context.Mount.AddProtector(protector.data, protector.ownerIfCreating) +} diff --git a/vendor/github.com/google/fscrypt/actions/recovery.go b/vendor/github.com/google/fscrypt/actions/recovery.go new file mode 100644 index 000000000..8a769cc7e --- /dev/null +++ b/vendor/github.com/google/fscrypt/actions/recovery.go @@ -0,0 +1,131 @@ +/* + * recovery.go - support for generating recovery passphrases + * + * Copyright 2019 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not + * use this file except in compliance with the License. You may obtain a copy of + * the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. + */ + +package actions + +import ( + "fmt" + "os" + "strconv" + + "github.com/google/fscrypt/crypto" + "github.com/google/fscrypt/metadata" + "github.com/google/fscrypt/util" +) + +// modifiedContextWithSource returns a copy of ctx with the protector source +// replaced by source. +func modifiedContextWithSource(ctx *Context, source metadata.SourceType) *Context { + modifiedConfig := *ctx.Config + modifiedConfig.Source = source + modifiedCtx := *ctx + modifiedCtx.Config = &modifiedConfig + return &modifiedCtx +} + +// AddRecoveryPassphrase randomly generates a recovery passphrase and adds it as +// a custom_passphrase protector for the given Policy. +func AddRecoveryPassphrase(policy *Policy, dirname string) (*crypto.Key, *Protector, error) { + // 20 random characters in a-z is 94 bits of entropy, which is way more + // than enough for a passphrase which still goes through the usual + // passphrase hashing which makes it extremely costly to brute force. + passphrase, err := crypto.NewRandomPassphrase(20) + if err != nil { + return nil, nil, err + } + defer func() { + if err != nil { + passphrase.Wipe() + } + }() + getPassphraseFn := func(info ProtectorInfo, retry bool) (*crypto.Key, error) { + // CreateProtector() wipes the passphrase, but in this case we + // still need it for later, so make a copy. + return passphrase.Clone() + } + var recoveryProtector *Protector + customCtx := modifiedContextWithSource(policy.Context, metadata.SourceType_custom_passphrase) + seq := 1 + for { + // Automatically generate a name for the recovery protector. 
+ name := "Recovery passphrase for " + dirname + if seq != 1 { + name += " (" + strconv.Itoa(seq) + ")" + } + recoveryProtector, err = CreateProtector(customCtx, name, getPassphraseFn, policy.ownerIfCreating) + if err == nil { + break + } + if _, ok := err.(*ErrProtectorNameExists); !ok { + return nil, nil, err + } + seq++ + } + if err := policy.AddProtector(recoveryProtector); err != nil { + recoveryProtector.Revert() + return nil, nil, err + } + return passphrase, recoveryProtector, nil +} + +// WriteRecoveryInstructions writes a recovery passphrase and instructions to a +// file. This file should initially be located in the encrypted directory +// protected by the passphrase itself. It's up to the user to store the +// passphrase in a different location if they actually need it. +func WriteRecoveryInstructions(recoveryPassphrase *crypto.Key, recoveryProtector *Protector, + policy *Policy, path string) error { + file, err := os.OpenFile(path, os.O_WRONLY|os.O_CREATE, 0600) + if err != nil { + return err + } + defer file.Close() + str := fmt.Sprintf( + `fscrypt automatically generated a recovery passphrase for this directory: + + %s + +It did this because you chose to protect this directory with your login +passphrase, but this directory is not on the root filesystem. + +Copy this passphrase to a safe place if you want to still be able to unlock this +directory if you re-install the operating system or connect this storage media +to a different system (which would result in your login protector being lost). + +To unlock this directory using this recovery passphrase, run 'fscrypt unlock' +and select the protector named %q. + +If you want to disable recovery passphrase generation (not recommended), +re-create this directory and pass the --no-recovery option to 'fscrypt encrypt'. +Alternatively, you can remove this recovery passphrase protector using: + + fscrypt metadata remove-protector-from-policy --force --protector=%s:%s --policy=%s:%s + +It is safe to keep it around though, as the recovery passphrase is high-entropy. +`, recoveryPassphrase.Data(), recoveryProtector.data.Name, + recoveryProtector.Context.Mount.Path, recoveryProtector.data.ProtectorDescriptor, + policy.Context.Mount.Path, policy.data.KeyDescriptor) + if _, err = file.WriteString(str); err != nil { + return err + } + if recoveryProtector.ownerIfCreating != nil { + if err = util.Chown(file, recoveryProtector.ownerIfCreating); err != nil { + return err + } + } + return file.Sync() +} diff --git a/vendor/github.com/google/fscrypt/crypto/crypto.go b/vendor/github.com/google/fscrypt/crypto/crypto.go new file mode 100644 index 000000000..1f64b38bb --- /dev/null +++ b/vendor/github.com/google/fscrypt/crypto/crypto.go @@ -0,0 +1,228 @@ +/* + * crypto.go - Cryptographic algorithms used by the rest of fscrypt. + * + * Copyright 2017 Google Inc. + * Author: Joe Richey (joerichey@google.com) + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not + * use this file except in compliance with the License. You may obtain a copy of + * the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. + */ + +// Package crypto manages all the cryptography for fscrypt. 
This includes: +// - Key management (key.go) +// - Securely holding keys in memory +// - Making recovery keys +// - Randomness (rand.go) +// - Cryptographic algorithms (crypto.go) +// - encryption (AES256-CTR) +// - authentication (SHA256-based HMAC) +// - key stretching (SHA256-based HKDF) +// - key wrapping/unwrapping (Encrypt then MAC) +// - passphrase-based key derivation (Argon2id) +// - key descriptor computation (double SHA512, or HKDF-SHA512) +package crypto + +import ( + "crypto/aes" + "crypto/cipher" + "crypto/hmac" + "crypto/sha256" + "crypto/sha512" + "encoding/hex" + "io" + + "github.com/pkg/errors" + "golang.org/x/crypto/argon2" + "golang.org/x/crypto/hkdf" + + "github.com/google/fscrypt/metadata" + "github.com/google/fscrypt/util" +) + +// Crypto error values +var ( + ErrBadAuth = errors.New("key authentication check failed") + ErrRecoveryCode = errors.New("invalid recovery code") + ErrMlockUlimit = errors.New("could not lock key in memory") +) + +// panicInputLength panics if "name" has invalid length (expected != actual) +func panicInputLength(name string, expected, actual int) { + if err := util.CheckValidLength(expected, actual); err != nil { + panic(errors.Wrap(err, name)) + } +} + +// checkWrappingKey returns an error if the wrapping key has the wrong length +func checkWrappingKey(wrappingKey *Key) error { + err := util.CheckValidLength(metadata.InternalKeyLen, wrappingKey.Len()) + return errors.Wrap(err, "wrapping key") +} + +// stretchKey stretches a key of length InternalKeyLen using unsalted HKDF to +// make two keys of length InternalKeyLen. +func stretchKey(key *Key) (encKey, authKey *Key) { + panicInputLength("hkdf key", metadata.InternalKeyLen, key.Len()) + + // The new hkdf function uses the hash and key to create a reader that + // can be used to securely initialize multiple keys. This means that + // reads on the hkdf give independent cryptographic keys. The hkdf will + // also always have enough entropy to read two keys. + hkdf := hkdf.New(sha256.New, key.data, nil, nil) + + encKey, err := NewFixedLengthKeyFromReader(hkdf, metadata.InternalKeyLen) + util.NeverError(err) + authKey, err = NewFixedLengthKeyFromReader(hkdf, metadata.InternalKeyLen) + util.NeverError(err) + + return +} + +// aesCTR runs AES256-CTR on the input using the provided key and iv. This +// function can be used to either encrypt or decrypt input of any size. Note +// that input and output must be the same size. +func aesCTR(key *Key, iv, input, output []byte) { + panicInputLength("aesCTR key", metadata.InternalKeyLen, key.Len()) + panicInputLength("aesCTR iv", metadata.IVLen, len(iv)) + panicInputLength("aesCTR output", len(input), len(output)) + + blockCipher, err := aes.NewCipher(key.data) + util.NeverError(err) // Key is checked to have correct length + + stream := cipher.NewCTR(blockCipher, iv) + stream.XORKeyStream(output, input) +} + +// getHMAC returns the SHA256-based HMAC of some data using the provided key. +func getHMAC(key *Key, data ...[]byte) []byte { + panicInputLength("hmac key", metadata.InternalKeyLen, key.Len()) + + mac := hmac.New(sha256.New, key.data) + for _, buffer := range data { + // SHA256 HMAC should never be unable to write the data + _, err := mac.Write(buffer) + util.NeverError(err) + } + + return mac.Sum(nil) +} + +// Wrap takes a wrapping Key of length InternalKeyLen, and uses it to wrap a +// secret Key of any length. This wrapping uses a random IV, the encrypted data, +// and an HMAC to verify the wrapping key was correct. 
All of this is included +// in the returned WrappedKeyData structure. +func Wrap(wrappingKey, secretKey *Key) (*metadata.WrappedKeyData, error) { + if err := checkWrappingKey(wrappingKey); err != nil { + return nil, err + } + + data := &metadata.WrappedKeyData{EncryptedKey: make([]byte, secretKey.Len())} + + // Get random IV + var err error + if data.IV, err = NewRandomBuffer(metadata.IVLen); err != nil { + return nil, err + } + + // Stretch key for encryption and authentication (unsalted). + encKey, authKey := stretchKey(wrappingKey) + defer encKey.Wipe() + defer authKey.Wipe() + + // Encrypt the secret and include the HMAC of the output ("Encrypt-then-MAC"). + aesCTR(encKey, data.IV, secretKey.data, data.EncryptedKey) + + data.Hmac = getHMAC(authKey, data.IV, data.EncryptedKey) + return data, nil +} + +// Unwrap takes a wrapping Key of length InternalKeyLen, and uses it to unwrap +// the WrappedKeyData to get the unwrapped secret Key. The Wrapped Key data +// includes an authentication check, so an error will be returned if that check +// fails. +func Unwrap(wrappingKey *Key, data *metadata.WrappedKeyData) (*Key, error) { + if err := checkWrappingKey(wrappingKey); err != nil { + return nil, err + } + + // Stretch key for encryption and authentication (unsalted). + encKey, authKey := stretchKey(wrappingKey) + defer encKey.Wipe() + defer authKey.Wipe() + + // Check validity of the HMAC + if !hmac.Equal(getHMAC(authKey, data.IV, data.EncryptedKey), data.Hmac) { + return nil, ErrBadAuth + } + + secretKey, err := NewBlankKey(len(data.EncryptedKey)) + if err != nil { + return nil, err + } + aesCTR(encKey, data.IV, data.EncryptedKey, secretKey.data) + + return secretKey, nil +} + +func computeKeyDescriptorV1(key *Key) string { + h1 := sha512.Sum512(key.data) + h2 := sha512.Sum512(h1[:]) + length := hex.DecodedLen(metadata.PolicyDescriptorLenV1) + return hex.EncodeToString(h2[:length]) +} + +func computeKeyDescriptorV2(key *Key) (string, error) { + // This algorithm is specified by the kernel. It uses unsalted + // HKDF-SHA512, where the application-information string is the prefix + // "fscrypt\0" followed by the HKDF_CONTEXT_KEY_IDENTIFIER byte. + hkdf := hkdf.New(sha512.New, key.data, nil, []byte("fscrypt\x00\x01")) + h := make([]byte, hex.DecodedLen(metadata.PolicyDescriptorLenV2)) + if _, err := io.ReadFull(hkdf, h); err != nil { + return "", err + } + return hex.EncodeToString(h), nil +} + +// ComputeKeyDescriptor computes the descriptor for a given cryptographic key. +// If policyVersion=1, it uses the first 8 bytes of the double application of +// SHA512 on the key. Use this for protectors and v1 policy keys. +// If policyVersion=2, it uses HKDF-SHA512 to compute a key identifier that's +// compatible with the kernel's key identifiers for v2 policy keys. +// In both cases, the resulting bytes are formatted as hex. +func ComputeKeyDescriptor(key *Key, policyVersion int64) (string, error) { + switch policyVersion { + case 1: + return computeKeyDescriptorV1(key), nil + case 2: + return computeKeyDescriptorV2(key) + default: + return "", errors.Errorf("policy version of %d is invalid", policyVersion) + } +} + +// PassphraseHash uses Argon2id to produce a Key given the passphrase, salt, and +// hashing costs. This method is designed to take a long time and consume +// considerable memory. For more information, see the documentation at +// https://godoc.org/golang.org/x/crypto/argon2. 
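As a minimal editorial sketch (not part of the vendored file), Wrap and Unwrap are meant to round-trip; the key lengths come from the metadata package constants used above.

package example

import (
	"github.com/google/fscrypt/crypto"
	"github.com/google/fscrypt/metadata"
)

// wrapAndUnwrap wraps a freshly generated policy-sized key with an
// internal-sized wrapping key and immediately unwraps it again.
func wrapAndUnwrap() error {
	wrappingKey, err := crypto.NewRandomKey(metadata.InternalKeyLen)
	if err != nil {
		return err
	}
	defer wrappingKey.Wipe()

	secret, err := crypto.NewRandomKey(metadata.PolicyKeyLen)
	if err != nil {
		return err
	}
	defer secret.Wipe()

	wrapped, err := crypto.Wrap(wrappingKey, secret)
	if err != nil {
		return err
	}

	// Unwrap re-derives the encryption and HMAC keys and returns ErrBadAuth
	// if the authentication check fails (e.g. wrong wrapping key or a
	// tampered WrappedKeyData).
	unwrapped, err := crypto.Unwrap(wrappingKey, wrapped)
	if err != nil {
		return err
	}
	return unwrapped.Wipe()
}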
+func PassphraseHash(passphrase *Key, salt []byte, costs *metadata.HashingCosts) (*Key, error) { + t := uint32(costs.Time) + m := uint32(costs.Memory) + p := uint8(costs.Parallelism) + key := argon2.IDKey(passphrase.data, salt, t, m, p, metadata.InternalKeyLen) + + hash, err := NewBlankKey(metadata.InternalKeyLen) + if err != nil { + return nil, err + } + copy(hash.data, key) + return hash, nil +} diff --git a/vendor/github.com/google/fscrypt/crypto/key.go b/vendor/github.com/google/fscrypt/crypto/key.go new file mode 100644 index 000000000..2e5744336 --- /dev/null +++ b/vendor/github.com/google/fscrypt/crypto/key.go @@ -0,0 +1,354 @@ +/* + * key.go - Cryptographic key management for fscrypt. Ensures that sensitive + * material is properly handled throughout the program. + * + * Copyright 2017 Google Inc. + * Author: Joe Richey (joerichey@google.com) + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not + * use this file except in compliance with the License. You may obtain a copy of + * the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. + */ + +package crypto + +/* +#include +#include +*/ +import "C" + +import ( + "bytes" + "crypto/subtle" + "encoding/base32" + "io" + "log" + "os" + "runtime" + "unsafe" + + "github.com/pkg/errors" + "golang.org/x/sys/unix" + + "github.com/google/fscrypt/metadata" + "github.com/google/fscrypt/util" +) + +const ( + // Keys need to readable and writable, but hidden from other processes. + keyProtection = unix.PROT_READ | unix.PROT_WRITE + keyMmapFlags = unix.MAP_PRIVATE | unix.MAP_ANONYMOUS +) + +/* +UseMlock determines whether we should use the mlock/munlock syscalls to +prevent sensitive data like keys and passphrases from being paged to disk. +UseMlock defaults to true, but can be set to false if the application calling +into this library has insufficient privileges to lock memory. Code using this +package could also bind this setting to a flag by using: + + flag.BoolVar(&crypto.UseMlock, "lock-memory", true, "lock keys in memory") +*/ +var UseMlock = true + +/* +Key protects some arbitrary buffer of cryptographic material. Its methods +ensure that the Key's data is locked in memory before being used (if +UseMlock is set to true), and is wiped and unlocked after use (via the Wipe() +method). This data is never accessed outside of the fscrypt/crypto package +(except for the UnsafeData method). If a key is successfully created, the +Wipe() method should be called after it's use. For example: + + func UseKeyFromStdin() error { + key, err := NewKeyFromReader(os.Stdin) + if err != nil { + return err + } + defer key.Wipe() + + // Do stuff with key + + return nil + } + +The Wipe() method will also be called when a key is garbage collected; however, +it is best practice to clear the key as soon as possible, so it spends a minimal +amount of time in memory. + +Note that Key is not thread safe, as a key could be wiped while another thread +is using it. Also, calling Wipe() from two threads could cause an error as +memory could be freed twice. 
+*/ +type Key struct { + data []byte +} + +// NewBlankKey constructs a blank key of a specified length and returns an error +// if we are unable to allocate or lock the necessary memory. +func NewBlankKey(length int) (*Key, error) { + if length == 0 { + return &Key{data: nil}, nil + } else if length < 0 { + return nil, errors.Errorf("requested key length %d is negative", length) + } + + flags := keyMmapFlags + if UseMlock { + flags |= unix.MAP_LOCKED + } + + // See MAP_ANONYMOUS in http://man7.org/linux/man-pages/man2/mmap.2.html + data, err := unix.Mmap(-1, 0, length, keyProtection, flags) + if err == unix.EAGAIN { + return nil, ErrMlockUlimit + } + if err != nil { + return nil, errors.Wrapf(err, + "failed to allocate (mmap) key buffer of length %d", length) + } + + key := &Key{data: data} + + // Backup finalizer in case user forgets to "defer key.Wipe()" + runtime.SetFinalizer(key, (*Key).Wipe) + return key, nil +} + +// Wipe destroys a Key by zeroing and freeing the memory. The data is zeroed +// even if Wipe returns an error, which occurs if we are unable to unlock or +// free the key memory. Wipe does nothing if the key is already wiped or is nil. +func (key *Key) Wipe() error { + // We do nothing if key or key.data is nil so that Wipe() is idempotent + // and so Wipe() can be called on keys which have already been cleared. + if key != nil && key.data != nil { + data := key.data + key.data = nil + + for i := range data { + data[i] = 0 + } + + if err := unix.Munmap(data); err != nil { + log.Printf("unix.Munmap() failed: %v", err) + return errors.Wrapf(err, "failed to free (munmap) key buffer") + } + } + return nil +} + +// Len is the underlying data buffer's length. +func (key *Key) Len() int { + return len(key.data) +} + +// Equals compares the contents of two keys, returning true if they have the same +// key data. This function runs in constant time. +func (key *Key) Equals(key2 *Key) bool { + return subtle.ConstantTimeCompare(key.data, key2.data) == 1 +} + +// resize returns a new key with size requestedSize and the appropriate data +// copied over. The original data is wiped. This method does nothing and returns +// itself if the key's length equals requestedSize. +func (key *Key) resize(requestedSize int) (*Key, error) { + if key.Len() == requestedSize { + return key, nil + } + defer key.Wipe() + + resizedKey, err := NewBlankKey(requestedSize) + if err != nil { + return nil, err + } + copy(resizedKey.data, key.data) + return resizedKey, nil +} + +// Data returns a slice of the key's underlying data. Note that this may become +// outdated if the key is resized. +func (key *Key) Data() []byte { + return key.data +} + +// UnsafePtr returns an unsafe pointer to the key's underlying data. Note that +// this will only be valid as long as the key is not resized. +func (key *Key) UnsafePtr() unsafe.Pointer { + return util.Ptr(key.data) +} + +// UnsafeToCString makes a copy of the string's data into a null-terminated C +// string allocated by C. Note that this method is unsafe as this C copy has no +// locking or wiping functionality. The key shouldn't contain any `\0` bytes. +func (key *Key) UnsafeToCString() unsafe.Pointer { + size := C.size_t(key.Len()) + data := C.calloc(size+1, 1) + C.memcpy(data, util.Ptr(key.data), size) + return data +} + +// Clone creates a key as a copy of another one. 
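A small, hypothetical sketch of calling PassphraseHash from outside the package; the salt length and cost values below are illustrative assumptions, not values defined by this diff.

package example

import (
	"github.com/google/fscrypt/crypto"
	"github.com/google/fscrypt/metadata"
)

// deriveKey stretches an already-created passphrase Key into a fixed-length
// key. The costs here are placeholders; real costs come from fscrypt's
// configuration, not from this snippet.
func deriveKey(passphrase *crypto.Key) (*crypto.Key, error) {
	salt, err := crypto.NewRandomBuffer(16) // salt length is an assumption
	if err != nil {
		return nil, err
	}
	costs := &metadata.HashingCosts{Time: 1, Memory: 1 << 16, Parallelism: 1}
	// The result always has length metadata.InternalKeyLen; the caller is
	// responsible for calling Wipe() on it when done.
	return crypto.PassphraseHash(passphrase, salt, costs)
}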
+func (key *Key) Clone() (*Key, error) { + newKey, err := NewBlankKey(key.Len()) + if err != nil { + return nil, err + } + copy(newKey.data, key.data) + return newKey, nil +} + +// NewKeyFromCString creates of a copy of some C string's data in a key. Note +// that the original C string is not modified at all, so steps must be taken to +// ensure that this original copy is secured. +func NewKeyFromCString(str unsafe.Pointer) (*Key, error) { + size := C.strlen((*C.char)(str)) + key, err := NewBlankKey(int(size)) + if err != nil { + return nil, err + } + C.memcpy(util.Ptr(key.data), str, size) + return key, nil +} + +// NewKeyFromReader constructs a key of arbitrary length by reading from reader +// until hitting EOF. +func NewKeyFromReader(reader io.Reader) (*Key, error) { + // Use an initial key size of a page. As Mmap allocates a page anyway, + // there isn't much additional overhead from starting with a whole page. + key, err := NewBlankKey(os.Getpagesize()) + if err != nil { + return nil, err + } + + totalBytesRead := 0 + for { + bytesRead, err := reader.Read(key.data[totalBytesRead:]) + totalBytesRead += bytesRead + + switch err { + case nil: + // Need to continue reading. Grow key if necessary + if key.Len() == totalBytesRead { + if key, err = key.resize(2 * key.Len()); err != nil { + return nil, err + } + } + case io.EOF: + // Getting the EOF error means we are done + return key.resize(totalBytesRead) + default: + // Fail if Read() has a failure + key.Wipe() + return nil, err + } + } +} + +// NewFixedLengthKeyFromReader constructs a key with a specified length by +// reading exactly length bytes from reader. +func NewFixedLengthKeyFromReader(reader io.Reader, length int) (*Key, error) { + key, err := NewBlankKey(length) + if err != nil { + return nil, err + } + if _, err := io.ReadFull(reader, key.data); err != nil { + key.Wipe() + return nil, err + } + return key, nil +} + +var ( + // The recovery code is base32 with a dash between each block of 8 characters. + encoding = base32.StdEncoding + blockSize = 8 + separator = []byte("-") + encodedLength = encoding.EncodedLen(metadata.PolicyKeyLen) + decodedLength = encoding.DecodedLen(encodedLength) + // RecoveryCodeLength is the number of bytes in every recovery code + RecoveryCodeLength = (encodedLength/blockSize)*(blockSize+len(separator)) - len(separator) +) + +// WriteRecoveryCode outputs key's recovery code to the provided writer. +// WARNING: This recovery key is enough to derive the original key, so it must +// be given the same level of protection as a raw cryptographic key. +func WriteRecoveryCode(key *Key, writer io.Writer) error { + if err := util.CheckValidLength(metadata.PolicyKeyLen, key.Len()); err != nil { + return errors.Wrap(err, "recovery key") + } + + // We store the base32 encoded data (without separators) in a temp key + encodedKey, err := NewBlankKey(encodedLength) + if err != nil { + return err + } + defer encodedKey.Wipe() + encoding.Encode(encodedKey.data, key.data) + + w := util.NewErrWriter(writer) + + // Write the blocks with separators between them + w.Write(encodedKey.data[:blockSize]) + for blockStart := blockSize; blockStart < encodedLength; blockStart += blockSize { + w.Write(separator) + + blockEnd := util.MinInt(blockStart+blockSize, encodedLength) + w.Write(encodedKey.data[blockStart:blockEnd]) + } + + // If any writes have failed, return the error + return w.Err() +} + +// ReadRecoveryCode gets the recovery code from the provided reader and returns +// the corresponding cryptographic key. 
+// WARNING: This recovery key is enough to derive the original key, so it must +// be given the same level of protection as a raw cryptographic key. +func ReadRecoveryCode(reader io.Reader) (*Key, error) { + // We store the base32 encoded data (without separators) in a temp key + encodedKey, err := NewBlankKey(encodedLength) + if err != nil { + return nil, err + } + defer encodedKey.Wipe() + + r := util.NewErrReader(reader) + + // Read the other blocks, checking the separators between them + r.Read(encodedKey.data[:blockSize]) + inputSeparator := make([]byte, len(separator)) + + for blockStart := blockSize; blockStart < encodedLength; blockStart += blockSize { + r.Read(inputSeparator) + if r.Err() == nil && !bytes.Equal(separator, inputSeparator) { + err = errors.Wrapf(ErrRecoveryCode, "invalid separator %q", inputSeparator) + return nil, err + } + + blockEnd := util.MinInt(blockStart+blockSize, encodedLength) + r.Read(encodedKey.data[blockStart:blockEnd]) + } + + // If any reads have failed, return the error + if r.Err() != nil { + return nil, errors.Wrapf(ErrRecoveryCode, "read error %v", r.Err()) + } + + // Now we decode the key, resizing if necessary + decodedKey, err := NewBlankKey(decodedLength) + if err != nil { + return nil, err + } + if _, err = encoding.Decode(decodedKey.data, encodedKey.data); err != nil { + return nil, errors.Wrap(ErrRecoveryCode, err.Error()) + } + return decodedKey.resize(metadata.PolicyKeyLen) +} diff --git a/vendor/github.com/google/fscrypt/crypto/rand.go b/vendor/github.com/google/fscrypt/crypto/rand.go new file mode 100644 index 000000000..7d1e55bf0 --- /dev/null +++ b/vendor/github.com/google/fscrypt/crypto/rand.go @@ -0,0 +1,98 @@ +/* + * rand.go - Reader used to generate secure random data for fscrypt. + * + * Copyright 2017 Google Inc. + * Author: Joe Richey (joerichey@google.com) + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not + * use this file except in compliance with the License. You may obtain a copy of + * the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. + */ + +package crypto + +import ( + "io" + + "github.com/pkg/errors" + "golang.org/x/sys/unix" +) + +// NewRandomBuffer uses the Linux Getrandom() syscall to create random bytes. If +// the operating system has insufficient randomness, the buffer creation will +// fail. This is an improvement over Go's built-in crypto/rand which will still +// return bytes if the system has insufficiency entropy. +// See: https://github.com/golang/go/issues/19274 +// +// While this syscall was only introduced in Kernel v3.17, it predates the +// introduction of filesystem encryption, so it introduces no additional +// compatibility issues. +func NewRandomBuffer(length int) ([]byte, error) { + buffer := make([]byte, length) + if _, err := io.ReadFull(randReader{}, buffer); err != nil { + return nil, err + } + return buffer, nil +} + +// NewRandomKey creates a random key of the specified length. This function uses +// the same random number generation process as NewRandomBuffer. 
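A hedged sketch showing WriteRecoveryCode and ReadRecoveryCode round-tripping through an in-memory buffer; a real caller would treat that buffer as sensitive material.

package example

import (
	"bytes"
	"errors"

	"github.com/google/fscrypt/crypto"
	"github.com/google/fscrypt/metadata"
)

// recoveryCodeRoundTrip encodes a policy key as a recovery code and decodes
// it again, checking that the original key is recovered.
func recoveryCodeRoundTrip() error {
	key, err := crypto.NewRandomKey(metadata.PolicyKeyLen)
	if err != nil {
		return err
	}
	defer key.Wipe()

	var buf bytes.Buffer
	if err := crypto.WriteRecoveryCode(key, &buf); err != nil {
		return err
	}
	// buf now holds dash-separated base32 blocks, crypto.RecoveryCodeLength
	// bytes in total.

	decoded, err := crypto.ReadRecoveryCode(&buf)
	if err != nil {
		return err
	}
	defer decoded.Wipe()

	if !key.Equals(decoded) {
		return errors.New("recovery code round trip mismatch")
	}
	return nil
}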
+func NewRandomKey(length int) (*Key, error) { + return NewFixedLengthKeyFromReader(randReader{}, length) +} + +// NewRandomPassphrase creates a random passphrase of the specified length +// containing random alphabetic characters. +func NewRandomPassphrase(length int) (*Key, error) { + chars := []byte("abcdefghijklmnopqrstuvwxyz") + passphrase, err := NewBlankKey(length) + if err != nil { + return nil, err + } + for i := 0; i < length; { + // Get some random bytes. + raw, err := NewRandomKey((length - i) * 2) + if err != nil { + return nil, err + } + // Translate the random bytes into random characters. + for _, b := range raw.data { + if int(b) >= 256-(256%len(chars)) { + // Avoid bias towards the first characters in the list. + continue + } + c := chars[int(b)%len(chars)] + passphrase.data[i] = c + i++ + if i == length { + break + } + } + raw.Wipe() + } + return passphrase, nil +} + +// randReader just calls into Getrandom, so no internal data is needed. +type randReader struct{} + +func (r randReader) Read(buffer []byte) (int, error) { + n, err := unix.Getrandom(buffer, unix.GRND_NONBLOCK) + switch err { + case nil: + return n, nil + case unix.EAGAIN: + err = errors.New("insufficient entropy in pool") + case unix.ENOSYS: + err = errors.New("kernel must be v3.17 or later") + } + return 0, errors.Wrap(err, "getrandom() failed") +} diff --git a/vendor/github.com/google/fscrypt/filesystem/filesystem.go b/vendor/github.com/google/fscrypt/filesystem/filesystem.go new file mode 100644 index 000000000..27bfa2415 --- /dev/null +++ b/vendor/github.com/google/fscrypt/filesystem/filesystem.go @@ -0,0 +1,1088 @@ +/* + * filesystem.go - Contains the functionality for a specific filesystem. This + * includes the commands to setup the filesystem, apply policies, and locate + * metadata. + * + * Copyright 2017 Google Inc. + * Author: Joe Richey (joerichey@google.com) + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not + * use this file except in compliance with the License. You may obtain a copy of + * the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. + */ + +// Package filesystem deals with the structure of the files on disk used to +// store the metadata for fscrypt. Specifically, this package includes: +// - mountpoint management (mountpoint.go) +// - querying existing mounted filesystems +// - getting filesystems from a UUID +// - finding the filesystem for a specific path +// - metadata organization (filesystem.go) +// - setting up a mounted filesystem for use with fscrypt +// - adding/querying/deleting metadata +// - making links to other filesystems' metadata +// - following links to get data from other filesystems +package filesystem + +import ( + "fmt" + "io" + "io/ioutil" + "log" + "os" + "os/user" + "path/filepath" + "sort" + "strings" + "syscall" + "time" + + "github.com/golang/protobuf/proto" + "github.com/pkg/errors" + "golang.org/x/sys/unix" + + "github.com/google/fscrypt/metadata" + "github.com/google/fscrypt/util" +) + +// ErrAlreadySetup indicates that a filesystem is already setup for fscrypt. 
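For illustration only, the call shape of NewRandomPassphrase; the generated passphrase is wiped immediately and only its length is printed.

package example

import (
	"fmt"

	"github.com/google/fscrypt/crypto"
)

// printPassphraseLength generates a throwaway 20-character passphrase to show
// the call shape; real passphrases must never be printed or left unwiped.
func printPassphraseLength() error {
	p, err := crypto.NewRandomPassphrase(20)
	if err != nil {
		return err
	}
	defer p.Wipe()
	fmt.Printf("generated %d lowercase characters\n", p.Len())
	return nil
}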
+type ErrAlreadySetup struct { + Mount *Mount +} + +func (err *ErrAlreadySetup) Error() string { + return fmt.Sprintf("filesystem %s is already setup for use with fscrypt", + err.Mount.Path) +} + +// ErrCorruptMetadata indicates that an fscrypt metadata file is corrupt. +type ErrCorruptMetadata struct { + Path string + UnderlyingError error +} + +func (err *ErrCorruptMetadata) Error() string { + return fmt.Sprintf("fscrypt metadata file at %q is corrupt: %s", + err.Path, err.UnderlyingError) +} + +// ErrFollowLink indicates that a protector link can't be followed. +type ErrFollowLink struct { + Link string + UnderlyingError error +} + +func (err *ErrFollowLink) Error() string { + return fmt.Sprintf("cannot follow filesystem link %q: %s", + err.Link, err.UnderlyingError) +} + +// ErrInsecurePermissions indicates that a filesystem is not considered to be +// setup for fscrypt because a metadata directory has insecure permissions. +type ErrInsecurePermissions struct { + Path string +} + +func (err *ErrInsecurePermissions) Error() string { + return fmt.Sprintf("%q has insecure permissions (world-writable without sticky bit)", + err.Path) +} + +// ErrMakeLink indicates that a protector link can't be created. +type ErrMakeLink struct { + Target *Mount + UnderlyingError error +} + +func (err *ErrMakeLink) Error() string { + return fmt.Sprintf("cannot create filesystem link to %q: %s", + err.Target.Path, err.UnderlyingError) +} + +// ErrMountOwnedByAnotherUser indicates that the mountpoint root directory is +// owned by a user that isn't trusted in the current context, so we don't +// consider fscrypt to be properly setup on the filesystem. +type ErrMountOwnedByAnotherUser struct { + Mount *Mount +} + +func (err *ErrMountOwnedByAnotherUser) Error() string { + return fmt.Sprintf("another non-root user owns the root directory of %s", err.Mount.Path) +} + +// ErrNoCreatePermission indicates that the current user lacks permission to +// create fscrypt metadata on the given filesystem. +type ErrNoCreatePermission struct { + Mount *Mount +} + +func (err *ErrNoCreatePermission) Error() string { + return fmt.Sprintf("user lacks permission to create fscrypt metadata on %s", err.Mount.Path) +} + +// ErrNotAMountpoint indicates that a path is not a mountpoint. +type ErrNotAMountpoint struct { + Path string +} + +func (err *ErrNotAMountpoint) Error() string { + return fmt.Sprintf("%q is not a mountpoint", err.Path) +} + +// ErrNotSetup indicates that a filesystem is not setup for fscrypt. +type ErrNotSetup struct { + Mount *Mount +} + +func (err *ErrNotSetup) Error() string { + return fmt.Sprintf("filesystem %s is not setup for use with fscrypt", err.Mount.Path) +} + +// ErrSetupByAnotherUser indicates that one or more of the fscrypt metadata +// directories is owned by a user that isn't trusted in the current context, so +// we don't consider fscrypt to be properly setup on the filesystem. +type ErrSetupByAnotherUser struct { + Mount *Mount +} + +func (err *ErrSetupByAnotherUser) Error() string { + return fmt.Sprintf("another non-root user owns fscrypt metadata directories on %s", err.Mount.Path) +} + +// ErrSetupNotSupported indicates that the given filesystem type is not +// supported for fscrypt setup. +type ErrSetupNotSupported struct { + Mount *Mount +} + +func (err *ErrSetupNotSupported) Error() string { + return fmt.Sprintf("filesystem type %s is not supported for fscrypt setup", + err.Mount.FilesystemType) +} + +// ErrPolicyNotFound indicates that the policy metadata was not found. 
+type ErrPolicyNotFound struct { + Descriptor string + Mount *Mount +} + +func (err *ErrPolicyNotFound) Error() string { + return fmt.Sprintf("policy metadata for %s not found on filesystem %s", + err.Descriptor, err.Mount.Path) +} + +// ErrProtectorNotFound indicates that the protector metadata was not found. +type ErrProtectorNotFound struct { + Descriptor string + Mount *Mount +} + +func (err *ErrProtectorNotFound) Error() string { + return fmt.Sprintf("protector metadata for %s not found on filesystem %s", + err.Descriptor, err.Mount.Path) +} + +// SortDescriptorsByLastMtime indicates whether descriptors are sorted by last +// modification time when being listed. This can be set to true to get +// consistent output for testing. +var SortDescriptorsByLastMtime = false + +// Mount contains information for a specific mounted filesystem. +// Path - Absolute path where the directory is mounted +// FilesystemType - Type of the mounted filesystem, e.g. "ext4" +// Device - Device for filesystem (empty string if we cannot find one) +// DeviceNumber - Device number of the filesystem. This is set even if +// Device isn't, since all filesystems have a device +// number assigned by the kernel, even pseudo-filesystems. +// Subtree - The mounted subtree of the filesystem. This is usually +// "/", meaning that the entire filesystem is mounted, but +// it can differ for bind mounts. +// ReadOnly - True if this is a read-only mount +// +// In order to use a Mount to store fscrypt metadata, some directories must be +// setup first. Specifically, the directories created look like: +// +// └── .fscrypt +// ├── policies +// └── protectors +// +// These "policies" and "protectors" directories will contain files that are +// the corresponding metadata structures for policies and protectors. The public +// interface includes functions for setting up these directories and Adding, +// Getting, and Removing these files. +// +// There is also the ability to reference another filesystem's metadata. This is +// used when a Policy on filesystem A is protected with Protector on filesystem +// B. In this scenario, we store a "link file" in the protectors directory. +// +// We also allow ".fscrypt" to be a symlink which was previously created. This +// allows login protectors to be created when the root filesystem is read-only, +// provided that "/.fscrypt" is a symlink pointing to a writable location. +type Mount struct { + Path string + FilesystemType string + Device string + DeviceNumber DeviceNumber + Subtree string + ReadOnly bool +} + +// PathSorter allows mounts to be sorted by Path. +type PathSorter []*Mount + +func (p PathSorter) Len() int { return len(p) } +func (p PathSorter) Swap(i, j int) { p[i], p[j] = p[j], p[i] } +func (p PathSorter) Less(i, j int) bool { return p[i].Path < p[j].Path } + +const ( + // Names of the various directories used in fscrypt + baseDirName = ".fscrypt" + policyDirName = "policies" + protectorDirName = "protectors" + tempPrefix = ".tmp" + linkFileExtension = ".link" + + // The base directory should be read-only (except for the creator) + basePermissions = 0755 + + // The metadata files shouldn't be readable or writable by other users. + // Having them be world-readable wouldn't necessarily be a huge issue, + // but given that some of these files contain (strong) password hashes, + // we error on the side of caution -- similar to /etc/shadow. + // Note: existing files on-disk might have mode 0644, as that was the + // mode used by fscrypt v0.3.2 and earlier. 
+ filePermissions = os.FileMode(0600) + + // Maximum size of a metadata file. This value is arbitrary, and it can + // be changed. We just set a reasonable limit that shouldn't be reached + // in practice, except by users trying to cause havoc by creating + // extremely large files in the metadata directories. + maxMetadataFileSize = 16384 +) + +// SetupMode is a mode for creating the fscrypt metadata directories. +type SetupMode int + +const ( + // SingleUserWritable specifies to make the fscrypt metadata directories + // writable by a single user (usually root) only. + SingleUserWritable SetupMode = iota + // WorldWritable specifies to make the fscrypt metadata directories + // world-writable (with the sticky bit set). + WorldWritable +) + +func (m *Mount) String() string { + return fmt.Sprintf(`%s + FilesystemType: %s + Device: %s`, m.Path, m.FilesystemType, m.Device) +} + +// BaseDir returns the path to the base fscrypt directory for this filesystem. +func (m *Mount) BaseDir() string { + rawBaseDir := filepath.Join(m.Path, baseDirName) + // We allow the base directory to be a symlink, but some callers need + // the real path, so dereference the symlink here if needed. Since the + // directory the symlink points to may not exist yet, we have to read + // the symlink manually rather than use filepath.EvalSymlinks. + target, err := os.Readlink(rawBaseDir) + if err != nil { + return rawBaseDir // not a symlink + } + if filepath.IsAbs(target) { + return target + } + return filepath.Join(m.Path, target) +} + +// ProtectorDir returns the directory containing the protector metadata. +func (m *Mount) ProtectorDir() string { + return filepath.Join(m.BaseDir(), protectorDirName) +} + +// protectorPath returns the full path to a regular protector file with the +// specified descriptor. +func (m *Mount) protectorPath(descriptor string) string { + return filepath.Join(m.ProtectorDir(), descriptor) +} + +// linkedProtectorPath returns the full path to a linked protector file with the +// specified descriptor. +func (m *Mount) linkedProtectorPath(descriptor string) string { + return m.protectorPath(descriptor) + linkFileExtension +} + +// PolicyDir returns the directory containing the policy metadata. +func (m *Mount) PolicyDir() string { + return filepath.Join(m.BaseDir(), policyDirName) +} + +// PolicyPath returns the full path to a regular policy file with the +// specified descriptor. +func (m *Mount) PolicyPath(descriptor string) string { + return filepath.Join(m.PolicyDir(), descriptor) +} + +// tempMount creates a temporary directory alongside this Mount's base fscrypt +// directory and returns a temporary Mount which represents this temporary +// directory. The caller is responsible for removing this temporary directory. +func (m *Mount) tempMount() (*Mount, error) { + tempDir, err := ioutil.TempDir(filepath.Dir(m.BaseDir()), tempPrefix) + return &Mount{Path: tempDir}, err +} + +// ErrEncryptionNotEnabled indicates that encryption is not enabled on the given +// filesystem. +type ErrEncryptionNotEnabled struct { + Mount *Mount +} + +func (err *ErrEncryptionNotEnabled) Error() string { + return fmt.Sprintf("encryption not enabled on filesystem %s (%s).", + err.Mount.Path, err.Mount.Device) +} + +// ErrEncryptionNotSupported indicates that encryption is not supported on the +// given filesystem. 
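A small illustrative sketch of where the metadata directories land; the Mount literal is hand-built here, whereas real callers obtain Mount values from this package's mountpoint lookups.

package example

import (
	"fmt"

	"github.com/google/fscrypt/filesystem"
)

// showLayout prints the fscrypt metadata locations for a mount.
func showLayout() {
	m := &filesystem.Mount{Path: "/mnt/data", FilesystemType: "ext4"}
	fmt.Println(m.BaseDir())      // /mnt/data/.fscrypt
	fmt.Println(m.ProtectorDir()) // /mnt/data/.fscrypt/protectors
	fmt.Println(m.PolicyDir())    // /mnt/data/.fscrypt/policies
}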
+type ErrEncryptionNotSupported struct { + Mount *Mount +} + +func (err *ErrEncryptionNotSupported) Error() string { + return fmt.Sprintf("This kernel doesn't support encryption on %s filesystems.", + err.Mount.FilesystemType) +} + +// EncryptionSupportError adds filesystem-specific context to the +// ErrEncryptionNotEnabled and ErrEncryptionNotSupported errors from the +// metadata package. +func (m *Mount) EncryptionSupportError(err error) error { + switch err { + case metadata.ErrEncryptionNotEnabled: + return &ErrEncryptionNotEnabled{m} + case metadata.ErrEncryptionNotSupported: + return &ErrEncryptionNotSupported{m} + } + return err +} + +// isFscryptSetupAllowed decides whether the given filesystem is allowed to be +// set up for fscrypt, without actually accessing it. This basically checks +// whether the filesystem type is one of the types that supports encryption, or +// at least is in some stage of planning for encrption support in the future. +// +// We need this list so that we can skip filesystems that are irrelevant for +// fscrypt without having to look for the fscrypt metadata directories on them, +// which can trigger errors, long delays, or side effects on some filesystems. +// +// Unfortunately, this means that if a completely new filesystem adds encryption +// support, then it will need to be manually added to this list. But it seems +// to be a worthwhile tradeoff to avoid the above issues. +func (m *Mount) isFscryptSetupAllowed() bool { + if m.Path == "/" { + // The root filesystem is always allowed, since it's where login + // protectors are stored. + return true + } + switch m.FilesystemType { + case "ext4", "f2fs", "ubifs", "btrfs", "ceph", "xfs": + return true + default: + return false + } +} + +// CheckSupport returns an error if this filesystem does not support encryption. +func (m *Mount) CheckSupport() error { + if !m.isFscryptSetupAllowed() { + return &ErrEncryptionNotSupported{m} + } + return m.EncryptionSupportError(metadata.CheckSupport(m.Path)) +} + +func checkOwnership(path string, info os.FileInfo, trustedUser *user.User) bool { + if trustedUser == nil { + return true + } + trustedUID := uint32(util.AtoiOrPanic(trustedUser.Uid)) + actualUID := info.Sys().(*syscall.Stat_t).Uid + if actualUID != 0 && actualUID != trustedUID { + log.Printf("WARNING: %q is owned by uid %d, but expected %d or 0", + path, actualUID, trustedUID) + return false + } + return true +} + +// CheckSetup returns an error if any of the fscrypt metadata directories do not +// exist. Will log any unexpected errors or incorrect permissions. +func (m *Mount) CheckSetup(trustedUser *user.User) error { + if !m.isFscryptSetupAllowed() { + return &ErrNotSetup{m} + } + // Check that the mountpoint directory itself is not a symlink and has + // proper ownership, as otherwise we can't trust anything beneath it. + info, err := loggedLstat(m.Path) + if err != nil { + return &ErrNotSetup{m} + } + if (info.Mode() & os.ModeSymlink) != 0 { + log.Printf("mountpoint directory %q cannot be a symlink", m.Path) + return &ErrNotSetup{m} + } + if !info.IsDir() { + log.Printf("mountpoint %q is not a directory", m.Path) + return &ErrNotSetup{m} + } + if !checkOwnership(m.Path, info, trustedUser) { + return &ErrMountOwnedByAnotherUser{m} + } + + // Check BaseDir similarly. However, unlike the other directories, we + // allow BaseDir to be a symlink, to support the use case of metadata + // for a read-only filesystem being redirected to a writable location. 
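A hypothetical caller-side check built on CheckSupport and the error types above; the Mount is again assumed to come from this package's mountpoint lookups.

package example

import (
	"fmt"

	"github.com/google/fscrypt/filesystem"
)

// supportsEncryption reports whether fscrypt could be used on the given mount,
// distinguishing "not supported / not enabled" from unexpected failures.
func supportsEncryption(m *filesystem.Mount) bool {
	switch err := m.CheckSupport(); err.(type) {
	case nil:
		return true
	case *filesystem.ErrEncryptionNotSupported, *filesystem.ErrEncryptionNotEnabled:
		fmt.Println(err)
		return false
	default:
		fmt.Println("unexpected error:", err)
		return false
	}
}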
+ info, err = loggedStat(m.BaseDir()) + if err != nil { + return &ErrNotSetup{m} + } + if !info.IsDir() { + log.Printf("%q is not a directory", m.BaseDir()) + return &ErrNotSetup{m} + } + if !checkOwnership(m.Path, info, trustedUser) { + return &ErrMountOwnedByAnotherUser{m} + } + + // Check that the policies and protectors directories aren't symlinks and + // have proper ownership. + subdirs := []string{m.PolicyDir(), m.ProtectorDir()} + for _, path := range subdirs { + info, err := loggedLstat(path) + if err != nil { + return &ErrNotSetup{m} + } + if (info.Mode() & os.ModeSymlink) != 0 { + log.Printf("directory %q cannot be a symlink", path) + return &ErrNotSetup{m} + } + if !info.IsDir() { + log.Printf("%q is not a directory", path) + return &ErrNotSetup{m} + } + // We are no longer too picky about the mode, given that + // 'fscrypt setup' now offers a choice of two different modes, + // and system administrators could customize it further. + // However, we can at least verify that if the directory is + // world-writable, then the sticky bit is also set. + if info.Mode()&(os.ModeSticky|0002) == 0002 { + log.Printf("%q is world-writable but doesn't have sticky bit set", path) + return &ErrInsecurePermissions{path} + } + if !checkOwnership(path, info, trustedUser) { + return &ErrSetupByAnotherUser{m} + } + } + return nil +} + +// makeDirectories creates the three metadata directories with the correct +// permissions. Note that this function overrides the umask. +func (m *Mount) makeDirectories(setupMode SetupMode) error { + // Zero the umask so we get the permissions we want + oldMask := unix.Umask(0) + defer func() { + unix.Umask(oldMask) + }() + + if err := os.Mkdir(m.BaseDir(), basePermissions); err != nil { + return err + } + + var dirMode os.FileMode + switch setupMode { + case SingleUserWritable: + dirMode = 0755 + case WorldWritable: + dirMode = os.ModeSticky | 0777 + } + if err := os.Mkdir(m.PolicyDir(), dirMode); err != nil { + return err + } + return os.Mkdir(m.ProtectorDir(), dirMode) +} + +// GetSetupMode returns the current mode for fscrypt metadata creation on this +// filesystem. +func (m *Mount) GetSetupMode() (SetupMode, *user.User, error) { + info1, err1 := os.Stat(m.PolicyDir()) + info2, err2 := os.Stat(m.ProtectorDir()) + + if err1 == nil && err2 == nil { + mask := os.ModeSticky | 0777 + mode1 := info1.Mode() & mask + mode2 := info2.Mode() & mask + uid1 := info1.Sys().(*syscall.Stat_t).Uid + uid2 := info2.Sys().(*syscall.Stat_t).Uid + user, err := util.UserFromUID(int64(uid1)) + if err == nil && mode1 == mode2 && uid1 == uid2 { + switch mode1 { + case mask: + return WorldWritable, nil, nil + case 0755: + return SingleUserWritable, user, nil + } + } + log.Printf("filesystem %s uses custom permissions on metadata directories", m.Path) + } + return -1, nil, errors.New("unable to determine setup mode") +} + +// Setup sets up the filesystem for use with fscrypt. Note that this merely +// creates the appropriate files on the filesystem. It does not actually modify +// the filesystem's feature flags. This operation is atomic; it either succeeds +// or no files in the baseDir are created. +func (m *Mount) Setup(mode SetupMode) error { + if m.CheckSetup(nil) == nil { + return &ErrAlreadySetup{m} + } + if !m.isFscryptSetupAllowed() { + return &ErrSetupNotSupported{m} + } + // We build the directories under a temp Mount and then move into place. 
+ temp, err := m.tempMount() + if err != nil { + return err + } + defer os.RemoveAll(temp.Path) + + if err = temp.makeDirectories(mode); err != nil { + return err + } + + // Atomically move directory into place. + return os.Rename(temp.BaseDir(), m.BaseDir()) +} + +// RemoveAllMetadata removes all the policy and protector metadata from the +// filesystem. This operation is atomic; it either succeeds or no files in the +// baseDir are removed. +// WARNING: Will cause data loss if the metadata is used to encrypt +// directories (this could include directories on other filesystems). +func (m *Mount) RemoveAllMetadata() error { + if err := m.CheckSetup(nil); err != nil { + return err + } + // temp will hold the old metadata temporarily + temp, err := m.tempMount() + if err != nil { + return err + } + defer os.RemoveAll(temp.Path) + + // Move directory into temp (to be destroyed on defer) + return os.Rename(m.BaseDir(), temp.BaseDir()) +} + +func syncDirectory(dirPath string) error { + dirFile, err := os.Open(dirPath) + if err != nil { + return err + } + if err = dirFile.Sync(); err != nil { + dirFile.Close() + return err + } + return dirFile.Close() +} + +func (m *Mount) overwriteDataNonAtomic(path string, data []byte) error { + file, err := os.OpenFile(path, os.O_WRONLY|os.O_TRUNC|unix.O_NOFOLLOW, 0) + if err != nil { + return err + } + if _, err = file.Write(data); err != nil { + log.Printf("WARNING: overwrite of %q failed; file will be corrupted!", path) + file.Close() + return err + } + if err = file.Sync(); err != nil { + file.Close() + return err + } + if err = file.Close(); err != nil { + return err + } + log.Printf("successfully overwrote %q non-atomically", path) + return nil +} + +// writeData writes the given data to the given path such that, if possible, the +// data is either written to stable storage or an error is returned. If a file +// already exists at the path, it will be replaced. +// +// However, if the process doesn't have write permission to the directory but +// does have write permission to the file itself, then as a fallback the file is +// overwritten in-place rather than replaced. Note that this may be non-atomic. +func (m *Mount) writeData(path string, data []byte, owner *user.User, mode os.FileMode) error { + // Write the data to a temporary file, sync it, then rename into place + // so that the operation will be atomic. + dirPath := filepath.Dir(path) + tempFile, err := ioutil.TempFile(dirPath, tempPrefix) + if err != nil { + log.Print(err) + if os.IsPermission(err) { + if _, err = os.Lstat(path); err == nil { + log.Printf("trying non-atomic overwrite of %q", path) + return m.overwriteDataNonAtomic(path, data) + } + return &ErrNoCreatePermission{m} + } + return err + } + defer os.Remove(tempFile.Name()) + + // Ensure the new file has the right permissions mask. + if err = tempFile.Chmod(mode); err != nil { + tempFile.Close() + return err + } + // Override the file owner if one was specified. This happens when root + // needs to create files owned by a particular user. 
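A short, assumed usage sketch of Setup that treats ErrAlreadySetup as success; the choice of SingleUserWritable is arbitrary here.

package example

import (
	"github.com/google/fscrypt/filesystem"
)

// ensureSetup creates the .fscrypt directories if they are missing and
// returns nil if the filesystem is already set up.
func ensureSetup(m *filesystem.Mount) error {
	err := m.Setup(filesystem.SingleUserWritable)
	if _, ok := err.(*filesystem.ErrAlreadySetup); ok {
		return nil
	}
	return err
}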
+ if owner != nil { + if err = util.Chown(tempFile, owner); err != nil { + log.Printf("could not set owner of %q to %v: %v", + path, owner.Username, err) + tempFile.Close() + return err + } + } + if _, err = tempFile.Write(data); err != nil { + tempFile.Close() + return err + } + if err = tempFile.Sync(); err != nil { + tempFile.Close() + return err + } + if err = tempFile.Close(); err != nil { + return err + } + + if err = os.Rename(tempFile.Name(), path); err != nil { + return err + } + // Ensure the rename has been persisted before returning success. + return syncDirectory(dirPath) +} + +// addMetadata writes the metadata structure to the file with the specified +// path. This will overwrite any existing data. The operation is atomic. +func (m *Mount) addMetadata(path string, md metadata.Metadata, owner *user.User) error { + if err := md.CheckValidity(); err != nil { + return errors.Wrap(err, "provided metadata is invalid") + } + + data, err := proto.Marshal(md) + if err != nil { + return err + } + + mode := filePermissions + // If the file already exists, then preserve its owner and mode if + // possible. This is necessary because by default, for atomicity + // reasons we'll replace the file rather than overwrite it. + info, err := os.Lstat(path) + if err == nil { + if owner == nil && util.IsUserRoot() { + uid := info.Sys().(*syscall.Stat_t).Uid + if owner, err = util.UserFromUID(int64(uid)); err != nil { + log.Print(err) + } + } + mode = info.Mode() & 0777 + } else if !os.IsNotExist(err) { + log.Print(err) + } + + if owner != nil { + log.Printf("writing metadata to %q and setting owner to %s", path, owner.Username) + } else { + log.Printf("writing metadata to %q", path) + } + return m.writeData(path, data, owner, mode) +} + +// readMetadataFileSafe gets the contents of a metadata file extra-carefully, +// considering that it could be a malicious file created to cause a +// denial-of-service. Specifically, the following checks are done: +// +// - It must be a regular file, not another type of file like a symlink or FIFO. +// (Symlinks aren't bad by themselves, but given that a malicious user could +// point one to absolutely anywhere, and there is no known use case for the +// metadata files themselves being symlinks, it seems best to disallow them.) +// - It must have a reasonable size (<= maxMetadataFileSize). +// - If trustedUser is non-nil, then the file must be owned by the given user +// or by root. +// +// Take care to avoid TOCTOU (time-of-check-time-of-use) bugs when doing these +// tests. Notably, we must open the file before checking the file type, as the +// file type could change between any previous checks and the open. When doing +// this, O_NOFOLLOW is needed to avoid following a symlink (this applies to the +// last path component only), and O_NONBLOCK is needed to avoid blocking if the +// file is a FIFO. +// +// This function returns the data read as well as the UID of the user who owns +// the file. The returned UID is needed for login protectors, where the UID +// needs to be cross-checked with the UID stored in the file itself. 
+func readMetadataFileSafe(path string, trustedUser *user.User) ([]byte, int64, error) { + file, err := os.OpenFile(path, os.O_RDONLY|unix.O_NOFOLLOW|unix.O_NONBLOCK, 0) + if err != nil { + return nil, -1, err + } + defer file.Close() + + info, err := file.Stat() + if err != nil { + return nil, -1, err + } + if !info.Mode().IsRegular() { + return nil, -1, &ErrCorruptMetadata{path, errors.New("not a regular file")} + } + if !checkOwnership(path, info, trustedUser) { + return nil, -1, &ErrCorruptMetadata{path, errors.New("metadata file belongs to another user")} + } + // Clear O_NONBLOCK, since it has served its purpose when opening the + // file, and the behavior of reading from a regular file with O_NONBLOCK + // is technically unspecified. + if _, err = unix.FcntlInt(file.Fd(), unix.F_SETFL, 0); err != nil { + return nil, -1, &os.PathError{Op: "clearing O_NONBLOCK", Path: path, Err: err} + } + // Read the file contents, allowing at most maxMetadataFileSize bytes. + reader := &io.LimitedReader{R: file, N: maxMetadataFileSize + 1} + data, err := ioutil.ReadAll(reader) + if err != nil { + return nil, -1, err + } + if reader.N == 0 { + return nil, -1, &ErrCorruptMetadata{path, errors.New("metadata file size limit exceeded")} + } + return data, int64(info.Sys().(*syscall.Stat_t).Uid), nil +} + +// getMetadata reads the metadata structure from the file with the specified +// path. Only reads normal metadata files, not linked metadata. +func (m *Mount) getMetadata(path string, trustedUser *user.User, md metadata.Metadata) (int64, error) { + data, owner, err := readMetadataFileSafe(path, trustedUser) + if err != nil { + log.Printf("could not read metadata from %q: %v", path, err) + return -1, err + } + + if err := proto.Unmarshal(data, md); err != nil { + return -1, &ErrCorruptMetadata{path, err} + } + + if err := md.CheckValidity(); err != nil { + return -1, &ErrCorruptMetadata{path, err} + } + + log.Printf("successfully read metadata from %q", path) + return owner, nil +} + +// removeMetadata deletes the metadata struct from the file with the specified +// path. Works with regular or linked metadata. +func (m *Mount) removeMetadata(path string) error { + if err := os.Remove(path); err != nil { + log.Printf("could not remove metadata file at %q: %v", path, err) + return err + } + + log.Printf("successfully removed metadata file at %q", path) + return nil +} + +// AddProtector adds the protector metadata to this filesystem's storage. This +// will overwrite the value of an existing protector with this descriptor. This +// will fail with ErrLinkedProtector if a linked protector with this descriptor +// already exists on the filesystem. +func (m *Mount) AddProtector(data *metadata.ProtectorData, owner *user.User) error { + var err error + if err = m.CheckSetup(nil); err != nil { + return err + } + if isRegularFile(m.linkedProtectorPath(data.ProtectorDescriptor)) { + return errors.Errorf("cannot modify linked protector %s on filesystem %s", + data.ProtectorDescriptor, m.Path) + } + path := m.protectorPath(data.ProtectorDescriptor) + return m.addMetadata(path, data, owner) +} + +// AddLinkedProtector adds a link in this filesystem to the protector metadata +// in the dest filesystem, if one doesn't already exist. On success, the return +// value is a nil error and a bool that is true iff the link is newly created. 
+func (m *Mount) AddLinkedProtector(descriptor string, dest *Mount, trustedUser *user.User, + ownerIfCreating *user.User) (bool, error) { + if err := m.CheckSetup(trustedUser); err != nil { + return false, err + } + // Check that the link is good (descriptor exists, filesystem has UUID). + if _, err := dest.GetRegularProtector(descriptor, trustedUser); err != nil { + return false, err + } + + linkPath := m.linkedProtectorPath(descriptor) + + // Check whether the link already exists. + existingLink, _, err := readMetadataFileSafe(linkPath, trustedUser) + if err == nil { + existingLinkedMnt, err := getMountFromLink(string(existingLink)) + if err != nil { + return false, errors.Wrap(err, linkPath) + } + if existingLinkedMnt != dest { + return false, errors.Errorf("link %q points to %q, but expected %q", + linkPath, existingLinkedMnt.Path, dest.Path) + } + return false, nil + } + if !os.IsNotExist(err) { + return false, err + } + + var newLink string + newLink, err = makeLink(dest) + if err != nil { + return false, err + } + return true, m.writeData(linkPath, []byte(newLink), ownerIfCreating, filePermissions) +} + +// GetRegularProtector looks up the protector metadata by descriptor. This will +// fail with ErrProtectorNotFound if the descriptor is a linked protector. +func (m *Mount) GetRegularProtector(descriptor string, trustedUser *user.User) (*metadata.ProtectorData, error) { + if err := m.CheckSetup(trustedUser); err != nil { + return nil, err + } + data := new(metadata.ProtectorData) + path := m.protectorPath(descriptor) + owner, err := m.getMetadata(path, trustedUser, data) + if os.IsNotExist(err) { + err = &ErrProtectorNotFound{descriptor, m} + } + if err != nil { + return nil, err + } + // Login protectors have their UID stored in the file. Since normally + // any user can create files in the fscrypt metadata directories, for a + // login protector to be considered valid it *must* be owned by the + // claimed user or by root. Note: fscrypt v0.3.2 and later always makes + // login protectors owned by the user, but previous versions could + // create them owned by root -- that is the main reason we allow root. + if data.Source == metadata.SourceType_pam_passphrase && owner != 0 && owner != data.Uid { + log.Printf("WARNING: %q claims to be the login protector for uid %d, but it is owned by uid %d. Needs to be %d or 0.", + path, data.Uid, owner, data.Uid) + return nil, &ErrCorruptMetadata{path, errors.New("login protector belongs to wrong user")} + } + return data, nil +} + +// GetProtector returns the Mount of the filesystem containing the information +// and that protector's data. If the descriptor is a regular (not linked) +// protector, the mount will return itself. +func (m *Mount) GetProtector(descriptor string, trustedUser *user.User) (*Mount, *metadata.ProtectorData, error) { + if err := m.CheckSetup(trustedUser); err != nil { + return nil, nil, err + } + // Get the link data from the link file + path := m.linkedProtectorPath(descriptor) + link, _, err := readMetadataFileSafe(path, trustedUser) + if err != nil { + // If the link doesn't exist, try for a regular protector. 
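An illustrative wrapper around GetRegularProtector that separates the not-found case from other errors; the helper name is an assumption, and passing a nil trusted user skips the ownership restriction as described above.

package example

import (
	"log"

	"github.com/google/fscrypt/filesystem"
	"github.com/google/fscrypt/metadata"
)

// lookupProtector fetches protector metadata by descriptor.
func lookupProtector(m *filesystem.Mount, descriptor string) (*metadata.ProtectorData, error) {
	data, err := m.GetRegularProtector(descriptor, nil)
	if _, notFound := err.(*filesystem.ErrProtectorNotFound); notFound {
		log.Printf("no protector %s on %s", descriptor, m.Path)
		return nil, err
	}
	return data, err
}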
+ if os.IsNotExist(err) { + data, err := m.GetRegularProtector(descriptor, trustedUser) + return m, data, err + } + return nil, nil, err + } + log.Printf("following protector link %s", path) + linkedMnt, err := getMountFromLink(string(link)) + if err != nil { + return nil, nil, errors.Wrap(err, path) + } + data, err := linkedMnt.GetRegularProtector(descriptor, trustedUser) + if err != nil { + return nil, nil, &ErrFollowLink{string(link), err} + } + return linkedMnt, data, nil +} + +// RemoveProtector deletes the protector metadata (or a link to another +// filesystem's metadata) from the filesystem storage. +func (m *Mount) RemoveProtector(descriptor string) error { + if err := m.CheckSetup(nil); err != nil { + return err + } + // We first try to remove the linkedProtector. If that metadata does not + // exist, we try to remove the normal protector. + err := m.removeMetadata(m.linkedProtectorPath(descriptor)) + if os.IsNotExist(err) { + err = m.removeMetadata(m.protectorPath(descriptor)) + if os.IsNotExist(err) { + err = &ErrProtectorNotFound{descriptor, m} + } + } + return err +} + +// ListProtectors lists the descriptors of all protectors on this filesystem. +// This does not include linked protectors. If trustedUser is non-nil, then +// the protectors are restricted to those owned by the given user or by root. +func (m *Mount) ListProtectors(trustedUser *user.User) ([]string, error) { + return m.listMetadata(m.ProtectorDir(), "protectors", trustedUser) +} + +// AddPolicy adds the policy metadata to the filesystem storage. +func (m *Mount) AddPolicy(data *metadata.PolicyData, owner *user.User) error { + if err := m.CheckSetup(nil); err != nil { + return err + } + + return m.addMetadata(m.PolicyPath(data.KeyDescriptor), data, owner) +} + +// GetPolicy looks up the policy metadata by descriptor. +func (m *Mount) GetPolicy(descriptor string, trustedUser *user.User) (*metadata.PolicyData, error) { + if err := m.CheckSetup(trustedUser); err != nil { + return nil, err + } + data := new(metadata.PolicyData) + _, err := m.getMetadata(m.PolicyPath(descriptor), trustedUser, data) + if os.IsNotExist(err) { + err = &ErrPolicyNotFound{descriptor, m} + } + return data, err +} + +// RemovePolicy deletes the policy metadata from the filesystem storage. +func (m *Mount) RemovePolicy(descriptor string) error { + if err := m.CheckSetup(nil); err != nil { + return err + } + err := m.removeMetadata(m.PolicyPath(descriptor)) + if os.IsNotExist(err) { + err = &ErrPolicyNotFound{descriptor, m} + } + return err +} + +// ListPolicies lists the descriptors of all policies on this filesystem. If +// trustedUser is non-nil, then the policies are restricted to those owned by +// the given user or by root. 
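An illustrative helper that summarizes what is stored on a mount using ListProtectors and ListPolicies; a nil trusted user means no per-user filtering.

package example

import (
	"fmt"

	"github.com/google/fscrypt/filesystem"
)

// describeMetadata prints how many protectors and policies a mount holds.
func describeMetadata(m *filesystem.Mount) error {
	protectors, err := m.ListProtectors(nil)
	if err != nil {
		return err
	}
	policies, err := m.ListPolicies(nil)
	if err != nil {
		return err
	}
	fmt.Printf("%s: %d protectors, %d policies\n", m.Path, len(protectors), len(policies))
	return nil
}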
+func (m *Mount) ListPolicies(trustedUser *user.User) ([]string, error) { + return m.listMetadata(m.PolicyDir(), "policies", trustedUser) +} + +type namesAndTimes struct { + names []string + times []time.Time +} + +func (c namesAndTimes) Len() int { + return len(c.names) +} + +func (c namesAndTimes) Less(i, j int) bool { + return c.times[i].Before(c.times[j]) +} + +func (c namesAndTimes) Swap(i, j int) { + c.names[i], c.names[j] = c.names[j], c.names[i] + c.times[i], c.times[j] = c.times[j], c.times[i] +} + +func sortFileListByLastMtime(directoryPath string, names []string) error { + c := namesAndTimes{names: names, times: make([]time.Time, len(names))} + for i, name := range names { + fi, err := os.Lstat(filepath.Join(directoryPath, name)) + if err != nil { + return err + } + c.times[i] = fi.ModTime() + } + sort.Sort(c) + return nil +} + +// listDirectory returns a list of descriptors for a metadata directory, +// including files which are links to other filesystem's metadata. +func (m *Mount) listDirectory(directoryPath string) ([]string, error) { + dir, err := os.Open(directoryPath) + if err != nil { + return nil, err + } + defer dir.Close() + + names, err := dir.Readdirnames(-1) + if err != nil { + return nil, err + } + + if SortDescriptorsByLastMtime { + if err := sortFileListByLastMtime(directoryPath, names); err != nil { + return nil, err + } + } + + descriptors := make([]string, 0, len(names)) + for _, name := range names { + // Be sure to include links as well + descriptors = append(descriptors, strings.TrimSuffix(name, linkFileExtension)) + } + return descriptors, nil +} + +func (m *Mount) listMetadata(dirPath string, metadataType string, owner *user.User) ([]string, error) { + log.Printf("listing %s in %q", metadataType, dirPath) + if err := m.CheckSetup(owner); err != nil { + return nil, err + } + names, err := m.listDirectory(dirPath) + if err != nil { + return nil, err + } + filesIgnoredDescription := "" + if owner != nil { + filteredNames := make([]string, 0, len(names)) + uid := uint32(util.AtoiOrPanic(owner.Uid)) + for _, name := range names { + info, err := os.Lstat(filepath.Join(dirPath, name)) + if err != nil { + continue + } + fileUID := info.Sys().(*syscall.Stat_t).Uid + if fileUID != uid && fileUID != 0 { + continue + } + filteredNames = append(filteredNames, name) + } + numIgnored := len(names) - len(filteredNames) + if numIgnored != 0 { + filesIgnoredDescription = + fmt.Sprintf(" (ignored %d %s not owned by %s or root)", + numIgnored, metadataType, owner.Username) + } + names = filteredNames + } + log.Printf("found %d %s%s", len(names), metadataType, filesIgnoredDescription) + return names, nil +} diff --git a/vendor/github.com/google/fscrypt/filesystem/mountpoint.go b/vendor/github.com/google/fscrypt/filesystem/mountpoint.go new file mode 100644 index 000000000..0b0693b2b --- /dev/null +++ b/vendor/github.com/google/fscrypt/filesystem/mountpoint.go @@ -0,0 +1,578 @@ +/* + * mountpoint.go - Contains all the functionality for finding mountpoints and + * using UUIDs to refer to them. Specifically, we can find the mountpoint of a + * path, get info about a mountpoint, and find mountpoints with a specific UUID. + * + * Copyright 2017 Google Inc. + * Author: Joe Richey (joerichey@google.com) + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not + * use this file except in compliance with the License. 
You may obtain a copy of + * the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. + */ + +package filesystem + +import ( + "bufio" + "fmt" + "io" + "io/ioutil" + "log" + "os" + "path/filepath" + "sort" + "strconv" + "strings" + "sync" + + "github.com/pkg/errors" +) + +var ( + // These maps hold data about the state of the system's filesystems. + // + // They only contain one Mount per filesystem, even if there are + // additional bind mounts, since we want to store fscrypt metadata in + // only one place per filesystem. When it is ambiguous which Mount + // should be used for a filesystem, mountsByDevice will contain an + // explicit nil entry, and mountsByPath won't contain an entry. + mountsByDevice map[DeviceNumber]*Mount + mountsByPath map[string]*Mount + // Used to make the mount functions thread safe + mountMutex sync.Mutex + // True if the maps have been successfully initialized. + mountsInitialized bool + // Supported tokens for filesystem links + uuidToken = "UUID" + pathToken = "PATH" + // Location to perform UUID lookup + uuidDirectory = "/dev/disk/by-uuid" +) + +// Unescape octal-encoded escape sequences in a string from the mountinfo file. +// The kernel encodes the ' ', '\t', '\n', and '\\' bytes this way. This +// function exactly inverts what the kernel does, including by preserving +// invalid UTF-8. +func unescapeString(str string) string { + var sb strings.Builder + for i := 0; i < len(str); i++ { + b := str[i] + if b == '\\' && i+3 < len(str) { + if parsed, err := strconv.ParseInt(str[i+1:i+4], 8, 8); err == nil { + b = uint8(parsed) + i += 3 + } + } + sb.WriteByte(b) + } + return sb.String() +} + +// EscapeString is the reverse of unescapeString. Use this to avoid injecting +// spaces or newlines into output that uses these characters as separators. +func EscapeString(str string) string { + var sb strings.Builder + for _, b := range []byte(str) { + switch b { + case ' ', '\t', '\n', '\\': + sb.WriteString(fmt.Sprintf("\\%03o", b)) + default: + sb.WriteByte(b) + } + } + return sb.String() +} + +// We get the device name via the device number rather than use the mount source +// field directly. This is necessary to handle a rootfs that was mounted via +// the kernel command line, since mountinfo always shows /dev/root for that. +// This assumes that the device nodes are in the standard location. +func getDeviceName(num DeviceNumber) string { + linkPath := fmt.Sprintf("/sys/dev/block/%v", num) + if target, err := os.Readlink(linkPath); err == nil { + return fmt.Sprintf("/dev/%s", filepath.Base(target)) + } + return "" +} + +// Parse one line of /proc/self/mountinfo. +// +// The line contains the following space-separated fields: +// [0] mount ID +// [1] parent ID +// [2] major:minor +// [3] root +// [4] mount point +// [5] mount options +// [6...n-1] optional field(s) +// [n] separator +// [n+1] filesystem type +// [n+2] mount source +// [n+3] super options +// +// For more details, see https://www.kernel.org/doc/Documentation/filesystems/proc.txt +func parseMountInfoLine(line string) *Mount { + fields := strings.Split(line, " ") + if len(fields) < 10 { + return nil + } + + // Count the optional fields. 
In case new fields are appended later, + // don't simply assume that n == len(fields) - 4. + n := 6 + for fields[n] != "-" { + n++ + if n >= len(fields) { + return nil + } + } + if n+3 >= len(fields) { + return nil + } + + var mnt *Mount = &Mount{} + var err error + mnt.DeviceNumber, err = newDeviceNumberFromString(fields[2]) + if err != nil { + return nil + } + mnt.Subtree = unescapeString(fields[3]) + mnt.Path = unescapeString(fields[4]) + for _, opt := range strings.Split(fields[5], ",") { + if opt == "ro" { + mnt.ReadOnly = true + } + } + mnt.FilesystemType = unescapeString(fields[n+1]) + mnt.Device = getDeviceName(mnt.DeviceNumber) + return mnt +} + +type mountpointTreeNode struct { + mount *Mount + parent *mountpointTreeNode + children []*mountpointTreeNode +} + +func addUncontainedSubtreesRecursive(dst map[string]bool, + node *mountpointTreeNode, allUncontainedSubtrees map[string]bool) { + if allUncontainedSubtrees[node.mount.Subtree] { + dst[node.mount.Subtree] = true + } + for _, child := range node.children { + addUncontainedSubtreesRecursive(dst, child, allUncontainedSubtrees) + } +} + +// findMainMount finds the "main" Mount of a filesystem. The "main" Mount is +// where the filesystem's fscrypt metadata is stored. +// +// Normally, there is just one Mount and it's of the entire filesystem +// (mnt.Subtree == "/"). But in general, the filesystem might be mounted in +// multiple places, including "bind mounts" where mnt.Subtree != "/". Also, the +// filesystem might have a combination of read-write and read-only mounts. +// +// To handle most cases, we could just choose a mount with mnt.Subtree == "/", +// preferably a read-write mount. However, that doesn't work in containers +// where the "/" subtree might not be mounted. Here's a real-world example: +// +// mnt.Subtree mnt.Path +// ----------- -------- +// /var/lib/lxc/base/rootfs / +// /var/cache/pacman/pkg /var/cache/pacman/pkg +// /srv/repo/x86_64 /srv/http/x86_64 +// +// In this case, all mnt.Subtree are independent. To handle this case, we must +// choose the Mount whose mnt.Path contains the others, i.e. the first one. +// Note: the fscrypt metadata won't be usable from outside the container since +// it won't be at the real root of the filesystem, but that may be acceptable. +// +// However, we can't look *only* at mnt.Path, since in some cases mnt.Subtree is +// needed to correctly handle bind mounts. For example, in the following case, +// the first Mount should be chosen: +// +// mnt.Subtree mnt.Path +// ----------- -------- +// /foo /foo +// /foo/dir /dir +// +// To solve this, we divide the mounts into non-overlapping trees of mnt.Path. +// Then, we choose one of these trees which contains (exactly or via path +// prefix) *all* mnt.Subtree. We then return the root of this tree. In both +// the above examples, this algorithm returns the first Mount. +func findMainMount(filesystemMounts []*Mount) *Mount { + // Index this filesystem's mounts by path. Note: paths are unique here, + // since non-last mounts were already excluded earlier. + // + // Also build the set of all mounted subtrees. + filesystemMountsByPath := make(map[string]*mountpointTreeNode) + allSubtrees := make(map[string]bool) + for _, mnt := range filesystemMounts { + filesystemMountsByPath[mnt.Path] = &mountpointTreeNode{mount: mnt} + allSubtrees[mnt.Subtree] = true + } + + // Divide the mounts into non-overlapping trees of mountpoints. 
+ for path, mntNode := range filesystemMountsByPath { + for path != "/" && mntNode.parent == nil { + path = filepath.Dir(path) + if parent := filesystemMountsByPath[path]; parent != nil { + mntNode.parent = parent + parent.children = append(parent.children, mntNode) + } + } + } + + // Build the set of mounted subtrees that aren't contained in any other + // mounted subtree. + allUncontainedSubtrees := make(map[string]bool) + for subtree := range allSubtrees { + contained := false + for t := subtree; t != "/" && !contained; { + t = filepath.Dir(t) + contained = allSubtrees[t] + } + if !contained { + allUncontainedSubtrees[subtree] = true + } + } + + // Select the root of a mountpoint tree whose mounted subtrees contain + // *all* mounted subtrees. Equivalently, select a mountpoint tree in + // which every uncontained subtree is mounted. + var mainMount *Mount + for _, mntNode := range filesystemMountsByPath { + mnt := mntNode.mount + if mntNode.parent != nil { + continue + } + uncontainedSubtrees := make(map[string]bool) + addUncontainedSubtreesRecursive(uncontainedSubtrees, mntNode, allUncontainedSubtrees) + if len(uncontainedSubtrees) != len(allUncontainedSubtrees) { + continue + } + // If there's more than one eligible mount, they should have the + // same Subtree. Otherwise it's ambiguous which one to use. + if mainMount != nil && mainMount.Subtree != mnt.Subtree { + log.Printf("Unsupported case: %q (%v) has multiple non-overlapping mounts. This filesystem will be ignored!", + mnt.Device, mnt.DeviceNumber) + return nil + } + // Prefer a read-write mount to a read-only one. + if mainMount == nil || mainMount.ReadOnly { + mainMount = mnt + } + } + return mainMount +} + +// This is separate from loadMountInfo() only for unit testing. +func readMountInfo(r io.Reader) error { + mountsByDevice = make(map[DeviceNumber]*Mount) + mountsByPath = make(map[string]*Mount) + allMountsByDevice := make(map[DeviceNumber][]*Mount) + allMountsByPath := make(map[string]*Mount) + + scanner := bufio.NewScanner(r) + for scanner.Scan() { + line := scanner.Text() + mnt := parseMountInfoLine(line) + if mnt == nil { + log.Printf("ignoring invalid mountinfo line %q", line) + continue + } + + // We can only use mountpoints that are directories for fscrypt. + if !isDir(mnt.Path) { + log.Printf("ignoring mountpoint %q because it is not a directory", mnt.Path) + continue + } + + // Note this overrides the info if we have seen the mountpoint + // earlier in the file. This is correct behavior because the + // mountpoints are listed in mount order. + allMountsByPath[mnt.Path] = mnt + } + // For each filesystem, choose a "main" Mount and discard any additional + // bind mounts. fscrypt only cares about the main Mount, since it's + // where the fscrypt metadata is stored. Store all the main Mounts in + // mountsByDevice and mountsByPath so that they can be found later. + for _, mnt := range allMountsByPath { + allMountsByDevice[mnt.DeviceNumber] = + append(allMountsByDevice[mnt.DeviceNumber], mnt) + } + for deviceNumber, filesystemMounts := range allMountsByDevice { + mnt := findMainMount(filesystemMounts) + mountsByDevice[deviceNumber] = mnt // may store an explicit nil entry + if mnt != nil { + mountsByPath[mnt.Path] = mnt + } + } + return nil +} + +// loadMountInfo populates the Mount mappings by parsing /proc/self/mountinfo. +// It returns an error if the Mount mappings cannot be populated. 
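As a concrete illustration of parseMountInfoLine above, a test-style sketch with a hypothetical mountinfo line; it would have to live in package filesystem, since the parser is unexported:

package filesystem

import "testing"

func TestParseMountInfoLineSketch(t *testing.T) {
	// Fields: ID, parent ID, major:minor, root, mount point, options,
	// optional field(s), "-" separator, fs type, source, super options.
	line := "97 32 253:0 / /home rw,relatime shared:52 - ext4 /dev/mapper/vg-home rw"
	mnt := parseMountInfoLine(line)
	if mnt == nil {
		t.Fatal("expected the line to parse")
	}
	if mnt.Subtree != "/" || mnt.Path != "/home" ||
		mnt.FilesystemType != "ext4" || mnt.ReadOnly {
		t.Fatalf("unexpected parse result: %+v", mnt)
	}
}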
+func loadMountInfo() error { + if !mountsInitialized { + file, err := os.Open("/proc/self/mountinfo") + if err != nil { + return err + } + defer file.Close() + if err := readMountInfo(file); err != nil { + return err + } + mountsInitialized = true + } + return nil +} + +func filesystemLacksMainMountError(deviceNumber DeviceNumber) error { + return errors.Errorf("Device %q (%v) lacks a \"main\" mountpoint in the current mount namespace, so it's ambiguous where to store the fscrypt metadata.", + getDeviceName(deviceNumber), deviceNumber) +} + +// AllFilesystems lists all mounted filesystems ordered by path to their "main" +// Mount. Use CheckSetup() to see if they are set up for use with fscrypt. +func AllFilesystems() ([]*Mount, error) { + mountMutex.Lock() + defer mountMutex.Unlock() + if err := loadMountInfo(); err != nil { + return nil, err + } + + mounts := make([]*Mount, 0, len(mountsByPath)) + for _, mount := range mountsByPath { + mounts = append(mounts, mount) + } + + sort.Sort(PathSorter(mounts)) + return mounts, nil +} + +// UpdateMountInfo updates the filesystem mountpoint maps with the current state +// of the filesystem mountpoints. Returns error if the initialization fails. +func UpdateMountInfo() error { + mountMutex.Lock() + defer mountMutex.Unlock() + mountsInitialized = false + return loadMountInfo() +} + +// FindMount returns the main Mount object for the filesystem which contains the +// file at the specified path. An error is returned if the path is invalid or if +// we cannot load the required mount data. If a mount has been updated since the +// last call to one of the mount functions, run UpdateMountInfo to see changes. +func FindMount(path string) (*Mount, error) { + mountMutex.Lock() + defer mountMutex.Unlock() + if err := loadMountInfo(); err != nil { + return nil, err + } + // First try to find the mount by the number of the containing device. + deviceNumber, err := getNumberOfContainingDevice(path) + if err != nil { + return nil, err + } + mnt, ok := mountsByDevice[deviceNumber] + if ok { + if mnt == nil { + return nil, filesystemLacksMainMountError(deviceNumber) + } + return mnt, nil + } + // The mount couldn't be found by the number of the containing device. + // Fall back to walking up the directory hierarchy and checking for a + // mount at each directory path. This is necessary for btrfs, where + // files report a different st_dev from the /proc/self/mountinfo entry. + curPath, err := canonicalizePath(path) + if err != nil { + return nil, err + } + for { + mnt := mountsByPath[curPath] + if mnt != nil { + return mnt, nil + } + // Move to the parent directory unless we have reached the root. + parent := filepath.Dir(curPath) + if parent == curPath { + return nil, errors.Errorf("couldn't find mountpoint containing %q", path) + } + curPath = parent + } +} + +// GetMount is like FindMount, except GetMount also returns an error if the path +// doesn't name the same file as the filesystem's "main" Mount. For example, if +// a filesystem is fully mounted at "/mnt" and if "/mnt/a" exists, then +// FindMount("/mnt/a") will succeed whereas GetMount("/mnt/a") will fail. This +// is true even if "/mnt/a" is a bind mount of part of the same filesystem. +func GetMount(mountpoint string) (*Mount, error) { + mnt, err := FindMount(mountpoint) + if err != nil { + return nil, &ErrNotAMountpoint{mountpoint} + } + // Check whether 'mountpoint' names the same directory as 'mnt.Path'. 
+ // Use os.SameFile() (i.e., compare inode numbers) rather than compare + // canonical paths, since filesystems may be mounted in multiple places. + fi1, err := os.Stat(mountpoint) + if err != nil { + return nil, err + } + fi2, err := os.Stat(mnt.Path) + if err != nil { + return nil, err + } + if !os.SameFile(fi1, fi2) { + return nil, &ErrNotAMountpoint{mountpoint} + } + return mnt, nil +} + +func uuidToDeviceNumber(uuid string) (DeviceNumber, error) { + uuidSymlinkPath := filepath.Join(uuidDirectory, uuid) + return getDeviceNumber(uuidSymlinkPath) +} + +func deviceNumberToMount(deviceNumber DeviceNumber) (*Mount, bool) { + mountMutex.Lock() + defer mountMutex.Unlock() + if err := loadMountInfo(); err != nil { + log.Print(err) + return nil, false + } + mnt, ok := mountsByDevice[deviceNumber] + return mnt, ok +} + +// getMountFromLink returns the main Mount, if any, for the filesystem which the +// given link points to. The link should contain a series of token-value pairs +// (=), one per line. The supported tokens are "UUID" and "PATH". +// If the UUID is present and it works, then it is used; otherwise, PATH is used +// if it is present. (The fallback from UUID to PATH will keep the link working +// if the UUID of the target filesystem changes but its mountpoint doesn't.) +// +// If a mount has been updated since the last call to one of the mount +// functions, make sure to run UpdateMountInfo first. +func getMountFromLink(link string) (*Mount, error) { + // Parse the link. + uuid := "" + path := "" + lines := strings.Split(link, "\n") + for _, line := range lines { + line := strings.TrimSpace(line) + if line == "" { + continue + } + pair := strings.Split(line, "=") + if len(pair) != 2 { + log.Printf("ignoring invalid line in filesystem link file: %q", line) + continue + } + token := pair[0] + value := pair[1] + switch token { + case uuidToken: + uuid = value + case pathToken: + path = value + default: + log.Printf("ignoring unknown link token %q", token) + } + } + // At least one of UUID and PATH must be present. + if uuid == "" && path == "" { + return nil, &ErrFollowLink{link, errors.Errorf("invalid filesystem link file")} + } + + // Try following the UUID. + errMsg := "" + if uuid != "" { + deviceNumber, err := uuidToDeviceNumber(uuid) + if err == nil { + mnt, ok := deviceNumberToMount(deviceNumber) + if mnt != nil { + log.Printf("resolved filesystem link using UUID %q", uuid) + return mnt, nil + } + if ok { + return nil, &ErrFollowLink{link, filesystemLacksMainMountError(deviceNumber)} + } + log.Printf("cannot find filesystem with UUID %q", uuid) + } else { + log.Printf("cannot find filesystem with UUID %q: %v", uuid, err) + } + errMsg += fmt.Sprintf("cannot find filesystem with UUID %q", uuid) + if path != "" { + log.Printf("falling back to using mountpoint path instead of UUID") + } + } + // UUID didn't work. As a fallback, try the mountpoint path. + if path != "" { + mnt, err := GetMount(path) + if mnt != nil { + log.Printf("resolved filesystem link using mountpoint path %q", path) + return mnt, nil + } + log.Print(err) + if errMsg == "" { + errMsg = fmt.Sprintf("cannot find filesystem with main mountpoint %q", path) + } else { + errMsg += fmt.Sprintf(" or main mountpoint %q", path) + } + } + // No method worked; return an error. 
+ return nil, &ErrFollowLink{link, errors.New(errMsg)} +} + +func (mnt *Mount) getFilesystemUUID() (string, error) { + dirContents, err := ioutil.ReadDir(uuidDirectory) + if err != nil { + return "", err + } + for _, fileInfo := range dirContents { + if fileInfo.Mode()&os.ModeSymlink == 0 { + continue // Only interested in UUID symlinks + } + uuid := fileInfo.Name() + deviceNumber, err := uuidToDeviceNumber(uuid) + if err != nil { + log.Print(err) + continue + } + if mnt.DeviceNumber == deviceNumber { + return uuid, nil + } + } + return "", errors.Errorf("cannot determine UUID of device %q (%v)", + mnt.Device, mnt.DeviceNumber) +} + +// makeLink creates the contents of a link file which will point to the given +// filesystem. This will normally be a string of the form +// "UUID=\nPATH=\n". If the UUID cannot be determined, the UUID +// portion will be omitted. +func makeLink(mnt *Mount) (string, error) { + uuid, err := mnt.getFilesystemUUID() + if err != nil { + // The UUID could not be determined. This happens for btrfs + // filesystems, as the device number found via + // /dev/disk/by-uuid/* for btrfs filesystems differs from the + // actual device number of the mounted filesystem. Just rely + // entirely on the fallback to mountpoint path. + log.Print(err) + return fmt.Sprintf("%s=%s\n", pathToken, mnt.Path), nil + } + return fmt.Sprintf("%s=%s\n%s=%s\n", uuidToken, uuid, pathToken, mnt.Path), nil +} diff --git a/vendor/github.com/google/fscrypt/filesystem/path.go b/vendor/github.com/google/fscrypt/filesystem/path.go new file mode 100644 index 000000000..8cfb23574 --- /dev/null +++ b/vendor/github.com/google/fscrypt/filesystem/path.go @@ -0,0 +1,128 @@ +/* + * path.go - Utility functions for dealing with filesystem paths + * + * Copyright 2017 Google Inc. + * Author: Joe Richey (joerichey@google.com) + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not + * use this file except in compliance with the License. You may obtain a copy of + * the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. + */ + +package filesystem + +import ( + "fmt" + "log" + "os" + "path/filepath" + + "golang.org/x/sys/unix" + + "github.com/pkg/errors" +) + +// OpenFileOverridingUmask calls os.OpenFile but with the umask overridden so +// that no permission bits are masked out if the file is created. +func OpenFileOverridingUmask(name string, flag int, perm os.FileMode) (*os.File, error) { + oldMask := unix.Umask(0) + defer unix.Umask(oldMask) + return os.OpenFile(name, flag, perm) +} + +// canonicalizePath turns path into an absolute path without symlinks. +func canonicalizePath(path string) (string, error) { + path, err := filepath.Abs(path) + if err != nil { + return "", err + } + path, err = filepath.EvalSymlinks(path) + + // Get a better error if we have an invalid path + if pathErr, ok := err.(*os.PathError); ok { + err = errors.Wrap(pathErr.Err, pathErr.Path) + } + + return path, err +} + +// loggedStat runs os.Stat, but it logs the error if stat returns any error +// other than nil or IsNotExist. 
+func loggedStat(name string) (os.FileInfo, error) { + info, err := os.Stat(name) + if err != nil && !os.IsNotExist(err) { + log.Print(err) + } + return info, err +} + +// loggedLstat runs os.Lstat (doesn't dereference trailing symlink), but it logs +// the error if lstat returns any error other than nil or IsNotExist. +func loggedLstat(name string) (os.FileInfo, error) { + info, err := os.Lstat(name) + if err != nil && !os.IsNotExist(err) { + log.Print(err) + } + return info, err +} + +// isDir returns true if the path exists and is that of a directory. +func isDir(path string) bool { + info, err := loggedStat(path) + return err == nil && info.IsDir() +} + +// isRegularFile returns true if the path exists and is that of a regular file. +func isRegularFile(path string) bool { + info, err := loggedStat(path) + return err == nil && info.Mode().IsRegular() +} + +// HaveReadAccessTo returns true if the process has read access to a file or +// directory, without actually opening it. +func HaveReadAccessTo(path string) bool { + return unix.Access(path, unix.R_OK) == nil +} + +// DeviceNumber represents a combined major:minor device number. +type DeviceNumber uint64 + +func (num DeviceNumber) String() string { + return fmt.Sprintf("%d:%d", unix.Major(uint64(num)), unix.Minor(uint64(num))) +} + +func newDeviceNumberFromString(str string) (DeviceNumber, error) { + var major, minor uint32 + if count, _ := fmt.Sscanf(str, "%d:%d", &major, &minor); count != 2 { + return 0, errors.Errorf("invalid device number string %q", str) + } + return DeviceNumber(unix.Mkdev(major, minor)), nil +} + +// getDeviceNumber returns the device number of the device node at the given +// path. If there is a symlink at the path, it is dereferenced. +func getDeviceNumber(path string) (DeviceNumber, error) { + var stat unix.Stat_t + if err := unix.Stat(path, &stat); err != nil { + return 0, err + } + return DeviceNumber(stat.Rdev), nil +} + +// getNumberOfContainingDevice returns the device number of the filesystem which +// contains the given file. If the file is a symlink, it is not dereferenced. +func getNumberOfContainingDevice(path string) (DeviceNumber, error) { + var stat unix.Stat_t + if err := unix.Lstat(path, &stat); err != nil { + return 0, err + } + return DeviceNumber(stat.Dev), nil +} diff --git a/vendor/github.com/google/fscrypt/keyring/fs_keyring.go b/vendor/github.com/google/fscrypt/keyring/fs_keyring.go new file mode 100644 index 000000000..9b949b9ea --- /dev/null +++ b/vendor/github.com/google/fscrypt/keyring/fs_keyring.go @@ -0,0 +1,326 @@ +/* + * fs_keyring.go - Add/remove encryption policy keys to/from filesystem + * + * Copyright 2019 Google LLC + * Author: Eric Biggers (ebiggers@google.com) + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not + * use this file except in compliance with the License. You may obtain a copy of + * the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. 
+ */ + +package keyring + +/* +#include +*/ +import "C" + +import ( + "encoding/hex" + "log" + "os" + "os/user" + "sync" + "unsafe" + + "github.com/pkg/errors" + "golang.org/x/sys/unix" + + "github.com/google/fscrypt/crypto" + "github.com/google/fscrypt/filesystem" + "github.com/google/fscrypt/security" + "github.com/google/fscrypt/util" +) + +var ( + fsKeyringSupported bool + fsKeyringSupportedKnown bool + fsKeyringSupportedLock sync.Mutex +) + +func checkForFsKeyringSupport(mount *filesystem.Mount) bool { + dir, err := os.Open(mount.Path) + if err != nil { + log.Printf("Unexpected error opening %q. Assuming filesystem keyring is unsupported.", + mount.Path) + return false + } + defer dir.Close() + + // FS_IOC_ADD_ENCRYPTION_KEY with a NULL argument will fail with ENOTTY + // if the ioctl isn't supported. Otherwise it should fail with EFAULT. + // + // Note that there's no need to check for FS_IOC_REMOVE_ENCRYPTION_KEY + // support separately, since it's guaranteed to be available if + // FS_IOC_ADD_ENCRYPTION_KEY is. There's also no need to check for + // support on every filesystem separately, since either the kernel + // supports the ioctls on all fscrypt-capable filesystems or it doesn't. + _, _, errno := unix.Syscall(unix.SYS_IOCTL, dir.Fd(), unix.FS_IOC_ADD_ENCRYPTION_KEY, 0) + if errno == unix.ENOTTY { + log.Printf("Kernel doesn't support filesystem keyring. Falling back to user keyring.") + return false + } + if errno == unix.EFAULT { + log.Printf("Detected support for filesystem keyring") + } else { + // EFAULT is expected, but as long as we didn't get ENOTTY the + // ioctl should be available. + log.Printf("Unexpected error from FS_IOC_ADD_ENCRYPTION_KEY(%q, NULL): %v", mount.Path, errno) + } + return true +} + +// IsFsKeyringSupported returns true if the kernel supports the ioctls to +// add/remove fscrypt keys directly to/from the filesystem. For support to be +// detected, the given Mount must be for a filesystem that supports fscrypt. +func IsFsKeyringSupported(mount *filesystem.Mount) bool { + fsKeyringSupportedLock.Lock() + defer fsKeyringSupportedLock.Unlock() + if !fsKeyringSupportedKnown { + fsKeyringSupported = checkForFsKeyringSupport(mount) + fsKeyringSupportedKnown = true + } + return fsKeyringSupported +} + +// buildKeySpecifier converts the key descriptor string to an FscryptKeySpecifier. +func buildKeySpecifier(spec *unix.FscryptKeySpecifier, descriptor string) error { + descriptorBytes, err := hex.DecodeString(descriptor) + if err != nil { + return errors.Errorf("key descriptor %q is invalid", descriptor) + } + switch len(descriptorBytes) { + case unix.FSCRYPT_KEY_DESCRIPTOR_SIZE: + spec.Type = unix.FSCRYPT_KEY_SPEC_TYPE_DESCRIPTOR + case unix.FSCRYPT_KEY_IDENTIFIER_SIZE: + spec.Type = unix.FSCRYPT_KEY_SPEC_TYPE_IDENTIFIER + default: + return errors.Errorf("key descriptor %q has unknown length", descriptor) + } + copy(spec.U[:], descriptorBytes) + return nil +} + +type savedPrivs struct { + ruid, euid, suid int +} + +// dropPrivsIfNeeded drops privileges (UIDs only) to the given user if we're +// working with a v2 policy key, and if the user is different from the user the +// process is currently running as. +// +// This is needed to change the effective UID so that FS_IOC_ADD_ENCRYPTION_KEY +// and FS_IOC_REMOVE_ENCRYPTION_KEY will add/remove a claim to the key for the +// intended user, and so that FS_IOC_GET_ENCRYPTION_KEY_STATUS will return the +// correct status flags for the user. 
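A minimal sketch of probing for this kernel support through the exported IsFsKeyringSupported helper above; the mountpoint is a placeholder, and note that the probe result is cached for the lifetime of the process:

package main

import (
	"fmt"
	"log"

	"github.com/google/fscrypt/filesystem"
	"github.com/google/fscrypt/keyring"
)

func main() {
	mnt, err := filesystem.GetMount("/mnt/data") // hypothetical fscrypt-capable mount
	if err != nil {
		log.Fatal(err)
	}
	if keyring.IsFsKeyringSupported(mnt) {
		fmt.Println("kernel supports the filesystem keyring ioctls")
	} else {
		fmt.Println("v1 policy keys would fall back to the user keyring")
	}
}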
+func dropPrivsIfNeeded(user *user.User, spec *unix.FscryptKeySpecifier) (*savedPrivs, error) { + if spec.Type == unix.FSCRYPT_KEY_SPEC_TYPE_DESCRIPTOR { + // v1 policy keys don't have any concept of user claims. + return nil, nil + } + targetUID := util.AtoiOrPanic(user.Uid) + ruid, euid, suid := security.GetUids() + if euid == targetUID { + return nil, nil + } + if err := security.SetUids(targetUID, targetUID, euid); err != nil { + return nil, err + } + return &savedPrivs{ruid, euid, suid}, nil +} + +// restorePrivs restores root privileges if needed. +func restorePrivs(privs *savedPrivs) error { + if privs != nil { + return security.SetUids(privs.ruid, privs.euid, privs.suid) + } + return nil +} + +// validateKeyDescriptor validates that the correct key descriptor was provided. +// This isn't really necessary; this is just an extra sanity check. +func validateKeyDescriptor(spec *unix.FscryptKeySpecifier, descriptor string) (string, error) { + if spec.Type != unix.FSCRYPT_KEY_SPEC_TYPE_IDENTIFIER { + // v1 policy key: the descriptor is chosen arbitrarily by + // userspace, so there's nothing to validate. + return descriptor, nil + } + // v2 policy key. The descriptor ("identifier" in the kernel UAPI) is + // calculated as a cryptographic hash of the key itself. The kernel + // ignores the provided value, and calculates and returns it itself. So + // verify that the returned value is as expected. If it's not, the key + // doesn't actually match the encryption policy we thought it was for. + actual := hex.EncodeToString(spec.U[:unix.FSCRYPT_KEY_IDENTIFIER_SIZE]) + if descriptor == actual { + return descriptor, nil + } + return actual, + errors.Errorf("provided and actual key descriptors differ (%q != %q)", + descriptor, actual) +} + +// fsAddEncryptionKey adds the specified encryption key to the specified filesystem. +func fsAddEncryptionKey(key *crypto.Key, descriptor string, + mount *filesystem.Mount, user *user.User) error { + + dir, err := os.Open(mount.Path) + if err != nil { + return err + } + defer dir.Close() + + argKey, err := crypto.NewBlankKey(int(unsafe.Sizeof(unix.FscryptAddKeyArg{})) + key.Len()) + if err != nil { + return err + } + defer argKey.Wipe() + arg := (*unix.FscryptAddKeyArg)(argKey.UnsafePtr()) + + if err = buildKeySpecifier(&arg.Key_spec, descriptor); err != nil { + return err + } + + raw := unsafe.Pointer(uintptr(argKey.UnsafePtr()) + unsafe.Sizeof(*arg)) + arg.Raw_size = uint32(key.Len()) + C.memcpy(raw, key.UnsafePtr(), C.size_t(key.Len())) + + savedPrivs, err := dropPrivsIfNeeded(user, &arg.Key_spec) + if err != nil { + return err + } + _, _, errno := unix.Syscall(unix.SYS_IOCTL, dir.Fd(), + unix.FS_IOC_ADD_ENCRYPTION_KEY, uintptr(argKey.UnsafePtr())) + restorePrivs(savedPrivs) + + log.Printf("FS_IOC_ADD_ENCRYPTION_KEY(%q, %s, ) = %v", mount.Path, descriptor, errno) + if errno != 0 { + return errors.Wrapf(errno, + "error adding key with descriptor %s to filesystem %s", + descriptor, mount.Path) + } + if descriptor, err = validateKeyDescriptor(&arg.Key_spec, descriptor); err != nil { + fsRemoveEncryptionKey(descriptor, mount, user) + return err + } + return nil +} + +// fsRemoveEncryptionKey removes the specified encryption key from the specified +// filesystem. 
+func fsRemoveEncryptionKey(descriptor string, mount *filesystem.Mount, + user *user.User) error { + + dir, err := os.Open(mount.Path) + if err != nil { + return err + } + defer dir.Close() + + var arg unix.FscryptRemoveKeyArg + if err = buildKeySpecifier(&arg.Key_spec, descriptor); err != nil { + return err + } + + ioc := uintptr(unix.FS_IOC_REMOVE_ENCRYPTION_KEY) + iocName := "FS_IOC_REMOVE_ENCRYPTION_KEY" + var savedPrivs *savedPrivs + if user == nil { + ioc = unix.FS_IOC_REMOVE_ENCRYPTION_KEY_ALL_USERS + iocName = "FS_IOC_REMOVE_ENCRYPTION_KEY_ALL_USERS" + } else { + savedPrivs, err = dropPrivsIfNeeded(user, &arg.Key_spec) + if err != nil { + return err + } + } + _, _, errno := unix.Syscall(unix.SYS_IOCTL, dir.Fd(), ioc, uintptr(unsafe.Pointer(&arg))) + restorePrivs(savedPrivs) + + log.Printf("%s(%q, %s) = %v, removal_status_flags=0x%x", + iocName, mount.Path, descriptor, errno, arg.Removal_status_flags) + switch errno { + case 0: + switch { + case arg.Removal_status_flags&unix.FSCRYPT_KEY_REMOVAL_STATUS_FLAG_OTHER_USERS != 0: + return ErrKeyAddedByOtherUsers + case arg.Removal_status_flags&unix.FSCRYPT_KEY_REMOVAL_STATUS_FLAG_FILES_BUSY != 0: + return ErrKeyFilesOpen + } + return nil + case unix.ENOKEY: + // ENOKEY means either the key is completely missing or that the + // current user doesn't have a claim to it. Distinguish between + // these two cases by getting the key status. + if user != nil { + status, _ := fsGetEncryptionKeyStatus(descriptor, mount, user) + if status == KeyPresentButOnlyOtherUsers { + return ErrKeyAddedByOtherUsers + } + } + return ErrKeyNotPresent + default: + return errors.Wrapf(errno, + "error removing key with descriptor %s from filesystem %s", + descriptor, mount.Path) + } +} + +// fsGetEncryptionKeyStatus gets the status of the specified encryption key on +// the specified filesystem. 
+func fsGetEncryptionKeyStatus(descriptor string, mount *filesystem.Mount, + user *user.User) (KeyStatus, error) { + + dir, err := os.Open(mount.Path) + if err != nil { + return KeyStatusUnknown, err + } + defer dir.Close() + + var arg unix.FscryptGetKeyStatusArg + err = buildKeySpecifier(&arg.Key_spec, descriptor) + if err != nil { + return KeyStatusUnknown, err + } + + savedPrivs, err := dropPrivsIfNeeded(user, &arg.Key_spec) + if err != nil { + return KeyStatusUnknown, err + } + _, _, errno := unix.Syscall(unix.SYS_IOCTL, dir.Fd(), + unix.FS_IOC_GET_ENCRYPTION_KEY_STATUS, uintptr(unsafe.Pointer(&arg))) + restorePrivs(savedPrivs) + + log.Printf("FS_IOC_GET_ENCRYPTION_KEY_STATUS(%q, %s) = %v, status=%d, status_flags=0x%x", + mount.Path, descriptor, errno, arg.Status, arg.Status_flags) + if errno != 0 { + return KeyStatusUnknown, + errors.Wrapf(errno, + "error getting status of key with descriptor %s on filesystem %s", + descriptor, mount.Path) + } + switch arg.Status { + case unix.FSCRYPT_KEY_STATUS_ABSENT: + return KeyAbsent, nil + case unix.FSCRYPT_KEY_STATUS_PRESENT: + if arg.Key_spec.Type != unix.FSCRYPT_KEY_SPEC_TYPE_DESCRIPTOR && + (arg.Status_flags&unix.FSCRYPT_KEY_STATUS_FLAG_ADDED_BY_SELF) == 0 { + return KeyPresentButOnlyOtherUsers, nil + } + return KeyPresent, nil + case unix.FSCRYPT_KEY_STATUS_INCOMPLETELY_REMOVED: + return KeyAbsentButFilesBusy, nil + default: + return KeyStatusUnknown, + errors.Errorf("unknown key status (%d) for key with descriptor %s on filesystem %s", + arg.Status, descriptor, mount.Path) + } +} diff --git a/vendor/github.com/google/fscrypt/keyring/keyring.go b/vendor/github.com/google/fscrypt/keyring/keyring.go new file mode 100644 index 000000000..5ddceaf8b --- /dev/null +++ b/vendor/github.com/google/fscrypt/keyring/keyring.go @@ -0,0 +1,175 @@ +/* + * keyring.go - Add/remove encryption policy keys to/from kernel + * + * Copyright 2019 Google LLC + * Author: Eric Biggers (ebiggers@google.com) + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not + * use this file except in compliance with the License. You may obtain a copy of + * the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. + */ + +// Package keyring manages adding, removing, and getting the status of +// encryption policy keys to/from the kernel. Most public functions are in +// keyring.go, and they delegate to either user_keyring.go or fs_keyring.go, +// depending on whether a user keyring or a filesystem keyring is being used. +// +// v2 encryption policies always use the filesystem keyring. +// v1 policies use the user keyring by default, but can be configured to use the +// filesystem keyring instead (requires root and kernel v5.4+). 
+package keyring + +import ( + "encoding/hex" + "os/user" + "strconv" + + "github.com/pkg/errors" + "golang.org/x/sys/unix" + + "github.com/google/fscrypt/crypto" + "github.com/google/fscrypt/filesystem" + "github.com/google/fscrypt/metadata" + "github.com/google/fscrypt/util" +) + +// Keyring error values +var ( + ErrKeyAddedByOtherUsers = errors.New("other users have added the key too") + ErrKeyFilesOpen = errors.New("some files using the key are still open") + ErrKeyNotPresent = errors.New("key not present or already removed") + ErrV2PoliciesUnsupported = errors.New("kernel is too old to support v2 encryption policies") +) + +// Options are the options which specify *which* keyring the key should be +// added/removed/gotten to, and how. +type Options struct { + // Mount is the filesystem to which the key should be + // added/removed/gotten. + Mount *filesystem.Mount + // User is the user for whom the key should be added/removed/gotten. + User *user.User + // UseFsKeyringForV1Policies is true if keys for v1 encryption policies + // should be put in the filesystem's keyring (if supported) rather than + // in the user's keyring. Note that this makes AddEncryptionKey and + // RemoveEncryptionKey require root privileges. + UseFsKeyringForV1Policies bool +} + +func shouldUseFsKeyring(descriptor string, options *Options) (bool, error) { + // For v1 encryption policy keys, use the filesystem keyring if + // use_fs_keyring_for_v1_policies is set in /etc/fscrypt.conf and the + // kernel supports it. + if len(descriptor) == hex.EncodedLen(unix.FSCRYPT_KEY_DESCRIPTOR_SIZE) { + return options.UseFsKeyringForV1Policies && IsFsKeyringSupported(options.Mount), nil + } + // For v2 encryption policy keys, always use the filesystem keyring; the + // kernel doesn't support any other way. + if !IsFsKeyringSupported(options.Mount) { + return true, ErrV2PoliciesUnsupported + } + return true, nil +} + +// buildKeyDescription builds the description for an fscrypt key of type +// "logon". For ext4 and f2fs, it uses the legacy filesystem-specific prefixes +// for compatibility with kernels before v4.8 and v4.6 respectively. For other +// filesystems it uses the generic prefix "fscrypt". +func buildKeyDescription(options *Options, descriptor string) string { + switch options.Mount.FilesystemType { + case "ext4", "f2fs": + return options.Mount.FilesystemType + ":" + descriptor + default: + return unix.FSCRYPT_KEY_DESC_PREFIX + descriptor + } +} + +// AddEncryptionKey adds an encryption policy key to a kernel keyring. It uses +// either the filesystem keyring for the target Mount or the user keyring for +// the target User. +func AddEncryptionKey(key *crypto.Key, descriptor string, options *Options) error { + if err := util.CheckValidLength(metadata.PolicyKeyLen, key.Len()); err != nil { + return errors.Wrap(err, "policy key") + } + useFsKeyring, err := shouldUseFsKeyring(descriptor, options) + if err != nil { + return err + } + if useFsKeyring { + return fsAddEncryptionKey(key, descriptor, options.Mount, options.User) + } + return userAddKey(key, buildKeyDescription(options, descriptor), options.User) +} + +// RemoveEncryptionKey removes an encryption policy key from a kernel keyring. +// It uses either the filesystem keyring for the target Mount or the user +// keyring for the target User. 
+func RemoveEncryptionKey(descriptor string, options *Options, allUsers bool) error { + useFsKeyring, err := shouldUseFsKeyring(descriptor, options) + if err != nil { + return err + } + if useFsKeyring { + user := options.User + if allUsers { + user = nil + } + return fsRemoveEncryptionKey(descriptor, options.Mount, user) + } + return userRemoveKey(buildKeyDescription(options, descriptor), options.User) +} + +// KeyStatus is an enum that represents the status of a key in a kernel keyring. +type KeyStatus int + +// The possible values of KeyStatus. +const ( + KeyStatusUnknown = 0 + iota + KeyAbsent + KeyAbsentButFilesBusy + KeyPresent + KeyPresentButOnlyOtherUsers +) + +func (status KeyStatus) String() string { + switch status { + case KeyStatusUnknown: + return "Unknown" + case KeyAbsent: + return "Absent" + case KeyAbsentButFilesBusy: + return "AbsentButFilesBusy" + case KeyPresent: + return "Present" + case KeyPresentButOnlyOtherUsers: + return "PresentButOnlyOtherUsers" + default: + return strconv.Itoa(int(status)) + } +} + +// GetEncryptionKeyStatus gets the status of an encryption policy key in a +// kernel keyring. It uses either the filesystem keyring for the target Mount +// or the user keyring for the target User. +func GetEncryptionKeyStatus(descriptor string, options *Options) (KeyStatus, error) { + useFsKeyring, err := shouldUseFsKeyring(descriptor, options) + if err != nil { + return KeyStatusUnknown, err + } + if useFsKeyring { + return fsGetEncryptionKeyStatus(descriptor, options.Mount, options.User) + } + _, _, err = userFindKey(buildKeyDescription(options, descriptor), options.User) + if err != nil { + return KeyAbsent, nil + } + return KeyPresent, nil +} diff --git a/vendor/github.com/google/fscrypt/keyring/user_keyring.go b/vendor/github.com/google/fscrypt/keyring/user_keyring.go new file mode 100644 index 000000000..0ea468957 --- /dev/null +++ b/vendor/github.com/google/fscrypt/keyring/user_keyring.go @@ -0,0 +1,251 @@ +/* + * user_keyring.go - Add/remove encryption policy keys to/from user keyrings. + * This is the deprecated mechanism; see fs_keyring.go for the new mechanism. + * + * Copyright 2017 Google Inc. + * Author: Joe Richey (joerichey@google.com) + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not + * use this file except in compliance with the License. You may obtain a copy of + * the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. + */ + +package keyring + +import ( + "os/user" + "runtime" + "unsafe" + + "github.com/pkg/errors" + "golang.org/x/sys/unix" + + "fmt" + "log" + + "github.com/google/fscrypt/crypto" + "github.com/google/fscrypt/security" + "github.com/google/fscrypt/util" +) + +// ErrAccessUserKeyring indicates that a user's keyring cannot be +// accessed. +type ErrAccessUserKeyring struct { + TargetUser *user.User + UnderlyingError error +} + +func (err *ErrAccessUserKeyring) Error() string { + return fmt.Sprintf("could not access user keyring for %q: %s", + err.TargetUser.Username, err.UnderlyingError) +} + +// ErrSessionUserKeyring indicates that a user's keyring is not linked +// into the session keyring. 
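Pulling the exported pieces of this package together, a sketch of checking the status of a (hypothetical) v1 policy key for the current user; the descriptor and mountpoint are stand-ins:

package main

import (
	"log"
	"os/user"

	"github.com/google/fscrypt/filesystem"
	"github.com/google/fscrypt/keyring"
)

func main() {
	mnt, err := filesystem.GetMount("/mnt/data") // hypothetical mountpoint
	if err != nil {
		log.Fatal(err)
	}
	u, err := user.Current()
	if err != nil {
		log.Fatal(err)
	}
	opts := &keyring.Options{Mount: mnt, User: u}
	// 16 hex characters: a v1 key descriptor, so the user keyring is used
	// unless UseFsKeyringForV1Policies is set and the kernel supports it.
	status, err := keyring.GetEncryptionKeyStatus("0123456789abcdef", opts)
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("key status: %v", status)
}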
+type ErrSessionUserKeyring struct { + TargetUser *user.User +} + +func (err *ErrSessionUserKeyring) Error() string { + return fmt.Sprintf("user keyring for %q is not linked into the session keyring", + err.TargetUser.Username) +} + +// KeyType is always logon as required by filesystem encryption. +const KeyType = "logon" + +// userAddKey puts the provided policy key into the user keyring for the +// specified user with the provided description, and type logon. +func userAddKey(key *crypto.Key, description string, targetUser *user.User) error { + runtime.LockOSThread() // ensure target user keyring remains possessed in thread keyring + defer runtime.UnlockOSThread() + + // Create our payload (containing an FscryptKey) + payload, err := crypto.NewBlankKey(int(unsafe.Sizeof(unix.FscryptKey{}))) + if err != nil { + return err + } + defer payload.Wipe() + + // Cast the payload to an FscryptKey so we can initialize the fields. + fscryptKey := (*unix.FscryptKey)(payload.UnsafePtr()) + // Mode is ignored by the kernel + fscryptKey.Mode = 0 + fscryptKey.Size = uint32(key.Len()) + copy(fscryptKey.Raw[:], key.Data()) + + keyringID, err := UserKeyringID(targetUser, true) + if err != nil { + return err + } + keyID, err := unix.AddKey(KeyType, description, payload.Data(), keyringID) + log.Printf("KeyctlAddKey(%s, %s, , %d) = %d, %v", + KeyType, description, keyringID, keyID, err) + if err != nil { + return errors.Wrapf(err, + "error adding key with description %s to user keyring for %q", + description, targetUser.Username) + } + return nil +} + +// userRemoveKey tries to remove a policy key from the user keyring with the +// provided description. An error is returned if the key does not exist. +func userRemoveKey(description string, targetUser *user.User) error { + runtime.LockOSThread() // ensure target user keyring remains possessed in thread keyring + defer runtime.UnlockOSThread() + + keyID, keyringID, err := userFindKey(description, targetUser) + if err != nil { + return ErrKeyNotPresent + } + + _, err = unix.KeyctlInt(unix.KEYCTL_UNLINK, keyID, keyringID, 0, 0) + log.Printf("KeyctlUnlink(%d, %d) = %v", keyID, keyringID, err) + if err != nil { + return errors.Wrapf(err, + "error removing key with description %s from user keyring for %q", + description, targetUser.Username) + } + return nil +} + +// userFindKey tries to locate a key with the provided description in the user +// keyring for the target user. The key ID and keyring ID are returned if we can +// find the key. An error is returned if the key does not exist. +func userFindKey(description string, targetUser *user.User) (int, int, error) { + runtime.LockOSThread() // ensure target user keyring remains possessed in thread keyring + defer runtime.UnlockOSThread() + + keyringID, err := UserKeyringID(targetUser, false) + if err != nil { + return 0, 0, err + } + + keyID, err := unix.KeyctlSearch(keyringID, KeyType, description, 0) + log.Printf("KeyctlSearch(%d, %s, %s) = %d, %v", keyringID, KeyType, description, keyID, err) + if err != nil { + return 0, 0, errors.Wrapf(err, + "error searching for key %s in user keyring for %q", + description, targetUser.Username) + } + return keyID, keyringID, err +} + +// UserKeyringID returns the key id of the target user's user keyring. We also +// ensure that the keyring will be accessible by linking it into the thread +// keyring and linking it into the root user keyring (permissions allowing). 
If +// checkSession is true, an error is returned if a normal user requests their +// user keyring, but it is not in the current session keyring. +func UserKeyringID(targetUser *user.User, checkSession bool) (int, error) { + runtime.LockOSThread() // ensure target user keyring remains possessed in thread keyring + defer runtime.UnlockOSThread() + + uid := util.AtoiOrPanic(targetUser.Uid) + targetKeyring, err := userKeyringIDLookup(uid) + if err != nil { + return 0, &ErrAccessUserKeyring{targetUser, err} + } + + if !util.IsUserRoot() { + // Make sure the returned keyring will be accessible by checking + // that it is in the session keyring. + if checkSession && !isUserKeyringInSession(uid) { + return 0, &ErrSessionUserKeyring{targetUser} + } + return targetKeyring, nil + } + + // Make sure the returned keyring will be accessible by linking it into + // the root user's user keyring (which will not be garbage collected). + rootKeyring, err := userKeyringIDLookup(0) + if err != nil { + return 0, errors.Wrapf(err, "error looking up root's user keyring") + } + + if rootKeyring != targetKeyring { + if err = keyringLink(targetKeyring, rootKeyring); err != nil { + return 0, errors.Wrapf(err, + "error linking user keyring for %q into root's user keyring", + targetUser.Username) + } + } + return targetKeyring, nil +} + +func userKeyringIDLookup(uid int) (keyringID int, err error) { + + // Our goals here are to: + // - Find the user keyring (for the provided uid) + // - Link it into the current thread keyring (so we can use it) + // - Make no permanent changes to the process privileges + // Complicating this are the facts that: + // - The value of KEY_SPEC_USER_KEYRING is determined by the ruid + // - Keyring linking permissions use the euid + // So we have to change both the ruid and euid to make this work, + // setting the suid to 0 so that we can later switch back. + ruid, euid, suid := security.GetUids() + if ruid != uid || euid != uid { + if err = security.SetUids(uid, uid, 0); err != nil { + return + } + defer func() { + resetErr := security.SetUids(ruid, euid, suid) + if resetErr != nil { + err = resetErr + } + }() + } + + // We get the value of KEY_SPEC_USER_KEYRING. Note that this will also + // trigger the creation of the uid keyring if it does not yet exist. + keyringID, err = unix.KeyctlGetKeyringID(unix.KEY_SPEC_USER_KEYRING, true) + log.Printf("keyringID(_uid.%d) = %d, %v", uid, keyringID, err) + if err != nil { + return 0, err + } + + // We still want to use this keyring after our privileges are reset. So + // we link it into the thread keyring, preventing a loss of access. + // + // We must be under LockOSThread() for this to work reliably. Note that + // we can't just use the process keyring, since it doesn't work reliably + // in Go programs, due to the Go runtime creating threads before the + // program starts and has a chance to create the process keyring. + if err = keyringLink(keyringID, unix.KEY_SPEC_THREAD_KEYRING); err != nil { + return 0, err + } + + return keyringID, nil +} + +// isUserKeyringInSession tells us if the user's uid keyring is in the current +// session keyring. +func isUserKeyringInSession(uid int) bool { + // We cannot use unix.KEY_SPEC_SESSION_KEYRING directly as that might + // create a session keyring if one does not exist. 
+ sessionKeyring, err := unix.KeyctlGetKeyringID(unix.KEY_SPEC_SESSION_KEYRING, false) + log.Printf("keyringID(session) = %d, %v", sessionKeyring, err) + if err != nil { + return false + } + + description := fmt.Sprintf("_uid.%d", uid) + id, err := unix.KeyctlSearch(sessionKeyring, "keyring", description, 0) + log.Printf("KeyctlSearch(%d, keyring, %s) = %d, %v", sessionKeyring, description, id, err) + return err == nil +} + +func keyringLink(keyID int, keyringID int) error { + _, err := unix.KeyctlInt(unix.KEYCTL_LINK, keyID, keyringID, 0, 0) + log.Printf("KeyctlLink(%d, %d) = %v", keyID, keyringID, err) + return err +} diff --git a/vendor/github.com/google/fscrypt/metadata/checks.go b/vendor/github.com/google/fscrypt/metadata/checks.go new file mode 100644 index 000000000..84fd208c8 --- /dev/null +++ b/vendor/github.com/google/fscrypt/metadata/checks.go @@ -0,0 +1,221 @@ +/* + * checks.go - Some sanity check methods for our metadata structures + * + * Copyright 2017 Google Inc. + * Author: Joe Richey (joerichey@google.com) + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not + * use this file except in compliance with the License. You may obtain a copy of + * the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. + */ + +package metadata + +import ( + "github.com/golang/protobuf/proto" + "github.com/pkg/errors" + + "github.com/google/fscrypt/util" +) + +var errNotInitialized = errors.New("not initialized") + +// Metadata is the interface to all of the protobuf structures that can be +// checked for validity. +type Metadata interface { + CheckValidity() error + proto.Message +} + +// CheckValidity ensures the mode has a name and isn't empty. +func (m EncryptionOptions_Mode) CheckValidity() error { + if m == EncryptionOptions_default { + return errNotInitialized + } + if m.String() == "" { + return errors.Errorf("unknown %d", m) + } + return nil +} + +// CheckValidity ensures the source has a name and isn't empty. +func (s SourceType) CheckValidity() error { + if s == SourceType_default { + return errNotInitialized + } + if s.String() == "" { + return errors.Errorf("unknown %d", s) + } + return nil +} + +// CheckValidity ensures the hash costs will be accepted by Argon2. +func (h *HashingCosts) CheckValidity() error { + if h == nil { + return errNotInitialized + } + if h.Time <= 0 { + return errors.Errorf("time=%d is not positive", h.Time) + } + if h.Parallelism <= 0 { + return errors.Errorf("parallelism=%d is not positive", h.Parallelism) + } + minMemory := 8 * h.Parallelism + if h.Memory < minMemory { + return errors.Errorf("memory=%d is less than minimum (%d)", h.Memory, minMemory) + } + return nil +} + +// CheckValidity ensures our buffers are the correct length. +func (w *WrappedKeyData) CheckValidity() error { + if w == nil { + return errNotInitialized + } + if len(w.EncryptedKey) == 0 { + return errors.Wrap(errNotInitialized, "encrypted key") + } + if err := util.CheckValidLength(IVLen, len(w.IV)); err != nil { + return errors.Wrap(err, "IV") + } + return errors.Wrap(util.CheckValidLength(HMACLen, len(w.Hmac)), "HMAC") +} + +// CheckValidity ensures our ProtectorData has the correct fields for its source. 
+func (p *ProtectorData) CheckValidity() error { + if p == nil { + return errNotInitialized + } + + if err := p.Source.CheckValidity(); err != nil { + return errors.Wrap(err, "protector source") + } + + // Source specific checks + switch p.Source { + case SourceType_pam_passphrase: + if p.Uid < 0 { + return errors.Errorf("UID=%d is negative", p.Uid) + } + fallthrough + case SourceType_custom_passphrase: + if err := p.Costs.CheckValidity(); err != nil { + return errors.Wrap(err, "passphrase hashing costs") + } + if err := util.CheckValidLength(SaltLen, len(p.Salt)); err != nil { + return errors.Wrap(err, "passphrase hashing salt") + } + } + + // Generic checks + if err := p.WrappedKey.CheckValidity(); err != nil { + return errors.Wrap(err, "wrapped protector key") + } + if err := util.CheckValidLength(ProtectorDescriptorLen, len(p.ProtectorDescriptor)); err != nil { + return errors.Wrap(err, "protector descriptor") + + } + err := util.CheckValidLength(InternalKeyLen, len(p.WrappedKey.EncryptedKey)) + return errors.Wrap(err, "encrypted protector key") +} + +// CheckValidity ensures each of the options is valid. +func (e *EncryptionOptions) CheckValidity() error { + if e == nil { + return errNotInitialized + } + if _, ok := util.Index(e.Padding, paddingArray); !ok { + return errors.Errorf("padding of %d is invalid", e.Padding) + } + if err := e.Contents.CheckValidity(); err != nil { + return errors.Wrap(err, "contents encryption mode") + } + if err := e.Filenames.CheckValidity(); err != nil { + return errors.Wrap(err, "filenames encryption mode") + } + // If PolicyVersion is unset, treat it as 1. + if e.PolicyVersion == 0 { + e.PolicyVersion = 1 + } + if e.PolicyVersion != 1 && e.PolicyVersion != 2 { + return errors.Errorf("policy version of %d is invalid", e.PolicyVersion) + } + return nil +} + +// CheckValidity ensures the fields are valid and have the correct lengths. +func (w *WrappedPolicyKey) CheckValidity() error { + if w == nil { + return errNotInitialized + } + if err := w.WrappedKey.CheckValidity(); err != nil { + return errors.Wrap(err, "wrapped key") + } + if err := util.CheckValidLength(PolicyKeyLen, len(w.WrappedKey.EncryptedKey)); err != nil { + return errors.Wrap(err, "encrypted key") + } + err := util.CheckValidLength(ProtectorDescriptorLen, len(w.ProtectorDescriptor)) + return errors.Wrap(err, "wrapping protector descriptor") +} + +// CheckValidity ensures the fields and each wrapped key are valid. +func (p *PolicyData) CheckValidity() error { + if p == nil { + return errNotInitialized + } + // Check each wrapped key + for i, w := range p.WrappedPolicyKeys { + if err := w.CheckValidity(); err != nil { + return errors.Wrapf(err, "policy key slot %d", i) + } + } + + if err := p.Options.CheckValidity(); err != nil { + return errors.Wrap(err, "policy options") + } + + var expectedLen int + switch p.Options.PolicyVersion { + case 1: + expectedLen = PolicyDescriptorLenV1 + case 2: + expectedLen = PolicyDescriptorLenV2 + default: + return errors.Errorf("policy version of %d is invalid", p.Options.PolicyVersion) + } + + if err := util.CheckValidLength(expectedLen, len(p.KeyDescriptor)); err != nil { + return errors.Wrap(err, "policy key descriptor") + } + + return nil +} + +// CheckValidity ensures the Config has all the necessary info for its Source. 
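These validity checks compose, so a caller can sanity-check a full set of encryption options before writing any metadata. A small sketch using the DefaultOptions value added later in this diff (metadata/constants.go):

package main

import (
	"fmt"

	"github.com/google/fscrypt/metadata"
)

func main() {
	opts := metadata.DefaultOptions // AES-256-XTS/CTS, padding 32, policy v1
	if err := opts.CheckValidity(); err != nil {
		fmt.Println("invalid encryption options:", err)
		return
	}
	fmt.Printf("options are valid (policy version %d)\n", opts.PolicyVersion)
}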
+func (c *Config) CheckValidity() error { + // General checks + if c == nil { + return errNotInitialized + } + if err := c.Source.CheckValidity(); err != nil { + return errors.Wrap(err, "default config source") + } + + // Source specific checks + switch c.Source { + case SourceType_pam_passphrase, SourceType_custom_passphrase: + if err := c.HashCosts.CheckValidity(); err != nil { + return errors.Wrap(err, "config hashing costs") + } + } + + return errors.Wrap(c.Options.CheckValidity(), "config options") +} diff --git a/vendor/github.com/google/fscrypt/metadata/config.go b/vendor/github.com/google/fscrypt/metadata/config.go new file mode 100644 index 000000000..b3c872693 --- /dev/null +++ b/vendor/github.com/google/fscrypt/metadata/config.go @@ -0,0 +1,59 @@ +/* + * config.go - Parsing for our global config file. The file is simply the JSON + * output of the Config protocol buffer. + * + * Copyright 2017 Google Inc. + * Author: Joe Richey (joerichey@google.com) + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not + * use this file except in compliance with the License. You may obtain a copy of + * the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. + */ + +// Package metadata contains all of the on disk structures. +// These structures are defined in metadata.proto. The package also +// contains functions for manipulating these structures, specifically: +// * Reading and Writing the Config file to disk +// * Getting and Setting Policies for directories +// * Reasonable defaults for a Policy's EncryptionOptions +package metadata + +import ( + "io" + + "github.com/golang/protobuf/jsonpb" +) + +// WriteConfig outputs the Config data as nicely formatted JSON +func WriteConfig(config *Config, out io.Writer) error { + m := jsonpb.Marshaler{ + EmitDefaults: true, + EnumsAsInts: false, + Indent: "\t", + OrigName: true, + } + if err := m.Marshal(out, config); err != nil { + return err + } + + _, err := out.Write([]byte{'\n'}) + return err +} + +// ReadConfig writes the JSON data into the config structure +func ReadConfig(in io.Reader) (*Config, error) { + config := new(Config) + // Allow (and ignore) unknown fields for forwards compatibility. + u := jsonpb.Unmarshaler{ + AllowUnknownFields: true, + } + return config, u.Unmarshal(in, config) +} diff --git a/vendor/github.com/google/fscrypt/metadata/constants.go b/vendor/github.com/google/fscrypt/metadata/constants.go new file mode 100644 index 000000000..fa6b8a759 --- /dev/null +++ b/vendor/github.com/google/fscrypt/metadata/constants.go @@ -0,0 +1,57 @@ +/* + * constants.go - Some metadata constants used throughout fscrypt + * + * Copyright 2017 Google Inc. + * Author: Joe Richey (joerichey@google.com) + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not + * use this file except in compliance with the License. You may obtain a copy of + * the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the + * License for the specific language governing permissions and limitations under + * the License. + */ + +package metadata + +import ( + "crypto/sha256" + + "golang.org/x/sys/unix" +) + +// Lengths for our keys, buffers, and strings used in fscrypt. +const ( + // Length of policy descriptor (in hex chars) for v1 encryption policies + PolicyDescriptorLenV1 = 2 * unix.FSCRYPT_KEY_DESCRIPTOR_SIZE + // Length of protector descriptor (in hex chars) + ProtectorDescriptorLen = PolicyDescriptorLenV1 + // Length of policy descriptor (in hex chars) for v2 encryption policies + PolicyDescriptorLenV2 = 2 * unix.FSCRYPT_KEY_IDENTIFIER_SIZE + // We always use 256-bit keys internally (compared to 512-bit policy keys). + InternalKeyLen = 32 + IVLen = 16 + SaltLen = 16 + // We use SHA256 for the HMAC, and len(HMAC) == len(hash size). + HMACLen = sha256.Size + // PolicyKeyLen is the length of all keys passed directly to the Keyring + PolicyKeyLen = unix.FSCRYPT_MAX_KEY_SIZE +) + +var ( + // DefaultOptions use the supported encryption modes, max padding, and + // policy version 1. + DefaultOptions = &EncryptionOptions{ + Padding: 32, + Contents: EncryptionOptions_AES_256_XTS, + Filenames: EncryptionOptions_AES_256_CTS, + PolicyVersion: 1, + } + // DefaultSource is the source we use if none is specified. + DefaultSource = SourceType_custom_passphrase +) diff --git a/vendor/github.com/google/fscrypt/metadata/metadata.pb.go b/vendor/github.com/google/fscrypt/metadata/metadata.pb.go new file mode 100644 index 000000000..67098043c --- /dev/null +++ b/vendor/github.com/google/fscrypt/metadata/metadata.pb.go @@ -0,0 +1,589 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: metadata/metadata.proto + +package metadata + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. 
+const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +// Specifies the method in which an outside secret is obtained for a Protector +type SourceType int32 + +const ( + SourceType_default SourceType = 0 + SourceType_pam_passphrase SourceType = 1 + SourceType_custom_passphrase SourceType = 2 + SourceType_raw_key SourceType = 3 +) + +var SourceType_name = map[int32]string{ + 0: "default", + 1: "pam_passphrase", + 2: "custom_passphrase", + 3: "raw_key", +} +var SourceType_value = map[string]int32{ + "default": 0, + "pam_passphrase": 1, + "custom_passphrase": 2, + "raw_key": 3, +} + +func (x SourceType) String() string { + return proto.EnumName(SourceType_name, int32(x)) +} +func (SourceType) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_metadata_31965d2849cb292a, []int{0} +} + +// Type of encryption; should match declarations of unix.FSCRYPT_MODE +type EncryptionOptions_Mode int32 + +const ( + EncryptionOptions_default EncryptionOptions_Mode = 0 + EncryptionOptions_AES_256_XTS EncryptionOptions_Mode = 1 + EncryptionOptions_AES_256_GCM EncryptionOptions_Mode = 2 + EncryptionOptions_AES_256_CBC EncryptionOptions_Mode = 3 + EncryptionOptions_AES_256_CTS EncryptionOptions_Mode = 4 + EncryptionOptions_AES_128_CBC EncryptionOptions_Mode = 5 + EncryptionOptions_AES_128_CTS EncryptionOptions_Mode = 6 + EncryptionOptions_Adiantum EncryptionOptions_Mode = 9 +) + +var EncryptionOptions_Mode_name = map[int32]string{ + 0: "default", + 1: "AES_256_XTS", + 2: "AES_256_GCM", + 3: "AES_256_CBC", + 4: "AES_256_CTS", + 5: "AES_128_CBC", + 6: "AES_128_CTS", + 9: "Adiantum", +} +var EncryptionOptions_Mode_value = map[string]int32{ + "default": 0, + "AES_256_XTS": 1, + "AES_256_GCM": 2, + "AES_256_CBC": 3, + "AES_256_CTS": 4, + "AES_128_CBC": 5, + "AES_128_CTS": 6, + "Adiantum": 9, +} + +func (x EncryptionOptions_Mode) String() string { + return proto.EnumName(EncryptionOptions_Mode_name, int32(x)) +} +func (EncryptionOptions_Mode) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_metadata_31965d2849cb292a, []int{3, 0} +} + +// Cost parameters to be used in our hashing functions. 
+type HashingCosts struct { + Time int64 `protobuf:"varint,2,opt,name=time,proto3" json:"time,omitempty"` + Memory int64 `protobuf:"varint,3,opt,name=memory,proto3" json:"memory,omitempty"` + Parallelism int64 `protobuf:"varint,4,opt,name=parallelism,proto3" json:"parallelism,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *HashingCosts) Reset() { *m = HashingCosts{} } +func (m *HashingCosts) String() string { return proto.CompactTextString(m) } +func (*HashingCosts) ProtoMessage() {} +func (*HashingCosts) Descriptor() ([]byte, []int) { + return fileDescriptor_metadata_31965d2849cb292a, []int{0} +} +func (m *HashingCosts) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_HashingCosts.Unmarshal(m, b) +} +func (m *HashingCosts) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_HashingCosts.Marshal(b, m, deterministic) +} +func (dst *HashingCosts) XXX_Merge(src proto.Message) { + xxx_messageInfo_HashingCosts.Merge(dst, src) +} +func (m *HashingCosts) XXX_Size() int { + return xxx_messageInfo_HashingCosts.Size(m) +} +func (m *HashingCosts) XXX_DiscardUnknown() { + xxx_messageInfo_HashingCosts.DiscardUnknown(m) +} + +var xxx_messageInfo_HashingCosts proto.InternalMessageInfo + +func (m *HashingCosts) GetTime() int64 { + if m != nil { + return m.Time + } + return 0 +} + +func (m *HashingCosts) GetMemory() int64 { + if m != nil { + return m.Memory + } + return 0 +} + +func (m *HashingCosts) GetParallelism() int64 { + if m != nil { + return m.Parallelism + } + return 0 +} + +// This structure is used for our authenticated wrapping/unwrapping of keys. +type WrappedKeyData struct { + IV []byte `protobuf:"bytes,1,opt,name=IV,proto3" json:"IV,omitempty"` + EncryptedKey []byte `protobuf:"bytes,2,opt,name=encrypted_key,json=encryptedKey,proto3" json:"encrypted_key,omitempty"` + Hmac []byte `protobuf:"bytes,3,opt,name=hmac,proto3" json:"hmac,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *WrappedKeyData) Reset() { *m = WrappedKeyData{} } +func (m *WrappedKeyData) String() string { return proto.CompactTextString(m) } +func (*WrappedKeyData) ProtoMessage() {} +func (*WrappedKeyData) Descriptor() ([]byte, []int) { + return fileDescriptor_metadata_31965d2849cb292a, []int{1} +} +func (m *WrappedKeyData) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_WrappedKeyData.Unmarshal(m, b) +} +func (m *WrappedKeyData) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_WrappedKeyData.Marshal(b, m, deterministic) +} +func (dst *WrappedKeyData) XXX_Merge(src proto.Message) { + xxx_messageInfo_WrappedKeyData.Merge(dst, src) +} +func (m *WrappedKeyData) XXX_Size() int { + return xxx_messageInfo_WrappedKeyData.Size(m) +} +func (m *WrappedKeyData) XXX_DiscardUnknown() { + xxx_messageInfo_WrappedKeyData.DiscardUnknown(m) +} + +var xxx_messageInfo_WrappedKeyData proto.InternalMessageInfo + +func (m *WrappedKeyData) GetIV() []byte { + if m != nil { + return m.IV + } + return nil +} + +func (m *WrappedKeyData) GetEncryptedKey() []byte { + if m != nil { + return m.EncryptedKey + } + return nil +} + +func (m *WrappedKeyData) GetHmac() []byte { + if m != nil { + return m.Hmac + } + return nil +} + +// The associated data for each protector +type ProtectorData struct { + ProtectorDescriptor string 
`protobuf:"bytes,1,opt,name=protector_descriptor,json=protectorDescriptor,proto3" json:"protector_descriptor,omitempty"` + Source SourceType `protobuf:"varint,2,opt,name=source,proto3,enum=metadata.SourceType" json:"source,omitempty"` + // These are only used by some of the protector types + Name string `protobuf:"bytes,3,opt,name=name,proto3" json:"name,omitempty"` + Costs *HashingCosts `protobuf:"bytes,4,opt,name=costs,proto3" json:"costs,omitempty"` + Salt []byte `protobuf:"bytes,5,opt,name=salt,proto3" json:"salt,omitempty"` + Uid int64 `protobuf:"varint,6,opt,name=uid,proto3" json:"uid,omitempty"` + WrappedKey *WrappedKeyData `protobuf:"bytes,7,opt,name=wrapped_key,json=wrappedKey,proto3" json:"wrapped_key,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ProtectorData) Reset() { *m = ProtectorData{} } +func (m *ProtectorData) String() string { return proto.CompactTextString(m) } +func (*ProtectorData) ProtoMessage() {} +func (*ProtectorData) Descriptor() ([]byte, []int) { + return fileDescriptor_metadata_31965d2849cb292a, []int{2} +} +func (m *ProtectorData) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ProtectorData.Unmarshal(m, b) +} +func (m *ProtectorData) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ProtectorData.Marshal(b, m, deterministic) +} +func (dst *ProtectorData) XXX_Merge(src proto.Message) { + xxx_messageInfo_ProtectorData.Merge(dst, src) +} +func (m *ProtectorData) XXX_Size() int { + return xxx_messageInfo_ProtectorData.Size(m) +} +func (m *ProtectorData) XXX_DiscardUnknown() { + xxx_messageInfo_ProtectorData.DiscardUnknown(m) +} + +var xxx_messageInfo_ProtectorData proto.InternalMessageInfo + +func (m *ProtectorData) GetProtectorDescriptor() string { + if m != nil { + return m.ProtectorDescriptor + } + return "" +} + +func (m *ProtectorData) GetSource() SourceType { + if m != nil { + return m.Source + } + return SourceType_default +} + +func (m *ProtectorData) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *ProtectorData) GetCosts() *HashingCosts { + if m != nil { + return m.Costs + } + return nil +} + +func (m *ProtectorData) GetSalt() []byte { + if m != nil { + return m.Salt + } + return nil +} + +func (m *ProtectorData) GetUid() int64 { + if m != nil { + return m.Uid + } + return 0 +} + +func (m *ProtectorData) GetWrappedKey() *WrappedKeyData { + if m != nil { + return m.WrappedKey + } + return nil +} + +// Encryption policy specifics, corresponds to the fscrypt_policy struct +type EncryptionOptions struct { + Padding int64 `protobuf:"varint,1,opt,name=padding,proto3" json:"padding,omitempty"` + Contents EncryptionOptions_Mode `protobuf:"varint,2,opt,name=contents,proto3,enum=metadata.EncryptionOptions_Mode" json:"contents,omitempty"` + Filenames EncryptionOptions_Mode `protobuf:"varint,3,opt,name=filenames,proto3,enum=metadata.EncryptionOptions_Mode" json:"filenames,omitempty"` + PolicyVersion int64 `protobuf:"varint,4,opt,name=policy_version,json=policyVersion,proto3" json:"policy_version,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *EncryptionOptions) Reset() { *m = EncryptionOptions{} } +func (m *EncryptionOptions) String() string { return proto.CompactTextString(m) } +func (*EncryptionOptions) ProtoMessage() {} +func (*EncryptionOptions) Descriptor() ([]byte, []int) { + return 
fileDescriptor_metadata_31965d2849cb292a, []int{3} +} +func (m *EncryptionOptions) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_EncryptionOptions.Unmarshal(m, b) +} +func (m *EncryptionOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_EncryptionOptions.Marshal(b, m, deterministic) +} +func (dst *EncryptionOptions) XXX_Merge(src proto.Message) { + xxx_messageInfo_EncryptionOptions.Merge(dst, src) +} +func (m *EncryptionOptions) XXX_Size() int { + return xxx_messageInfo_EncryptionOptions.Size(m) +} +func (m *EncryptionOptions) XXX_DiscardUnknown() { + xxx_messageInfo_EncryptionOptions.DiscardUnknown(m) +} + +var xxx_messageInfo_EncryptionOptions proto.InternalMessageInfo + +func (m *EncryptionOptions) GetPadding() int64 { + if m != nil { + return m.Padding + } + return 0 +} + +func (m *EncryptionOptions) GetContents() EncryptionOptions_Mode { + if m != nil { + return m.Contents + } + return EncryptionOptions_default +} + +func (m *EncryptionOptions) GetFilenames() EncryptionOptions_Mode { + if m != nil { + return m.Filenames + } + return EncryptionOptions_default +} + +func (m *EncryptionOptions) GetPolicyVersion() int64 { + if m != nil { + return m.PolicyVersion + } + return 0 +} + +type WrappedPolicyKey struct { + ProtectorDescriptor string `protobuf:"bytes,1,opt,name=protector_descriptor,json=protectorDescriptor,proto3" json:"protector_descriptor,omitempty"` + WrappedKey *WrappedKeyData `protobuf:"bytes,2,opt,name=wrapped_key,json=wrappedKey,proto3" json:"wrapped_key,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *WrappedPolicyKey) Reset() { *m = WrappedPolicyKey{} } +func (m *WrappedPolicyKey) String() string { return proto.CompactTextString(m) } +func (*WrappedPolicyKey) ProtoMessage() {} +func (*WrappedPolicyKey) Descriptor() ([]byte, []int) { + return fileDescriptor_metadata_31965d2849cb292a, []int{4} +} +func (m *WrappedPolicyKey) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_WrappedPolicyKey.Unmarshal(m, b) +} +func (m *WrappedPolicyKey) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_WrappedPolicyKey.Marshal(b, m, deterministic) +} +func (dst *WrappedPolicyKey) XXX_Merge(src proto.Message) { + xxx_messageInfo_WrappedPolicyKey.Merge(dst, src) +} +func (m *WrappedPolicyKey) XXX_Size() int { + return xxx_messageInfo_WrappedPolicyKey.Size(m) +} +func (m *WrappedPolicyKey) XXX_DiscardUnknown() { + xxx_messageInfo_WrappedPolicyKey.DiscardUnknown(m) +} + +var xxx_messageInfo_WrappedPolicyKey proto.InternalMessageInfo + +func (m *WrappedPolicyKey) GetProtectorDescriptor() string { + if m != nil { + return m.ProtectorDescriptor + } + return "" +} + +func (m *WrappedPolicyKey) GetWrappedKey() *WrappedKeyData { + if m != nil { + return m.WrappedKey + } + return nil +} + +// The associated data for each policy +type PolicyData struct { + KeyDescriptor string `protobuf:"bytes,1,opt,name=key_descriptor,json=keyDescriptor,proto3" json:"key_descriptor,omitempty"` + Options *EncryptionOptions `protobuf:"bytes,2,opt,name=options,proto3" json:"options,omitempty"` + WrappedPolicyKeys []*WrappedPolicyKey `protobuf:"bytes,3,rep,name=wrapped_policy_keys,json=wrappedPolicyKeys,proto3" json:"wrapped_policy_keys,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *PolicyData) Reset() { *m = PolicyData{} } +func (m *PolicyData) 
String() string { return proto.CompactTextString(m) } +func (*PolicyData) ProtoMessage() {} +func (*PolicyData) Descriptor() ([]byte, []int) { + return fileDescriptor_metadata_31965d2849cb292a, []int{5} +} +func (m *PolicyData) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_PolicyData.Unmarshal(m, b) +} +func (m *PolicyData) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_PolicyData.Marshal(b, m, deterministic) +} +func (dst *PolicyData) XXX_Merge(src proto.Message) { + xxx_messageInfo_PolicyData.Merge(dst, src) +} +func (m *PolicyData) XXX_Size() int { + return xxx_messageInfo_PolicyData.Size(m) +} +func (m *PolicyData) XXX_DiscardUnknown() { + xxx_messageInfo_PolicyData.DiscardUnknown(m) +} + +var xxx_messageInfo_PolicyData proto.InternalMessageInfo + +func (m *PolicyData) GetKeyDescriptor() string { + if m != nil { + return m.KeyDescriptor + } + return "" +} + +func (m *PolicyData) GetOptions() *EncryptionOptions { + if m != nil { + return m.Options + } + return nil +} + +func (m *PolicyData) GetWrappedPolicyKeys() []*WrappedPolicyKey { + if m != nil { + return m.WrappedPolicyKeys + } + return nil +} + +// Data stored in the config file +type Config struct { + Source SourceType `protobuf:"varint,1,opt,name=source,proto3,enum=metadata.SourceType" json:"source,omitempty"` + HashCosts *HashingCosts `protobuf:"bytes,2,opt,name=hash_costs,json=hashCosts,proto3" json:"hash_costs,omitempty"` + Options *EncryptionOptions `protobuf:"bytes,4,opt,name=options,proto3" json:"options,omitempty"` + UseFsKeyringForV1Policies bool `protobuf:"varint,5,opt,name=use_fs_keyring_for_v1_policies,json=useFsKeyringForV1Policies,proto3" json:"use_fs_keyring_for_v1_policies,omitempty"` + AllowCrossUserMetadata bool `protobuf:"varint,6,opt,name=allow_cross_user_metadata,json=allowCrossUserMetadata,proto3" json:"allow_cross_user_metadata,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Config) Reset() { *m = Config{} } +func (m *Config) String() string { return proto.CompactTextString(m) } +func (*Config) ProtoMessage() {} +func (*Config) Descriptor() ([]byte, []int) { + return fileDescriptor_metadata_31965d2849cb292a, []int{6} +} +func (m *Config) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Config.Unmarshal(m, b) +} +func (m *Config) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Config.Marshal(b, m, deterministic) +} +func (dst *Config) XXX_Merge(src proto.Message) { + xxx_messageInfo_Config.Merge(dst, src) +} +func (m *Config) XXX_Size() int { + return xxx_messageInfo_Config.Size(m) +} +func (m *Config) XXX_DiscardUnknown() { + xxx_messageInfo_Config.DiscardUnknown(m) +} + +var xxx_messageInfo_Config proto.InternalMessageInfo + +func (m *Config) GetSource() SourceType { + if m != nil { + return m.Source + } + return SourceType_default +} + +func (m *Config) GetHashCosts() *HashingCosts { + if m != nil { + return m.HashCosts + } + return nil +} + +func (m *Config) GetOptions() *EncryptionOptions { + if m != nil { + return m.Options + } + return nil +} + +func (m *Config) GetUseFsKeyringForV1Policies() bool { + if m != nil { + return m.UseFsKeyringForV1Policies + } + return false +} + +func (m *Config) GetAllowCrossUserMetadata() bool { + if m != nil { + return m.AllowCrossUserMetadata + } + return false +} + +func init() { + proto.RegisterType((*HashingCosts)(nil), "metadata.HashingCosts") + 
proto.RegisterType((*WrappedKeyData)(nil), "metadata.WrappedKeyData") + proto.RegisterType((*ProtectorData)(nil), "metadata.ProtectorData") + proto.RegisterType((*EncryptionOptions)(nil), "metadata.EncryptionOptions") + proto.RegisterType((*WrappedPolicyKey)(nil), "metadata.WrappedPolicyKey") + proto.RegisterType((*PolicyData)(nil), "metadata.PolicyData") + proto.RegisterType((*Config)(nil), "metadata.Config") + proto.RegisterEnum("metadata.SourceType", SourceType_name, SourceType_value) + proto.RegisterEnum("metadata.EncryptionOptions_Mode", EncryptionOptions_Mode_name, EncryptionOptions_Mode_value) +} + +func init() { proto.RegisterFile("metadata/metadata.proto", fileDescriptor_metadata_31965d2849cb292a) } + +var fileDescriptor_metadata_31965d2849cb292a = []byte{ + // 748 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x9c, 0x55, 0xdb, 0x6a, 0xf3, 0x46, + 0x10, 0xae, 0x24, 0xc7, 0x87, 0xf1, 0xa1, 0xca, 0xfe, 0x69, 0xaa, 0xb4, 0x50, 0x8c, 0x4b, 0x20, + 0x94, 0x90, 0x62, 0x97, 0x94, 0x06, 0x4a, 0x21, 0x75, 0x92, 0x36, 0x09, 0xa1, 0xe9, 0xda, 0x75, + 0x5b, 0x28, 0x88, 0x8d, 0xb4, 0xb6, 0x17, 0x4b, 0x5a, 0xb1, 0xbb, 0x8a, 0xd1, 0x5d, 0xef, 0xfa, + 0x00, 0x7d, 0x97, 0xf6, 0x65, 0xfa, 0x30, 0x45, 0x2b, 0xc9, 0x87, 0x04, 0x42, 0xf2, 0xdf, 0x98, + 0xd9, 0x6f, 0x67, 0xe6, 0x9b, 0xf9, 0x66, 0xc7, 0x82, 0x8f, 0x43, 0xaa, 0x88, 0x4f, 0x14, 0xf9, + 0xb2, 0x34, 0x4e, 0x62, 0xc1, 0x15, 0x47, 0xf5, 0xf2, 0xdc, 0xfb, 0x03, 0x5a, 0x3f, 0x12, 0x39, + 0x67, 0xd1, 0x6c, 0xc8, 0xa5, 0x92, 0x08, 0x41, 0x45, 0xb1, 0x90, 0x3a, 0x66, 0xd7, 0x38, 0xb2, + 0xb0, 0xb6, 0xd1, 0x3e, 0x54, 0x43, 0x1a, 0x72, 0x91, 0x3a, 0x96, 0x46, 0x8b, 0x13, 0xea, 0x42, + 0x33, 0x26, 0x82, 0x04, 0x01, 0x0d, 0x98, 0x0c, 0x9d, 0x8a, 0xbe, 0xdc, 0x84, 0x7a, 0xbf, 0x43, + 0xe7, 0x57, 0x41, 0xe2, 0x98, 0xfa, 0xb7, 0x34, 0xbd, 0x20, 0x8a, 0xa0, 0x0e, 0x98, 0xd7, 0x13, + 0xc7, 0xe8, 0x1a, 0x47, 0x2d, 0x6c, 0x5e, 0x4f, 0xd0, 0xe7, 0xd0, 0xa6, 0x91, 0x27, 0xd2, 0x58, + 0x51, 0xdf, 0x5d, 0xd0, 0x54, 0x13, 0xb7, 0x70, 0x6b, 0x05, 0xde, 0xd2, 0x34, 0x2b, 0x6a, 0x1e, + 0x12, 0x4f, 0xd3, 0xb7, 0xb0, 0xb6, 0x7b, 0x7f, 0x9b, 0xd0, 0xbe, 0x17, 0x5c, 0x51, 0x4f, 0x71, + 0xa1, 0x53, 0xf7, 0x61, 0x2f, 0x2e, 0x01, 0xd7, 0xa7, 0xd2, 0x13, 0x2c, 0x56, 0x5c, 0x68, 0xb2, + 0x06, 0x7e, 0xb7, 0xba, 0xbb, 0x58, 0x5d, 0xa1, 0x63, 0xa8, 0x4a, 0x9e, 0x08, 0x2f, 0xef, 0xb7, + 0x33, 0xd8, 0x3b, 0x59, 0x09, 0x35, 0xd2, 0xf8, 0x38, 0x8d, 0x29, 0x2e, 0x7c, 0xb2, 0x32, 0x22, + 0x12, 0x52, 0x5d, 0x46, 0x03, 0x6b, 0x1b, 0x1d, 0xc3, 0x8e, 0x97, 0x09, 0xa7, 0xbb, 0x6f, 0x0e, + 0xf6, 0xd7, 0x09, 0x36, 0x65, 0xc5, 0xb9, 0x53, 0x96, 0x41, 0x92, 0x40, 0x39, 0x3b, 0x79, 0x23, + 0x99, 0x8d, 0x6c, 0xb0, 0x12, 0xe6, 0x3b, 0x55, 0xad, 0x5e, 0x66, 0xa2, 0x33, 0x68, 0x2e, 0x73, + 0xd5, 0xb4, 0x22, 0x35, 0x9d, 0xd9, 0x59, 0x67, 0xde, 0x96, 0x14, 0xc3, 0x72, 0x75, 0xee, 0xfd, + 0x67, 0xc2, 0xee, 0x65, 0x2e, 0x1d, 0xe3, 0xd1, 0x4f, 0xfa, 0x57, 0x22, 0x07, 0x6a, 0x31, 0xf1, + 0x7d, 0x16, 0xcd, 0xb4, 0x18, 0x16, 0x2e, 0x8f, 0xe8, 0x5b, 0xa8, 0x7b, 0x3c, 0x52, 0x34, 0x52, + 0xb2, 0x90, 0xa0, 0xbb, 0xe6, 0x79, 0x96, 0xe8, 0xe4, 0x8e, 0xfb, 0x14, 0xaf, 0x22, 0xd0, 0x77, + 0xd0, 0x98, 0xb2, 0x80, 0x66, 0x42, 0x48, 0xad, 0xca, 0x6b, 0xc2, 0xd7, 0x21, 0xe8, 0x10, 0x3a, + 0x31, 0x0f, 0x98, 0x97, 0xba, 0x8f, 0x54, 0x48, 0xc6, 0xa3, 0xe2, 0x0d, 0xb5, 0x73, 0x74, 0x92, + 0x83, 0xbd, 0xbf, 0x0c, 0xa8, 0x64, 0xa1, 0xa8, 0x09, 0x35, 0x9f, 0x4e, 0x49, 0x12, 0x28, 0xfb, + 0x03, 0xf4, 0x21, 0x34, 0xcf, 0x2f, 0x47, 0xee, 0xe0, 0xf4, 0x6b, 0xf7, 0xb7, 
0xf1, 0xc8, 0x36, + 0x36, 0x81, 0x1f, 0x86, 0x77, 0xb6, 0xb9, 0x09, 0x0c, 0xbf, 0x1f, 0xda, 0xd6, 0x16, 0x30, 0x1e, + 0xd9, 0x95, 0x12, 0xe8, 0x0f, 0xbe, 0xd1, 0x1e, 0x3b, 0x5b, 0xc0, 0x78, 0x64, 0x57, 0x51, 0x0b, + 0xea, 0xe7, 0x3e, 0x23, 0x91, 0x4a, 0x42, 0xbb, 0xd1, 0xfb, 0xd3, 0x00, 0xbb, 0x50, 0xff, 0x5e, + 0x97, 0x98, 0xbd, 0xce, 0xf7, 0x78, 0x77, 0x4f, 0x26, 0x6c, 0xbe, 0x61, 0xc2, 0xff, 0x18, 0x00, + 0x39, 0xb7, 0x7e, 0xf4, 0x87, 0xd0, 0x59, 0xd0, 0xf4, 0x39, 0x6d, 0x7b, 0x41, 0xd3, 0x0d, 0xc2, + 0x53, 0xa8, 0xf1, 0x7c, 0x08, 0x05, 0xd9, 0xa7, 0x2f, 0xcc, 0x09, 0x97, 0xbe, 0xe8, 0x06, 0xde, + 0x95, 0x75, 0x16, 0x83, 0x5a, 0xd0, 0x34, 0x1b, 0xb5, 0x75, 0xd4, 0x1c, 0x7c, 0xf2, 0xac, 0xde, + 0x95, 0x26, 0x78, 0x77, 0xf9, 0x04, 0x91, 0xbd, 0x7f, 0x4d, 0xa8, 0x0e, 0x79, 0x34, 0x65, 0xb3, + 0x8d, 0xb5, 0x33, 0x5e, 0xb1, 0x76, 0xa7, 0x00, 0x73, 0x22, 0xe7, 0x6e, 0xbe, 0x67, 0xe6, 0x8b, + 0x7b, 0xd6, 0xc8, 0x3c, 0xf3, 0x7f, 0xb2, 0x8d, 0x96, 0x2b, 0x6f, 0x68, 0xf9, 0x1c, 0x3e, 0x4b, + 0x24, 0x75, 0xa7, 0x32, 0x6b, 0x55, 0xb0, 0x68, 0xe6, 0x4e, 0xb9, 0x70, 0x1f, 0xfb, 0xb9, 0x00, + 0x8c, 0x4a, 0xbd, 0xbc, 0x75, 0x7c, 0x90, 0x48, 0x7a, 0x25, 0x6f, 0x73, 0x9f, 0x2b, 0x2e, 0x26, + 0xfd, 0xfb, 0xc2, 0x01, 0x9d, 0xc1, 0x01, 0x09, 0x02, 0xbe, 0x74, 0x3d, 0xc1, 0xa5, 0x74, 0x13, + 0x49, 0x85, 0x5b, 0x52, 0xeb, 0x3d, 0xaf, 0xe3, 0x7d, 0xed, 0x30, 0xcc, 0xee, 0x7f, 0x91, 0x54, + 0xdc, 0x15, 0xb7, 0x37, 0x95, 0xba, 0x65, 0x57, 0x70, 0xdb, 0xe3, 0x61, 0x4c, 0x14, 0x7b, 0x60, + 0x01, 0x53, 0xe9, 0x17, 0x3f, 0x03, 0xac, 0x65, 0xd9, 0x5e, 0x02, 0x04, 0x9d, 0x98, 0x84, 0x6e, + 0x4c, 0xa4, 0x8c, 0xe7, 0x82, 0x48, 0x6a, 0x1b, 0xe8, 0x23, 0xd8, 0xf5, 0x12, 0xa9, 0xf8, 0x16, + 0x6c, 0x66, 0x71, 0x82, 0x2c, 0xb3, 0xae, 0x6c, 0xeb, 0xa1, 0xaa, 0xbf, 0x03, 0x5f, 0xfd, 0x1f, + 0x00, 0x00, 0xff, 0xff, 0xe2, 0x78, 0x9e, 0x2e, 0x22, 0x06, 0x00, 0x00, +} diff --git a/vendor/github.com/google/fscrypt/metadata/metadata.proto b/vendor/github.com/google/fscrypt/metadata/metadata.proto new file mode 100644 index 000000000..84245e020 --- /dev/null +++ b/vendor/github.com/google/fscrypt/metadata/metadata.proto @@ -0,0 +1,107 @@ +/* + * metadata.proto - File which contains all of the metadata structures which we + * write to metadata files. Must be compiled with protoc to use the library. + * Compilation can be invoked with go generate. + * + * Copyright 2017 Google Inc. + * Author: Joe Richey (joerichey@google.com) + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not + * use this file except in compliance with the License. You may obtain a copy of + * the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. + */ + +// If you modify this file, be sure to run "go generate" on this package. +syntax = "proto3"; +package metadata; + +// Cost parameters to be used in our hashing functions. +message HashingCosts { + int64 time = 2; + int64 memory = 3; + int64 parallelism = 4; +} + +// This structure is used for our authenticated wrapping/unwrapping of keys. 
+message WrappedKeyData { + bytes IV = 1; + bytes encrypted_key = 2; + bytes hmac = 3; +} + +// Specifies the method in which an outside secret is obtained for a Protector +enum SourceType { + default = 0; + pam_passphrase = 1; + custom_passphrase = 2; + raw_key = 3; +} + +// The associated data for each protector +message ProtectorData { + string protector_descriptor = 1; + SourceType source = 2; + + // These are only used by some of the protector types + string name = 3; + HashingCosts costs = 4; + bytes salt = 5; + int64 uid = 6; + + WrappedKeyData wrapped_key = 7; +} + +// Encryption policy specifics, corresponds to the fscrypt_policy struct +message EncryptionOptions { + int64 padding = 1; + + // Type of encryption; should match declarations of unix.FSCRYPT_MODE + enum Mode { + default = 0; + AES_256_XTS = 1; + AES_256_GCM = 2; + AES_256_CBC = 3; + AES_256_CTS = 4; + AES_128_CBC = 5; + AES_128_CTS = 6; + Adiantum = 9; + } + + Mode contents = 2; + Mode filenames = 3; + + int64 policy_version = 4; +} + +message WrappedPolicyKey { + string protector_descriptor = 1; + WrappedKeyData wrapped_key = 2; +} + +// The associated data for each policy +message PolicyData { + string key_descriptor = 1; + EncryptionOptions options = 2; + repeated WrappedPolicyKey wrapped_policy_keys = 3; +} + +// Data stored in the config file +message Config { + SourceType source = 1; + HashingCosts hash_costs = 2; + EncryptionOptions options = 4; + bool use_fs_keyring_for_v1_policies = 5; + bool allow_cross_user_metadata = 6; + + // reserve the removed field 'string compatibility = 3;' + reserved 3; + reserved "compatibility"; +} diff --git a/vendor/github.com/google/fscrypt/metadata/policy.go b/vendor/github.com/google/fscrypt/metadata/policy.go new file mode 100644 index 000000000..e218a0814 --- /dev/null +++ b/vendor/github.com/google/fscrypt/metadata/policy.go @@ -0,0 +1,348 @@ +/* + * policy.go - Functions for getting and setting policies on a specified + * directory or file. + * + * Copyright 2017 Google Inc. + * Author: Joe Richey (joerichey@google.com) + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not + * use this file except in compliance with the License. You may obtain a copy of + * the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. + */ + +package metadata + +import ( + "encoding/hex" + "fmt" + "log" + "math" + "os" + "os/user" + "strconv" + "unsafe" + + "github.com/pkg/errors" + "golang.org/x/sys/unix" + + "github.com/google/fscrypt/util" +) + +var ( + // ErrEncryptionNotSupported indicates that encryption is not supported + // on the given filesystem, and there is no way to enable it. + ErrEncryptionNotSupported = errors.New("encryption not supported") + + // ErrEncryptionNotEnabled indicates that encryption is not supported on + // the given filesystem, but there is a way to enable it. + ErrEncryptionNotEnabled = errors.New("encryption not enabled") +) + +// ErrAlreadyEncrypted indicates that the path is already encrypted. 
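Callers are expected to tell these error values apart by type or identity; a sketch of how that might look around GetPolicy (defined later in this file) — the path is illustrative:

```go
package main

import (
	"log"

	"github.com/google/fscrypt/metadata"
)

func describe(path string) {
	policy, err := metadata.GetPolicy(path)
	switch typed := err.(type) {
	case nil:
		log.Printf("%q: v%d policy, key descriptor %s",
			path, policy.Options.PolicyVersion, policy.KeyDescriptor)
	case *metadata.ErrNotEncrypted:
		log.Printf("%q is not encrypted", typed.Path)
	default:
		// The "not supported"/"not enabled" cases are plain sentinel values.
		if err == metadata.ErrEncryptionNotSupported || err == metadata.ErrEncryptionNotEnabled {
			log.Printf("%q: filesystem cannot use fscrypt: %v", path, err)
			return
		}
		log.Printf("%q: unexpected error: %v", path, err)
	}
}

func main() {
	describe("/mnt/data/private") // illustrative path
}
```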
+type ErrAlreadyEncrypted struct { + Path string +} + +func (err *ErrAlreadyEncrypted) Error() string { + return fmt.Sprintf("file or directory %q is already encrypted", err.Path) +} + +// ErrBadEncryptionOptions indicates that unsupported encryption options were given. +type ErrBadEncryptionOptions struct { + Path string + Options *EncryptionOptions +} + +func (err *ErrBadEncryptionOptions) Error() string { + return fmt.Sprintf(`cannot encrypt %q because the kernel doesn't support the requested encryption options. + + The options are %s`, err.Path, err.Options) +} + +// ErrDirectoryNotOwned indicates a directory can't be encrypted because it's +// owned by another user. +type ErrDirectoryNotOwned struct { + Path string + Owner uint32 +} + +func (err *ErrDirectoryNotOwned) Error() string { + owner := strconv.Itoa(int(err.Owner)) + if u, e := user.LookupId(owner); e == nil && u.Username != "" { + owner = u.Username + } + return fmt.Sprintf(`cannot encrypt %q because it's owned by another user (%s). + + Encryption can only be enabled on a directory you own, even if you have + write access to the directory.`, err.Path, owner) +} + +// ErrNotEncrypted indicates that the path is not encrypted. +type ErrNotEncrypted struct { + Path string +} + +func (err *ErrNotEncrypted) Error() string { + return fmt.Sprintf("file or directory %q is not encrypted", err.Path) +} + +func policyIoctl(file *os.File, request uintptr, arg unsafe.Pointer) error { + _, _, errno := unix.Syscall(unix.SYS_IOCTL, file.Fd(), request, uintptr(arg)) + if errno == 0 { + return nil + } + return errno +} + +// Maps EncryptionOptions.Padding <-> FSCRYPT_POLICY_FLAGS +var ( + paddingArray = []int64{4, 8, 16, 32} + flagsArray = []int64{unix.FSCRYPT_POLICY_FLAGS_PAD_4, unix.FSCRYPT_POLICY_FLAGS_PAD_8, + unix.FSCRYPT_POLICY_FLAGS_PAD_16, unix.FSCRYPT_POLICY_FLAGS_PAD_32} +) + +// flagsToPadding returns the amount of padding specified in the policy flags. +func flagsToPadding(flags uint8) int64 { + paddingFlag := int64(flags & unix.FS_POLICY_FLAGS_PAD_MASK) + + // This lookup should always succeed + padding, ok := util.Lookup(paddingFlag, flagsArray, paddingArray) + if !ok { + log.Panicf("padding flag of %x not found", paddingFlag) + } + return padding +} + +func buildV1PolicyData(policy *unix.FscryptPolicyV1) *PolicyData { + return &PolicyData{ + KeyDescriptor: hex.EncodeToString(policy.Master_key_descriptor[:]), + Options: &EncryptionOptions{ + Padding: flagsToPadding(policy.Flags), + Contents: EncryptionOptions_Mode(policy.Contents_encryption_mode), + Filenames: EncryptionOptions_Mode(policy.Filenames_encryption_mode), + PolicyVersion: 1, + }, + } +} + +func buildV2PolicyData(policy *unix.FscryptPolicyV2) *PolicyData { + return &PolicyData{ + KeyDescriptor: hex.EncodeToString(policy.Master_key_identifier[:]), + Options: &EncryptionOptions{ + Padding: flagsToPadding(policy.Flags), + Contents: EncryptionOptions_Mode(policy.Contents_encryption_mode), + Filenames: EncryptionOptions_Mode(policy.Filenames_encryption_mode), + PolicyVersion: 2, + }, + } +} + +// GetPolicy returns the Policy data for the given directory or file (includes +// the KeyDescriptor and the encryption options). Returns an error if the +// path is not encrypted or the policy couldn't be retrieved. +func GetPolicy(path string) (*PolicyData, error) { + file, err := os.Open(path) + if err != nil { + return nil, err + } + defer file.Close() + + // First try the new version of the ioctl. This works for both v1 and v2 policies. 
+ var arg unix.FscryptGetPolicyExArg + arg.Size = uint64(unsafe.Sizeof(arg.Policy)) + policyPtr := util.Ptr(arg.Policy[:]) + err = policyIoctl(file, unix.FS_IOC_GET_ENCRYPTION_POLICY_EX, unsafe.Pointer(&arg)) + if err == unix.ENOTTY { + // Fall back to the old version of the ioctl. This works for v1 policies only. + err = policyIoctl(file, unix.FS_IOC_GET_ENCRYPTION_POLICY, policyPtr) + arg.Size = uint64(unsafe.Sizeof(unix.FscryptPolicyV1{})) + } + switch err { + case nil: + break + case unix.ENOTTY: + return nil, ErrEncryptionNotSupported + case unix.EOPNOTSUPP: + return nil, ErrEncryptionNotEnabled + case unix.ENODATA, unix.ENOENT: + // ENOENT was returned instead of ENODATA on some filesystems before v4.11. + return nil, &ErrNotEncrypted{path} + default: + return nil, errors.Wrapf(err, "failed to get encryption policy of %q", path) + } + switch arg.Policy[0] { // arg.policy.version + case unix.FSCRYPT_POLICY_V1: + if arg.Size != uint64(unsafe.Sizeof(unix.FscryptPolicyV1{})) { + // should never happen + return nil, errors.New("unexpected size for v1 policy") + } + return buildV1PolicyData((*unix.FscryptPolicyV1)(policyPtr)), nil + case unix.FSCRYPT_POLICY_V2: + if arg.Size != uint64(unsafe.Sizeof(unix.FscryptPolicyV2{})) { + // should never happen + return nil, errors.New("unexpected size for v2 policy") + } + return buildV2PolicyData((*unix.FscryptPolicyV2)(policyPtr)), nil + default: + return nil, errors.Errorf("unsupported encryption policy version [%d]", + arg.Policy[0]) + } +} + +// For improved performance, use the DIRECT_KEY flag when using ciphers that +// support it, e.g. Adiantum. It is safe because fscrypt won't reuse the key +// for any other policy. (Multiple directories with same policy are okay.) +func shouldUseDirectKeyFlag(options *EncryptionOptions) bool { + // Contents and filenames encryption modes must be the same + if options.Contents != options.Filenames { + return false + } + // Currently only Adiantum supports DIRECT_KEY. 
+ return options.Contents == EncryptionOptions_Adiantum +} + +func buildPolicyFlags(options *EncryptionOptions) uint8 { + // This lookup should always succeed (as policy is valid) + flags, ok := util.Lookup(options.Padding, paddingArray, flagsArray) + if !ok { + log.Panicf("padding of %d was not found", options.Padding) + } + if shouldUseDirectKeyFlag(options) { + flags |= unix.FSCRYPT_POLICY_FLAG_DIRECT_KEY + } + return uint8(flags) +} + +func setV1Policy(file *os.File, options *EncryptionOptions, descriptorBytes []byte) error { + policy := unix.FscryptPolicyV1{ + Version: unix.FSCRYPT_POLICY_V1, + Contents_encryption_mode: uint8(options.Contents), + Filenames_encryption_mode: uint8(options.Filenames), + Flags: uint8(buildPolicyFlags(options)), + } + + // The descriptor should always be the correct length (as policy is valid) + if len(descriptorBytes) != unix.FSCRYPT_KEY_DESCRIPTOR_SIZE { + log.Panic("wrong descriptor size for v1 policy") + } + copy(policy.Master_key_descriptor[:], descriptorBytes) + + return policyIoctl(file, unix.FS_IOC_SET_ENCRYPTION_POLICY, unsafe.Pointer(&policy)) +} + +func setV2Policy(file *os.File, options *EncryptionOptions, descriptorBytes []byte) error { + policy := unix.FscryptPolicyV2{ + Version: unix.FSCRYPT_POLICY_V2, + Contents_encryption_mode: uint8(options.Contents), + Filenames_encryption_mode: uint8(options.Filenames), + Flags: uint8(buildPolicyFlags(options)), + } + + // The descriptor should always be the correct length (as policy is valid) + if len(descriptorBytes) != unix.FSCRYPT_KEY_IDENTIFIER_SIZE { + log.Panic("wrong descriptor size for v2 policy") + } + copy(policy.Master_key_identifier[:], descriptorBytes) + + return policyIoctl(file, unix.FS_IOC_SET_ENCRYPTION_POLICY, unsafe.Pointer(&policy)) +} + +// SetPolicy sets up the specified directory to be encrypted with the specified +// policy. Returns an error if we cannot set the policy for any reason (not a +// directory, invalid options or KeyDescriptor, etc). +func SetPolicy(path string, data *PolicyData) error { + file, err := os.Open(path) + if err != nil { + return err + } + defer file.Close() + + if err = data.CheckValidity(); err != nil { + return errors.Wrap(err, "invalid policy") + } + + descriptorBytes, err := hex.DecodeString(data.KeyDescriptor) + if err != nil { + return errors.New("invalid key descriptor: " + data.KeyDescriptor) + } + + switch data.Options.PolicyVersion { + case 1: + err = setV1Policy(file, data.Options, descriptorBytes) + case 2: + err = setV2Policy(file, data.Options, descriptorBytes) + default: + err = errors.Errorf("policy version of %d is invalid", data.Options.PolicyVersion) + } + if err == unix.EINVAL { + // Before kernel v4.11, many different errors all caused unix.EINVAL to be returned. + // We try to disambiguate this error here. This disambiguation will not always give + // the correct error due to a potential race condition on path. 
+ if info, statErr := os.Stat(path); statErr != nil || !info.IsDir() { + // Checking if the path is not a directory + err = unix.ENOTDIR + } else if _, policyErr := GetPolicy(path); policyErr == nil { + // Checking if a policy is already set on this directory + err = unix.EEXIST + } + } + switch err { + case nil: + return nil + case unix.EACCES: + var stat unix.Stat_t + if statErr := unix.Stat(path, &stat); statErr == nil && stat.Uid != uint32(os.Geteuid()) { + return &ErrDirectoryNotOwned{path, stat.Uid} + } + case unix.EEXIST: + return &ErrAlreadyEncrypted{path} + case unix.EINVAL: + return &ErrBadEncryptionOptions{path, data.Options} + case unix.ENOTTY: + return ErrEncryptionNotSupported + case unix.EOPNOTSUPP: + return ErrEncryptionNotEnabled + } + return errors.Wrapf(err, "failed to set encryption policy on %q", path) +} + +// CheckSupport returns an error if the filesystem containing path does not +// support filesystem encryption. This can be for many reasons including an +// incompatible kernel or filesystem or not enabling the right feature flags. +func CheckSupport(path string) error { + file, err := os.Open(path) + if err != nil { + return err + } + defer file.Close() + + // On supported directories, giving a bad policy will return EINVAL + badPolicy := unix.FscryptPolicyV1{ + Version: math.MaxUint8, + Contents_encryption_mode: math.MaxUint8, + Filenames_encryption_mode: math.MaxUint8, + Flags: math.MaxUint8, + } + + err = policyIoctl(file, unix.FS_IOC_SET_ENCRYPTION_POLICY, unsafe.Pointer(&badPolicy)) + switch err { + case nil: + log.Panicf(`FS_IOC_SET_ENCRYPTION_POLICY succeeded when it should have failed. + Please open an issue, filesystem %q may be corrupted.`, path) + case unix.EINVAL, unix.EACCES: + return nil + case unix.ENOTTY: + return ErrEncryptionNotSupported + case unix.EOPNOTSUPP: + return ErrEncryptionNotEnabled + } + return errors.Wrapf(err, "unexpected error checking for encryption support on filesystem %q", path) +} diff --git a/vendor/github.com/google/fscrypt/security/cache.go b/vendor/github.com/google/fscrypt/security/cache.go new file mode 100644 index 000000000..f11248d2b --- /dev/null +++ b/vendor/github.com/google/fscrypt/security/cache.go @@ -0,0 +1,49 @@ +/* + * cache.go - Handles cache clearing and management. + * + * Copyright 2017 Google Inc. + * Author: Joe Richey (joerichey@google.com) + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not + * use this file except in compliance with the License. You may obtain a copy of + * the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. + */ + +package security + +import ( + "log" + "os" + + "golang.org/x/sys/unix" +) + +// DropFilesystemCache instructs the kernel to free the reclaimable inodes and +// dentries. This has the effect of making encrypted directories whose keys are +// not present no longer accessible. Requires root privileges. +func DropFilesystemCache() error { + // Dirty reclaimable inodes must be synced so that they will be freed. 
+ log.Print("syncing changes to filesystem") + unix.Sync() + + // See: https://www.kernel.org/doc/Documentation/sysctl/vm.txt + log.Print("freeing reclaimable inodes and dentries") + file, err := os.OpenFile("/proc/sys/vm/drop_caches", os.O_WRONLY|os.O_SYNC, 0) + if err != nil { + return err + } + defer file.Close() + // "2" just frees the reclaimable inodes and dentries. The associated + // pages to these inodes will be freed. We do not need to free the + // entire pagecache (as this will severely impact performance). + _, err = file.WriteString("2") + return err +} diff --git a/vendor/github.com/google/fscrypt/security/privileges.go b/vendor/github.com/google/fscrypt/security/privileges.go new file mode 100644 index 000000000..5bdd43c5d --- /dev/null +++ b/vendor/github.com/google/fscrypt/security/privileges.go @@ -0,0 +1,156 @@ +/* + * privileges.go - Functions for managing users and privileges. + * + * Copyright 2017 Google Inc. + * Author: Joe Richey (joerichey@google.com) + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not + * use this file except in compliance with the License. You may obtain a copy of + * the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. + */ + +// Package security manages: +// - Cache clearing (cache.go) +// - Privilege manipulation (privileges.go) +package security + +// Use the libc versions of setreuid, setregid, and setgroups instead of the +// "sys/unix" versions. The "sys/unix" versions use the raw syscalls which +// operate on the calling thread only, whereas the libc versions operate on the +// whole process. And we need to operate on the whole process, firstly for +// pam_fscrypt to prevent the privileges of Go worker threads from diverging +// from the PAM stack's "main" thread, violating libc's assumption and causing +// an abort() later in the PAM stack; and secondly because Go code may migrate +// between OS-level threads while it's running. +// +// See also: https://github.com/golang/go/issues/1435 + +/* +#define _GNU_SOURCE // for getresuid and setresuid +#include +#include // getting and setting uids and gids +#include // setgroups +*/ +import "C" + +import ( + "log" + "os/user" + "syscall" + + "github.com/pkg/errors" + + "github.com/google/fscrypt/util" +) + +// Privileges encapsulate the effective uid/gid and groups of a process. +type Privileges struct { + euid C.uid_t + egid C.gid_t + groups []C.gid_t +} + +// ProcessPrivileges returns the process's current effective privileges. +func ProcessPrivileges() (*Privileges, error) { + ruid := C.getuid() + euid := C.geteuid() + rgid := C.getgid() + egid := C.getegid() + + var groups []C.gid_t + n, err := C.getgroups(0, nil) + if n < 0 { + return nil, err + } + // If n == 0, the user isn't in any groups, so groups == nil is fine. + if n > 0 { + groups = make([]C.gid_t, n) + n, err = C.getgroups(n, &groups[0]) + if n < 0 { + return nil, err + } + groups = groups[:n] + } + log.Printf("Current privs (real, effective): uid=(%d,%d) gid=(%d,%d) groups=%v", + ruid, euid, rgid, egid, groups) + return &Privileges{euid, egid, groups}, nil +} + +// UserPrivileges returns the default privileges for the specified user. 
+func UserPrivileges(user *user.User) (*Privileges, error) { + privs := &Privileges{ + euid: C.uid_t(util.AtoiOrPanic(user.Uid)), + egid: C.gid_t(util.AtoiOrPanic(user.Gid)), + } + userGroups, err := user.GroupIds() + if err != nil { + return nil, util.SystemError(err.Error()) + } + privs.groups = make([]C.gid_t, len(userGroups)) + for i, group := range userGroups { + privs.groups[i] = C.gid_t(util.AtoiOrPanic(group)) + } + return privs, nil +} + +// SetProcessPrivileges sets the privileges of the current process to have those +// specified by privs. The original privileges can be obtained by first saving +// the output of ProcessPrivileges, calling SetProcessPrivileges with the +// desired privs, then calling SetProcessPrivileges with the saved privs. +func SetProcessPrivileges(privs *Privileges) error { + log.Printf("Setting euid=%d egid=%d groups=%v", privs.euid, privs.egid, privs.groups) + + // If setting privs as root, we need to set the euid to 0 first, so that + // we will have the necessary permissions to make the other changes to + // the groups/egid/euid, regardless of our original euid. + C.seteuid(0) + + // Separately handle the case where the user is in no groups. + numGroups := C.size_t(len(privs.groups)) + groupsPtr := (*C.gid_t)(nil) + if numGroups > 0 { + groupsPtr = &privs.groups[0] + } + + if res, err := C.setgroups(numGroups, groupsPtr); res < 0 { + return errors.Wrapf(err.(syscall.Errno), "setting groups") + } + if res, err := C.setegid(privs.egid); res < 0 { + return errors.Wrapf(err.(syscall.Errno), "setting egid") + } + if res, err := C.seteuid(privs.euid); res < 0 { + return errors.Wrapf(err.(syscall.Errno), "setting euid") + } + ProcessPrivileges() + return nil +} + +// SetUids sets the process's real, effective, and saved UIDs. +func SetUids(ruid, euid, suid int) error { + log.Printf("Setting ruid=%d euid=%d suid=%d", ruid, euid, suid) + // We elevate all the privs before setting them. This prevents issues + // with (ruid=1000,euid=1000,suid=0), where just a single call to + // setresuid might fail with permission denied. + if res, err := C.setresuid(0, 0, 0); res < 0 { + return errors.Wrapf(err.(syscall.Errno), "setting uids") + } + if res, err := C.setresuid(C.uid_t(ruid), C.uid_t(euid), C.uid_t(suid)); res < 0 { + return errors.Wrapf(err.(syscall.Errno), "setting uids") + } + return nil +} + +// GetUids gets the process's real, effective, and saved UIDs. +func GetUids() (int, int, int) { + var ruid, euid, suid C.uid_t + C.getresuid(&ruid, &euid, &suid) + return int(ruid), int(euid), int(suid) +} diff --git a/vendor/github.com/google/fscrypt/util/errors.go b/vendor/github.com/google/fscrypt/util/errors.go new file mode 100644 index 000000000..3c87a2c4c --- /dev/null +++ b/vendor/github.com/google/fscrypt/util/errors.go @@ -0,0 +1,135 @@ +/* + * errors.go - Custom errors and error functions used by fscrypt + * + * Copyright 2017 Google Inc. + * Author: Joe Richey (joerichey@google.com) + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not + * use this file except in compliance with the License. You may obtain a copy of + * the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. 
+ */ + +package util + +import ( + "fmt" + "io" + "log" + "os" + + "github.com/pkg/errors" +) + +// ErrReader wraps an io.Reader, passing along calls to Read() until a read +// fails. Then, the error is stored, and all subsequent calls to Read() do +// nothing. This allows you to write code which has many subsequent reads and +// do all of the error checking at the end. For example: +// +// r := NewErrReader(reader) +// r.Read(foo) +// r.Read(bar) +// r.Read(baz) +// if r.Err() != nil { +// // Handle error +// } +// +// Taken from https://blog.golang.org/errors-are-values by Rob Pike. +type ErrReader struct { + r io.Reader + err error +} + +// NewErrReader creates an ErrReader which wraps the provided reader. +func NewErrReader(reader io.Reader) *ErrReader { + return &ErrReader{r: reader, err: nil} +} + +// Read runs ReadFull on the wrapped reader if no errors have occurred. +// Otherwise, the previous error is just returned and no reads are attempted. +func (e *ErrReader) Read(p []byte) (n int, err error) { + if e.err == nil { + n, e.err = io.ReadFull(e.r, p) + } + return n, e.err +} + +// Err returns the first encountered err (or nil if no errors occurred). +func (e *ErrReader) Err() error { + return e.err +} + +// ErrWriter works exactly like ErrReader, except with io.Writer. +type ErrWriter struct { + w io.Writer + err error +} + +// NewErrWriter creates an ErrWriter which wraps the provided writer. +func NewErrWriter(writer io.Writer) *ErrWriter { + return &ErrWriter{w: writer, err: nil} +} + +// Write runs the wrapped writer's Write if no errors have occurred. Otherwise, +// the previous error is just returned and no writes are attempted. +func (e *ErrWriter) Write(p []byte) (n int, err error) { + if e.err == nil { + n, e.err = e.w.Write(p) + } + return n, e.err +} + +// Err returns the first encountered err (or nil if no errors occurred). +func (e *ErrWriter) Err() error { + return e.err +} + +// CheckValidLength returns an invalid length error if expected != actual +func CheckValidLength(expected, actual int) error { + if expected == actual { + return nil + } + return fmt.Errorf("expected length of %d, got %d", expected, actual) +} + +// SystemError is an error that should indicate something has gone wrong in the +// underlying system (syscall failure, bad ioctl, etc...). +type SystemError string + +func (s SystemError) Error() string { + return "system error: " + string(s) +} + +// NeverError panics if a non-nil error is passed in. It should be used to check +// for logic errors, not to handle recoverable errors. +func NeverError(err error) { + if err != nil { + log.Panicf("NeverError() check failed: %v", err) + } +} + +var ( + // testEnvVarName is the name of an environment variable that should be + // set to an empty mountpoint. This is only used for integration tests. + // If not set, integration tests are skipped. + testEnvVarName = "TEST_FILESYSTEM_ROOT" + // ErrSkipIntegration indicates integration tests shouldn't be run. + ErrSkipIntegration = errors.New("skipping integration test") +) + +// TestRoot returns a the root of a filesystem specified by testEnvVarName. This +// function is only used for integration tests. 
+func TestRoot() (string, error) { + path := os.Getenv(testEnvVarName) + if path == "" { + return "", ErrSkipIntegration + } + return path, nil +} diff --git a/vendor/github.com/google/fscrypt/util/util.go b/vendor/github.com/google/fscrypt/util/util.go new file mode 100644 index 000000000..1dab335b8 --- /dev/null +++ b/vendor/github.com/google/fscrypt/util/util.go @@ -0,0 +1,163 @@ +/* + * util.go - Various helpers used throughout fscrypt + * + * Copyright 2017 Google Inc. + * Author: Joe Richey (joerichey@google.com) + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not + * use this file except in compliance with the License. You may obtain a copy of + * the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. + */ + +// Package util contains useful components for simplifying Go code. +// +// The package contains common error types (errors.go) and functions for +// converting arrays to pointers. +package util + +import ( + "bufio" + "fmt" + "log" + "os" + "os/user" + "strconv" + "unsafe" + + "golang.org/x/sys/unix" +) + +// Ptr converts a Go byte array to a pointer to the start of the array. +func Ptr(slice []byte) unsafe.Pointer { + if len(slice) == 0 { + return nil + } + return unsafe.Pointer(&slice[0]) +} + +// ByteSlice takes a pointer to some data and views it as a slice of bytes. +// Note, indexing into this slice is unsafe. +func ByteSlice(ptr unsafe.Pointer) []byte { + // Slice must fit in the smallest address space go supports. + return (*[1 << 30]byte)(ptr)[:] +} + +// PointerSlice takes a pointer to an array of pointers and views it as a slice +// of pointers. Note, indexing into this slice is unsafe. +func PointerSlice(ptr unsafe.Pointer) []unsafe.Pointer { + // Slice must fit in the smallest address space go supports. + return (*[1 << 28]unsafe.Pointer)(ptr)[:] +} + +// Index returns the first index i such that inVal == inArray[i]. +// ok is true if we find a match, false otherwise. +func Index(inVal int64, inArray []int64) (index int, ok bool) { + for index, val := range inArray { + if val == inVal { + return index, true + } + } + return 0, false +} + +// Lookup finds inVal in inArray and returns the corresponding element in +// outArray. Specifically, if inVal == inArray[i], outVal == outArray[i]. +// ok is true if we find a match, false otherwise. +func Lookup(inVal int64, inArray, outArray []int64) (outVal int64, ok bool) { + index, ok := Index(inVal, inArray) + if !ok { + return 0, false + } + return outArray[index], true +} + +// MinInt returns the lesser of a and b. +func MinInt(a, b int) int { + if a < b { + return a + } + return b +} + +// MaxInt returns the greater of a and b. +func MaxInt(a, b int) int { + if a > b { + return a + } + return b +} + +// MinInt64 returns the lesser of a and b. +func MinInt64(a, b int64) int64 { + if a < b { + return a + } + return b +} + +// ReadLine returns a line of input from standard input. An empty string is +// returned if the user didn't insert anything or on error. +func ReadLine() (string, error) { + scanner := bufio.NewScanner(os.Stdin) + scanner.Scan() + return scanner.Text(), scanner.Err() +} + +// AtoiOrPanic converts a string to an int or it panics. 
Should only be used in +// situations where the input MUST be a decimal number. +func AtoiOrPanic(input string) int { + i, err := strconv.Atoi(input) + if err != nil { + panic(err) + } + return i +} + +// UserFromUID returns the User corresponding to the given user id. +func UserFromUID(uid int64) (*user.User, error) { + return user.LookupId(strconv.FormatInt(uid, 10)) +} + +// EffectiveUser returns the user entry corresponding to the effective user. +func EffectiveUser() (*user.User, error) { + return UserFromUID(int64(os.Geteuid())) +} + +// IsUserRoot checks if the effective user is root. +func IsUserRoot() bool { + return os.Geteuid() == 0 +} + +// Chown changes the owner of a File to a User. +func Chown(file *os.File, user *user.User) error { + uid := AtoiOrPanic(user.Uid) + gid := AtoiOrPanic(user.Gid) + return file.Chown(uid, gid) +} + +// IsKernelVersionAtLeast returns true if the Linux kernel version is at least +// major.minor. If something goes wrong it assumes false. +func IsKernelVersionAtLeast(major, minor int) bool { + var uname unix.Utsname + if err := unix.Uname(&uname); err != nil { + log.Printf("Uname failed [%v], assuming old kernel", err) + return false + } + release := string(uname.Release[:]) + log.Printf("Kernel version is %s", release) + var actualMajor, actualMinor int + if n, _ := fmt.Sscanf(release, "%d.%d", &actualMajor, &actualMinor); n != 2 { + log.Printf("Unrecognized uname format %q, assuming old kernel", release) + return false + } + return actualMajor > major || + (actualMajor == major && actualMinor >= minor) +} diff --git a/vendor/github.com/pkg/xattr/.gitignore b/vendor/github.com/pkg/xattr/.gitignore new file mode 100644 index 000000000..d8b32652e --- /dev/null +++ b/vendor/github.com/pkg/xattr/.gitignore @@ -0,0 +1,26 @@ +# Compiled Object files, Static and Dynamic libs (Shared Objects) +*.o +*.a +*.so + +# Folders +_obj +_test +.DS_Store + +# Architecture specific extensions/prefixes +*.[568vq] +[568vq].out + +*.cgo1.go +*.cgo2.c +_cgo_defun.c +_cgo_gotypes.go +_cgo_export.* + +_testmain.go + +*.exe +*.test + +*.swp diff --git a/vendor/github.com/pkg/xattr/LICENSE b/vendor/github.com/pkg/xattr/LICENSE new file mode 100644 index 000000000..99d2e9dc8 --- /dev/null +++ b/vendor/github.com/pkg/xattr/LICENSE @@ -0,0 +1,25 @@ +Copyright (c) 2012 Dave Cheney. All rights reserved. +Copyright (c) 2014 Kuba Podgórski. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/pkg/xattr/README.md b/vendor/github.com/pkg/xattr/README.md new file mode 100644 index 000000000..0662c0208 --- /dev/null +++ b/vendor/github.com/pkg/xattr/README.md @@ -0,0 +1,45 @@ +[![GoDoc](https://godoc.org/github.com/pkg/xattr?status.svg)](http://godoc.org/github.com/pkg/xattr) +[![Go Report Card](https://goreportcard.com/badge/github.com/pkg/xattr)](https://goreportcard.com/report/github.com/pkg/xattr) +[![Build Status](https://github.com/pkg/xattr/workflows/build/badge.svg)](https://github.com/pkg/xattr/actions?query=workflow%3Abuild) +[![Codecov](https://codecov.io/gh/pkg/xattr/branch/master/graph/badge.svg)](https://codecov.io/gh/pkg/xattr) + +xattr +===== +Extended attribute support for Go (linux + darwin + freebsd + netbsd + solaris). + +"Extended attributes are name:value pairs associated permanently with files and directories, similar to the environment strings associated with a process. An attribute may be defined or undefined. If it is defined, its value may be empty or non-empty." [See more...](https://en.wikipedia.org/wiki/Extended_file_attributes) + +`SetWithFlags` allows to additionally pass system flags to be forwarded to the underlying calls. FreeBSD and NetBSD do not support this and the parameter will be ignored. + +The `L` variants of all functions (`LGet/LSet/...`) are identical to `Get/Set/...` except that they +do not reference a symlink that appears at the end of a path. See +[GoDoc](http://godoc.org/github.com/pkg/xattr) for details. + +### Example +```go + const path = "/tmp/myfile" + const prefix = "user." + + if err := xattr.Set(path, prefix+"test", []byte("test-attr-value")); err != nil { + log.Fatal(err) + } + + var list []string + if list, err = xattr.List(path); err != nil { + log.Fatal(err) + } + + var data []byte + if data, err = xattr.Get(path, prefix+"test"); err != nil { + log.Fatal(err) + } + + if err = xattr.Remove(path, prefix+"test"); err != nil { + log.Fatal(err) + } + + // One can also specify the flags parameter to be passed to the OS. + if err := xattr.SetWithFlags(path, prefix+"test", []byte("test-attr-value"), xattr.XATTR_CREATE); err != nil { + log.Fatal(err) + } +``` diff --git a/vendor/github.com/pkg/xattr/xattr.go b/vendor/github.com/pkg/xattr/xattr.go new file mode 100644 index 000000000..f982da304 --- /dev/null +++ b/vendor/github.com/pkg/xattr/xattr.go @@ -0,0 +1,255 @@ +/* +Package xattr provides support for extended attributes on linux, darwin and freebsd. +Extended attributes are name:value pairs associated permanently with files and directories, +similar to the environment strings associated with a process. +An attribute may be defined or undefined. If it is defined, its value may be empty or non-empty. +More details you can find here: https://en.wikipedia.org/wiki/Extended_file_attributes . + +All functions are provided in triples: Get/LGet/FGet, Set/LSet/FSet etc. 
The "L" +variant will not follow a symlink at the end of the path, and "F" variant accepts +a file descriptor instead of a path. + +Example for "L" variant, assuming path is "/symlink1/symlink2", where both components are +symlinks: +Get will follow "symlink1" and "symlink2" and operate on the target of +"symlink2". LGet will follow "symlink1" but operate directly on "symlink2". +*/ +package xattr + +import ( + "os" + "syscall" +) + +// Error records an error and the operation, file path and attribute that caused it. +type Error struct { + Op string + Path string + Name string + Err error +} + +func (e *Error) Error() (errstr string) { + if e.Op != "" { + errstr += e.Op + } + if e.Path != "" { + if errstr != "" { + errstr += " " + } + errstr += e.Path + } + if e.Name != "" { + if errstr != "" { + errstr += " " + } + errstr += e.Name + } + if e.Err != nil { + if errstr != "" { + errstr += ": " + } + errstr += e.Err.Error() + } + return +} + +// Get retrieves extended attribute data associated with path. It will follow +// all symlinks along the path. +func Get(path, name string) ([]byte, error) { + return get(path, name, func(name string, data []byte) (int, error) { + return getxattr(path, name, data) + }) +} + +// LGet is like Get but does not follow a symlink at the end of the path. +func LGet(path, name string) ([]byte, error) { + return get(path, name, func(name string, data []byte) (int, error) { + return lgetxattr(path, name, data) + }) +} + +// FGet is like Get but accepts a os.File instead of a file path. +func FGet(f *os.File, name string) ([]byte, error) { + return get(f.Name(), name, func(name string, data []byte) (int, error) { + return fgetxattr(f, name, data) + }) +} + +type getxattrFunc func(name string, data []byte) (int, error) + +// get contains the buffer allocation logic used by both Get and LGet. +func get(path string, name string, getxattrFunc getxattrFunc) ([]byte, error) { + const ( + // Start with a 1 KB buffer for the xattr value + initialBufSize = 1024 + + // The theoretical maximum xattr value size on MacOS is 64 MB. On Linux it's + // much smaller at 64 KB. Unless the kernel is evil or buggy, we should never + // hit the limit. + maxBufSize = 64 * 1024 * 1024 + + // Function name as reported in error messages + myname = "xattr.get" + ) + + size := initialBufSize + for { + data := make([]byte, size) + read, err := getxattrFunc(name, data) + + // If the buffer was too small to fit the value, Linux and MacOS react + // differently: + // Linux: returns an ERANGE error and "-1" bytes. + // MacOS: truncates the value and returns "size" bytes. If the value + // happens to be exactly as big as the buffer, we cannot know if it was + // truncated, and we retry with a bigger buffer. Contrary to documentation, + // MacOS never seems to return ERANGE! + // To keep the code simple, we always check both conditions, and sometimes + // double the buffer size without it being strictly necessary. + if err == syscall.ERANGE || read == size { + // The buffer was too small. Try again. + size <<= 1 + if size >= maxBufSize { + return nil, &Error{myname, path, name, syscall.EOVERFLOW} + } + continue + } + if err != nil { + return nil, &Error{myname, path, name, err} + } + return data[:read], nil + } +} + +// Set associates name and data together as an attribute of path. 
+func Set(path, name string, data []byte) error { + if err := setxattr(path, name, data, 0); err != nil { + return &Error{"xattr.Set", path, name, err} + } + return nil +} + +// LSet is like Set but does not follow a symlink at +// the end of the path. +func LSet(path, name string, data []byte) error { + if err := lsetxattr(path, name, data, 0); err != nil { + return &Error{"xattr.LSet", path, name, err} + } + return nil +} + +// FSet is like Set but accepts a os.File instead of a file path. +func FSet(f *os.File, name string, data []byte) error { + if err := fsetxattr(f, name, data, 0); err != nil { + return &Error{"xattr.FSet", f.Name(), name, err} + } + return nil +} + +// SetWithFlags associates name and data together as an attribute of path. +// Forwards the flags parameter to the syscall layer. +func SetWithFlags(path, name string, data []byte, flags int) error { + if err := setxattr(path, name, data, flags); err != nil { + return &Error{"xattr.SetWithFlags", path, name, err} + } + return nil +} + +// LSetWithFlags is like SetWithFlags but does not follow a symlink at +// the end of the path. +func LSetWithFlags(path, name string, data []byte, flags int) error { + if err := lsetxattr(path, name, data, flags); err != nil { + return &Error{"xattr.LSetWithFlags", path, name, err} + } + return nil +} + +// FSetWithFlags is like SetWithFlags but accepts a os.File instead of a file path. +func FSetWithFlags(f *os.File, name string, data []byte, flags int) error { + if err := fsetxattr(f, name, data, flags); err != nil { + return &Error{"xattr.FSetWithFlags", f.Name(), name, err} + } + return nil +} + +// Remove removes the attribute associated with the given path. +func Remove(path, name string) error { + if err := removexattr(path, name); err != nil { + return &Error{"xattr.Remove", path, name, err} + } + return nil +} + +// LRemove is like Remove but does not follow a symlink at the end of the +// path. +func LRemove(path, name string) error { + if err := lremovexattr(path, name); err != nil { + return &Error{"xattr.LRemove", path, name, err} + } + return nil +} + +// FRemove is like Remove but accepts a os.File instead of a file path. +func FRemove(f *os.File, name string) error { + if err := fremovexattr(f, name); err != nil { + return &Error{"xattr.FRemove", f.Name(), name, err} + } + return nil +} + +// List retrieves a list of names of extended attributes associated +// with the given path in the file system. +func List(path string) ([]string, error) { + return list(path, func(data []byte) (int, error) { + return listxattr(path, data) + }) +} + +// LList is like List but does not follow a symlink at the end of the +// path. +func LList(path string) ([]string, error) { + return list(path, func(data []byte) (int, error) { + return llistxattr(path, data) + }) +} + +// FList is like List but accepts a os.File instead of a file path. +func FList(f *os.File) ([]string, error) { + return list(f.Name(), func(data []byte) (int, error) { + return flistxattr(f, data) + }) +} + +type listxattrFunc func(data []byte) (int, error) + +// list contains the buffer allocation logic used by both List and LList. +func list(path string, listxattrFunc listxattrFunc) ([]string, error) { + myname := "xattr.list" + // find size. + size, err := listxattrFunc(nil) + if err != nil { + return nil, &Error{myname, path, "", err} + } + if size > 0 { + // `size + 1` because of ERANGE error when reading + // from a SMB1 mount point (https://github.com/pkg/xattr/issues/16). 
+ buf := make([]byte, size+1) + // Read into buffer of that size. + read, err := listxattrFunc(buf) + if err != nil { + return nil, &Error{myname, path, "", err} + } + return stringsFromByteSlice(buf[:read]), nil + } + return []string{}, nil +} + +// bytePtrFromSlice returns a pointer to array of bytes and a size. +func bytePtrFromSlice(data []byte) (ptr *byte, size int) { + size = len(data) + if size > 0 { + ptr = &data[0] + } + return +} diff --git a/vendor/github.com/pkg/xattr/xattr_bsd.go b/vendor/github.com/pkg/xattr/xattr_bsd.go new file mode 100644 index 000000000..f4a3f9539 --- /dev/null +++ b/vendor/github.com/pkg/xattr/xattr_bsd.go @@ -0,0 +1,201 @@ +//go:build freebsd || netbsd +// +build freebsd netbsd + +package xattr + +import ( + "os" + "syscall" + "unsafe" +) + +const ( + // XATTR_SUPPORTED will be true if the current platform is supported + XATTR_SUPPORTED = true + + EXTATTR_NAMESPACE_USER = 1 + + // ENOATTR is not exported by the syscall package on Linux, because it is + // an alias for ENODATA. We export it here so it is available on all + // our supported platforms. + ENOATTR = syscall.ENOATTR +) + +func getxattr(path string, name string, data []byte) (int, error) { + return sysGet(syscall.SYS_EXTATTR_GET_FILE, path, name, data) +} + +func lgetxattr(path string, name string, data []byte) (int, error) { + return sysGet(syscall.SYS_EXTATTR_GET_LINK, path, name, data) +} + +func fgetxattr(f *os.File, name string, data []byte) (int, error) { + return getxattr(f.Name(), name, data) +} + +// sysGet is called by getxattr and lgetxattr with the appropriate syscall +// number. This works because syscalls have the same signature and return +// values. +func sysGet(syscallNum uintptr, path string, name string, data []byte) (int, error) { + ptr, nbytes := bytePtrFromSlice(data) + /* + ssize_t extattr_get_file( + const char *path, + int attrnamespace, + const char *attrname, + void *data, + size_t nbytes); + + ssize_t extattr_get_link( + const char *path, + int attrnamespace, + const char *attrname, + void *data, + size_t nbytes); + */ + r0, _, err := syscall.Syscall6(syscallNum, uintptr(unsafe.Pointer(syscall.StringBytePtr(path))), + EXTATTR_NAMESPACE_USER, uintptr(unsafe.Pointer(syscall.StringBytePtr(name))), + uintptr(unsafe.Pointer(ptr)), uintptr(nbytes), 0) + if err != syscall.Errno(0) { + return int(r0), err + } + return int(r0), nil +} + +func setxattr(path string, name string, data []byte, flags int) error { + return sysSet(syscall.SYS_EXTATTR_SET_FILE, path, name, data) +} + +func lsetxattr(path string, name string, data []byte, flags int) error { + return sysSet(syscall.SYS_EXTATTR_SET_LINK, path, name, data) +} + +func fsetxattr(f *os.File, name string, data []byte, flags int) error { + return setxattr(f.Name(), name, data, flags) +} + +// sysSet is called by setxattr and lsetxattr with the appropriate syscall +// number. This works because syscalls have the same signature and return +// values. 
+func sysSet(syscallNum uintptr, path string, name string, data []byte) error { + ptr, nbytes := bytePtrFromSlice(data) + /* + ssize_t extattr_set_file( + const char *path, + int attrnamespace, + const char *attrname, + const void *data, + size_t nbytes + ); + + ssize_t extattr_set_link( + const char *path, + int attrnamespace, + const char *attrname, + const void *data, + size_t nbytes + ); + */ + r0, _, err := syscall.Syscall6(syscallNum, uintptr(unsafe.Pointer(syscall.StringBytePtr(path))), + EXTATTR_NAMESPACE_USER, uintptr(unsafe.Pointer(syscall.StringBytePtr(name))), + uintptr(unsafe.Pointer(ptr)), uintptr(nbytes), 0) + if err != syscall.Errno(0) { + return err + } + if int(r0) != nbytes { + return syscall.E2BIG + } + return nil +} + +func removexattr(path string, name string) error { + return sysRemove(syscall.SYS_EXTATTR_DELETE_FILE, path, name) +} + +func lremovexattr(path string, name string) error { + return sysRemove(syscall.SYS_EXTATTR_DELETE_LINK, path, name) +} + +func fremovexattr(f *os.File, name string) error { + return removexattr(f.Name(), name) +} + +// sysSet is called by removexattr and lremovexattr with the appropriate syscall +// number. This works because syscalls have the same signature and return +// values. +func sysRemove(syscallNum uintptr, path string, name string) error { + /* + int extattr_delete_file( + const char *path, + int attrnamespace, + const char *attrname + ); + + int extattr_delete_link( + const char *path, + int attrnamespace, + const char *attrname + ); + */ + _, _, err := syscall.Syscall(syscallNum, uintptr(unsafe.Pointer(syscall.StringBytePtr(path))), + EXTATTR_NAMESPACE_USER, uintptr(unsafe.Pointer(syscall.StringBytePtr(name))), + ) + if err != syscall.Errno(0) { + return err + } + return nil +} + +func listxattr(path string, data []byte) (int, error) { + return sysList(syscall.SYS_EXTATTR_LIST_FILE, path, data) +} + +func llistxattr(path string, data []byte) (int, error) { + return sysList(syscall.SYS_EXTATTR_LIST_LINK, path, data) +} + +func flistxattr(f *os.File, data []byte) (int, error) { + return listxattr(f.Name(), data) +} + +// sysSet is called by listxattr and llistxattr with the appropriate syscall +// number. This works because syscalls have the same signature and return +// values. +func sysList(syscallNum uintptr, path string, data []byte) (int, error) { + ptr, nbytes := bytePtrFromSlice(data) + /* + ssize_t extattr_list_file( + const char *path, + int attrnamespace, + void *data, + size_t nbytes + ); + + ssize_t extattr_list_link( + const char *path, + int attrnamespace, + void *data, + size_t nbytes + ); + */ + r0, _, err := syscall.Syscall6(syscallNum, uintptr(unsafe.Pointer(syscall.StringBytePtr(path))), + EXTATTR_NAMESPACE_USER, uintptr(unsafe.Pointer(ptr)), uintptr(nbytes), 0, 0) + if err != syscall.Errno(0) { + return int(r0), err + } + return int(r0), nil +} + +// stringsFromByteSlice converts a sequence of attributes to a []string. +// On FreeBSD, each entry consists of a single byte containing the length +// of the attribute name, followed by the attribute name. +// The name is _not_ terminated by NULL. 
+func stringsFromByteSlice(buf []byte) (result []string) { + index := 0 + for index < len(buf) { + next := index + 1 + int(buf[index]) + result = append(result, string(buf[index+1:next])) + index = next + } + return +} diff --git a/vendor/github.com/pkg/xattr/xattr_darwin.go b/vendor/github.com/pkg/xattr/xattr_darwin.go new file mode 100644 index 000000000..ee7a501da --- /dev/null +++ b/vendor/github.com/pkg/xattr/xattr_darwin.go @@ -0,0 +1,90 @@ +//go:build darwin +// +build darwin + +package xattr + +import ( + "os" + "syscall" + + "golang.org/x/sys/unix" +) + +// See https://opensource.apple.com/source/xnu/xnu-1504.15.3/bsd/sys/xattr.h.auto.html +const ( + // XATTR_SUPPORTED will be true if the current platform is supported + XATTR_SUPPORTED = true + + XATTR_NOFOLLOW = 0x0001 + XATTR_CREATE = 0x0002 + XATTR_REPLACE = 0x0004 + XATTR_NOSECURITY = 0x0008 + XATTR_NODEFAULT = 0x0010 + XATTR_SHOWCOMPRESSION = 0x0020 + + // ENOATTR is not exported by the syscall package on Linux, because it is + // an alias for ENODATA. We export it here so it is available on all + // our supported platforms. + ENOATTR = syscall.ENOATTR +) + +func getxattr(path string, name string, data []byte) (int, error) { + return unix.Getxattr(path, name, data) +} + +func lgetxattr(path string, name string, data []byte) (int, error) { + return unix.Lgetxattr(path, name, data) +} + +func fgetxattr(f *os.File, name string, data []byte) (int, error) { + return getxattr(f.Name(), name, data) +} + +func setxattr(path string, name string, data []byte, flags int) error { + return unix.Setxattr(path, name, data, flags) +} + +func lsetxattr(path string, name string, data []byte, flags int) error { + return unix.Lsetxattr(path, name, data, flags) +} + +func fsetxattr(f *os.File, name string, data []byte, flags int) error { + return setxattr(f.Name(), name, data, flags) +} + +func removexattr(path string, name string) error { + return unix.Removexattr(path, name) +} + +func lremovexattr(path string, name string) error { + return unix.Lremovexattr(path, name) +} + +func fremovexattr(f *os.File, name string) error { + return removexattr(f.Name(), name) +} + +func listxattr(path string, data []byte) (int, error) { + return unix.Listxattr(path, data) +} + +func llistxattr(path string, data []byte) (int, error) { + return unix.Llistxattr(path, data) +} + +func flistxattr(f *os.File, data []byte) (int, error) { + return listxattr(f.Name(), data) +} + +// stringsFromByteSlice converts a sequence of attributes to a []string. +// On Darwin and Linux, each entry is a NULL-terminated string. +func stringsFromByteSlice(buf []byte) (result []string) { + offset := 0 + for index, b := range buf { + if b == 0 { + result = append(result, string(buf[offset:index])) + offset = index + 1 + } + } + return +} diff --git a/vendor/github.com/pkg/xattr/xattr_linux.go b/vendor/github.com/pkg/xattr/xattr_linux.go new file mode 100644 index 000000000..879085ee5 --- /dev/null +++ b/vendor/github.com/pkg/xattr/xattr_linux.go @@ -0,0 +1,142 @@ +//go:build linux +// +build linux + +package xattr + +import ( + "os" + "syscall" + + "golang.org/x/sys/unix" +) + +const ( + // XATTR_SUPPORTED will be true if the current platform is supported + XATTR_SUPPORTED = true + + XATTR_CREATE = unix.XATTR_CREATE + XATTR_REPLACE = unix.XATTR_REPLACE + + // ENOATTR is not exported by the syscall package on Linux, because it is + // an alias for ENODATA. We export it here so it is available on all + // our supported platforms. 
+ ENOATTR = syscall.ENODATA +) + +// On Linux, FUSE and CIFS filesystems can return EINTR for interrupted system +// calls. This function works around this by retrying system calls until they +// stop returning EINTR. +// +// See https://github.com/golang/go/commit/6b420169d798c7ebe733487b56ea5c3fa4aab5ce. +func ignoringEINTR(fn func() error) (err error) { + for { + err = fn() + if err != unix.EINTR { + break + } + } + return err +} + +func getxattr(path string, name string, data []byte) (int, error) { + var r int + err := ignoringEINTR(func() (err error) { + r, err = unix.Getxattr(path, name, data) + return err + }) + return r, err +} + +func lgetxattr(path string, name string, data []byte) (int, error) { + var r int + err := ignoringEINTR(func() (err error) { + r, err = unix.Lgetxattr(path, name, data) + return err + }) + return r, err +} + +func fgetxattr(f *os.File, name string, data []byte) (int, error) { + var r int + err := ignoringEINTR(func() (err error) { + r, err = unix.Fgetxattr(int(f.Fd()), name, data) + return err + }) + return r, err +} + +func setxattr(path string, name string, data []byte, flags int) error { + return ignoringEINTR(func() (err error) { + return unix.Setxattr(path, name, data, flags) + }) +} + +func lsetxattr(path string, name string, data []byte, flags int) error { + return ignoringEINTR(func() (err error) { + return unix.Lsetxattr(path, name, data, flags) + }) +} + +func fsetxattr(f *os.File, name string, data []byte, flags int) error { + return ignoringEINTR(func() (err error) { + return unix.Fsetxattr(int(f.Fd()), name, data, flags) + }) +} + +func removexattr(path string, name string) error { + return ignoringEINTR(func() (err error) { + return unix.Removexattr(path, name) + }) +} + +func lremovexattr(path string, name string) error { + return ignoringEINTR(func() (err error) { + return unix.Lremovexattr(path, name) + }) +} + +func fremovexattr(f *os.File, name string) error { + return ignoringEINTR(func() (err error) { + return unix.Fremovexattr(int(f.Fd()), name) + }) +} + +func listxattr(path string, data []byte) (int, error) { + var r int + err := ignoringEINTR(func() (err error) { + r, err = unix.Listxattr(path, data) + return err + }) + return r, err +} + +func llistxattr(path string, data []byte) (int, error) { + var r int + err := ignoringEINTR(func() (err error) { + r, err = unix.Llistxattr(path, data) + return err + }) + return r, err +} + +func flistxattr(f *os.File, data []byte) (int, error) { + var r int + err := ignoringEINTR(func() (err error) { + r, err = unix.Flistxattr(int(f.Fd()), data) + return err + }) + return r, err +} + +// stringsFromByteSlice converts a sequence of attributes to a []string. +// On Darwin and Linux, each entry is a NULL-terminated string. 
+func stringsFromByteSlice(buf []byte) (result []string) { + offset := 0 + for index, b := range buf { + if b == 0 { + result = append(result, string(buf[offset:index])) + offset = index + 1 + } + } + return +} diff --git a/vendor/github.com/pkg/xattr/xattr_solaris.go b/vendor/github.com/pkg/xattr/xattr_solaris.go new file mode 100644 index 000000000..38d88d609 --- /dev/null +++ b/vendor/github.com/pkg/xattr/xattr_solaris.go @@ -0,0 +1,165 @@ +//go:build solaris +// +build solaris + +package xattr + +import ( + "os" + "syscall" + + "golang.org/x/sys/unix" +) + +const ( + // XATTR_SUPPORTED will be true if the current platform is supported + XATTR_SUPPORTED = true + + XATTR_CREATE = 0x1 + XATTR_REPLACE = 0x2 + + // ENOATTR is not exported by the syscall package on Linux, because it is + // an alias for ENODATA. We export it here so it is available on all + // our supported platforms. + ENOATTR = syscall.ENODATA +) + +func getxattr(path string, name string, data []byte) (int, error) { + f, err := os.OpenFile(path, os.O_RDONLY, 0) + if err != nil { + return 0, err + } + defer func() { + _ = f.Close() + }() + return fgetxattr(f, name, data) +} + +func lgetxattr(path string, name string, data []byte) (int, error) { + return 0, unix.ENOTSUP +} + +func fgetxattr(f *os.File, name string, data []byte) (int, error) { + fd, err := unix.Openat(int(f.Fd()), name, unix.O_RDONLY|unix.O_XATTR, 0) + if err != nil { + return 0, err + } + defer func() { + _ = unix.Close(fd) + }() + return unix.Read(fd, data) +} + +func setxattr(path string, name string, data []byte, flags int) error { + f, err := os.OpenFile(path, os.O_RDONLY, 0) + if err != nil { + return err + } + err = fsetxattr(f, name, data, flags) + if err != nil { + _ = f.Close() + return err + } + return f.Close() +} + +func lsetxattr(path string, name string, data []byte, flags int) error { + return unix.ENOTSUP +} + +func fsetxattr(f *os.File, name string, data []byte, flags int) error { + mode := unix.O_WRONLY | unix.O_XATTR + if flags&XATTR_REPLACE != 0 { + mode |= unix.O_TRUNC + } else if flags&XATTR_CREATE != 0 { + mode |= unix.O_CREAT | unix.O_EXCL + } else { + mode |= unix.O_CREAT | unix.O_TRUNC + } + fd, err := unix.Openat(int(f.Fd()), name, mode, 0666) + if err != nil { + return err + } + if _, err = unix.Write(fd, data); err != nil { + _ = unix.Close(fd) + return err + } + return unix.Close(fd) +} + +func removexattr(path string, name string) error { + fd, err := unix.Open(path, unix.O_RDONLY|unix.O_XATTR, 0) + if err != nil { + return err + } + f := os.NewFile(uintptr(fd), path) + defer func() { + _ = f.Close() + }() + return fremovexattr(f, name) +} + +func lremovexattr(path string, name string) error { + return unix.ENOTSUP +} + +func fremovexattr(f *os.File, name string) error { + fd, err := unix.Openat(int(f.Fd()), ".", unix.O_XATTR, 0) + if err != nil { + return err + } + defer func() { + _ = unix.Close(fd) + }() + return unix.Unlinkat(fd, name, 0) +} + +func listxattr(path string, data []byte) (int, error) { + f, err := os.OpenFile(path, os.O_RDONLY, 0) + if err != nil { + return 0, err + } + defer func() { + _ = f.Close() + }() + return flistxattr(f, data) +} + +func llistxattr(path string, data []byte) (int, error) { + return 0, unix.ENOTSUP +} + +func flistxattr(f *os.File, data []byte) (int, error) { + fd, err := unix.Openat(int(f.Fd()), ".", unix.O_RDONLY|unix.O_XATTR, 0) + if err != nil { + return 0, err + } + xf := os.NewFile(uintptr(fd), f.Name()) + defer func() { + _ = xf.Close() + }() + names, err := xf.Readdirnames(-1) + 
if err != nil { + return 0, err + } + var buf []byte + for _, name := range names { + buf = append(buf, append([]byte(name), '\000')...) + } + if data == nil { + return len(buf), nil + } + return copy(data, buf), nil +} + +// stringsFromByteSlice converts a sequence of attributes to a []string. +// On Darwin and Linux, each entry is a NULL-terminated string. +func stringsFromByteSlice(buf []byte) (result []string) { + offset := 0 + for index, b := range buf { + if b == 0 { + result = append(result, string(buf[offset:index])) + offset = index + 1 + } + } + return +} diff --git a/vendor/github.com/pkg/xattr/xattr_unsupported.go b/vendor/github.com/pkg/xattr/xattr_unsupported.go new file mode 100644 index 000000000..4153decb1 --- /dev/null +++ b/vendor/github.com/pkg/xattr/xattr_unsupported.go @@ -0,0 +1,70 @@ +//go:build !linux && !freebsd && !netbsd && !darwin && !solaris +// +build !linux,!freebsd,!netbsd,!darwin,!solaris + +package xattr + +import ( + "os" + "syscall" +) + +const ( + // We need to use the default for non supported operating systems + ENOATTR = syscall.ENODATA +) + +// XATTR_SUPPORTED will be true if the current platform is supported +const XATTR_SUPPORTED = false + +func getxattr(path string, name string, data []byte) (int, error) { + return 0, nil +} + +func lgetxattr(path string, name string, data []byte) (int, error) { + return 0, nil +} + +func fgetxattr(f *os.File, name string, data []byte) (int, error) { + return 0, nil +} + +func setxattr(path string, name string, data []byte, flags int) error { + return nil +} + +func lsetxattr(path string, name string, data []byte, flags int) error { + return nil +} + +func fsetxattr(f *os.File, name string, data []byte, flags int) error { + return nil +} + +func removexattr(path string, name string) error { + return nil +} + +func lremovexattr(path string, name string) error { + return nil +} + +func fremovexattr(f *os.File, name string) error { + return nil +} + +func listxattr(path string, data []byte) (int, error) { + return 0, nil +} + +func llistxattr(path string, data []byte) (int, error) { + return 0, nil +} + +func flistxattr(f *os.File, data []byte) (int, error) { + return 0, nil +} + +// dummy +func stringsFromByteSlice(buf []byte) (result []string) { + return []string{} +} diff --git a/vendor/golang.org/x/crypto/argon2/argon2.go b/vendor/golang.org/x/crypto/argon2/argon2.go new file mode 100644 index 000000000..b423feaea --- /dev/null +++ b/vendor/golang.org/x/crypto/argon2/argon2.go @@ -0,0 +1,285 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package argon2 implements the key derivation function Argon2. +// Argon2 was selected as the winner of the Password Hashing Competition and can +// be used to derive cryptographic keys from passwords. +// +// For a detailed specification of Argon2 see [1]. +// +// If you aren't sure which function you need, use Argon2id (IDKey) and +// the parameter recommendations for your scenario. +// +// +// Argon2i +// +// Argon2i (implemented by Key) is the side-channel resistant version of Argon2. +// It uses data-independent memory access, which is preferred for password +// hashing and password-based key derivation. Argon2i requires more passes over +// memory than Argon2id to protect from trade-off attacks. The recommended +// parameters (taken from [2]) for non-interactive operations are time=3 and to +// use the maximum available memory. 
+// +// +// Argon2id +// +// Argon2id (implemented by IDKey) is a hybrid version of Argon2 combining +// Argon2i and Argon2d. It uses data-independent memory access for the first +// half of the first iteration over the memory and data-dependent memory access +// for the rest. Argon2id is side-channel resistant and provides better brute- +// force cost savings due to time-memory tradeoffs than Argon2i. The recommended +// parameters for non-interactive operations (taken from [2]) are time=1 and to +// use the maximum available memory. +// +// [1] https://github.com/P-H-C/phc-winner-argon2/blob/master/argon2-specs.pdf +// [2] https://tools.ietf.org/html/draft-irtf-cfrg-argon2-03#section-9.3 +package argon2 + +import ( + "encoding/binary" + "sync" + + "golang.org/x/crypto/blake2b" +) + +// The Argon2 version implemented by this package. +const Version = 0x13 + +const ( + argon2d = iota + argon2i + argon2id +) + +// Key derives a key from the password, salt, and cost parameters using Argon2i +// returning a byte slice of length keyLen that can be used as cryptographic +// key. The CPU cost and parallelism degree must be greater than zero. +// +// For example, you can get a derived key for e.g. AES-256 (which needs a +// 32-byte key) by doing: +// +// key := argon2.Key([]byte("some password"), salt, 3, 32*1024, 4, 32) +// +// The draft RFC recommends[2] time=3, and memory=32*1024 is a sensible number. +// If using that amount of memory (32 MB) is not possible in some contexts then +// the time parameter can be increased to compensate. +// +// The time parameter specifies the number of passes over the memory and the +// memory parameter specifies the size of the memory in KiB. For example +// memory=32*1024 sets the memory cost to ~32 MB. The number of threads can be +// adjusted to the number of available CPUs. The cost parameters should be +// increased as memory latency and CPU parallelism increases. Remember to get a +// good random salt. +func Key(password, salt []byte, time, memory uint32, threads uint8, keyLen uint32) []byte { + return deriveKey(argon2i, password, salt, nil, nil, time, memory, threads, keyLen) +} + +// IDKey derives a key from the password, salt, and cost parameters using +// Argon2id returning a byte slice of length keyLen that can be used as +// cryptographic key. The CPU cost and parallelism degree must be greater than +// zero. +// +// For example, you can get a derived key for e.g. AES-256 (which needs a +// 32-byte key) by doing: +// +// key := argon2.IDKey([]byte("some password"), salt, 1, 64*1024, 4, 32) +// +// The draft RFC recommends[2] time=1, and memory=64*1024 is a sensible number. +// If using that amount of memory (64 MB) is not possible in some contexts then +// the time parameter can be increased to compensate. +// +// The time parameter specifies the number of passes over the memory and the +// memory parameter specifies the size of the memory in KiB. For example +// memory=64*1024 sets the memory cost to ~64 MB. The number of threads can be +// adjusted to the numbers of available CPUs. The cost parameters should be +// increased as memory latency and CPU parallelism increases. Remember to get a +// good random salt. 
+func IDKey(password, salt []byte, time, memory uint32, threads uint8, keyLen uint32) []byte { + return deriveKey(argon2id, password, salt, nil, nil, time, memory, threads, keyLen) +} + +func deriveKey(mode int, password, salt, secret, data []byte, time, memory uint32, threads uint8, keyLen uint32) []byte { + if time < 1 { + panic("argon2: number of rounds too small") + } + if threads < 1 { + panic("argon2: parallelism degree too low") + } + h0 := initHash(password, salt, secret, data, time, memory, uint32(threads), keyLen, mode) + + memory = memory / (syncPoints * uint32(threads)) * (syncPoints * uint32(threads)) + if memory < 2*syncPoints*uint32(threads) { + memory = 2 * syncPoints * uint32(threads) + } + B := initBlocks(&h0, memory, uint32(threads)) + processBlocks(B, time, memory, uint32(threads), mode) + return extractKey(B, memory, uint32(threads), keyLen) +} + +const ( + blockLength = 128 + syncPoints = 4 +) + +type block [blockLength]uint64 + +func initHash(password, salt, key, data []byte, time, memory, threads, keyLen uint32, mode int) [blake2b.Size + 8]byte { + var ( + h0 [blake2b.Size + 8]byte + params [24]byte + tmp [4]byte + ) + + b2, _ := blake2b.New512(nil) + binary.LittleEndian.PutUint32(params[0:4], threads) + binary.LittleEndian.PutUint32(params[4:8], keyLen) + binary.LittleEndian.PutUint32(params[8:12], memory) + binary.LittleEndian.PutUint32(params[12:16], time) + binary.LittleEndian.PutUint32(params[16:20], uint32(Version)) + binary.LittleEndian.PutUint32(params[20:24], uint32(mode)) + b2.Write(params[:]) + binary.LittleEndian.PutUint32(tmp[:], uint32(len(password))) + b2.Write(tmp[:]) + b2.Write(password) + binary.LittleEndian.PutUint32(tmp[:], uint32(len(salt))) + b2.Write(tmp[:]) + b2.Write(salt) + binary.LittleEndian.PutUint32(tmp[:], uint32(len(key))) + b2.Write(tmp[:]) + b2.Write(key) + binary.LittleEndian.PutUint32(tmp[:], uint32(len(data))) + b2.Write(tmp[:]) + b2.Write(data) + b2.Sum(h0[:0]) + return h0 +} + +func initBlocks(h0 *[blake2b.Size + 8]byte, memory, threads uint32) []block { + var block0 [1024]byte + B := make([]block, memory) + for lane := uint32(0); lane < threads; lane++ { + j := lane * (memory / threads) + binary.LittleEndian.PutUint32(h0[blake2b.Size+4:], lane) + + binary.LittleEndian.PutUint32(h0[blake2b.Size:], 0) + blake2bHash(block0[:], h0[:]) + for i := range B[j+0] { + B[j+0][i] = binary.LittleEndian.Uint64(block0[i*8:]) + } + + binary.LittleEndian.PutUint32(h0[blake2b.Size:], 1) + blake2bHash(block0[:], h0[:]) + for i := range B[j+1] { + B[j+1][i] = binary.LittleEndian.Uint64(block0[i*8:]) + } + } + return B +} + +func processBlocks(B []block, time, memory, threads uint32, mode int) { + lanes := memory / threads + segments := lanes / syncPoints + + processSegment := func(n, slice, lane uint32, wg *sync.WaitGroup) { + var addresses, in, zero block + if mode == argon2i || (mode == argon2id && n == 0 && slice < syncPoints/2) { + in[0] = uint64(n) + in[1] = uint64(lane) + in[2] = uint64(slice) + in[3] = uint64(memory) + in[4] = uint64(time) + in[5] = uint64(mode) + } + + index := uint32(0) + if n == 0 && slice == 0 { + index = 2 // we have already generated the first two blocks + if mode == argon2i || mode == argon2id { + in[6]++ + processBlock(&addresses, &in, &zero) + processBlock(&addresses, &addresses, &zero) + } + } + + offset := lane*lanes + slice*segments + index + var random uint64 + for index < segments { + prev := offset - 1 + if index == 0 && slice == 0 { + prev += lanes // last block in lane + } + if mode == argon2i || (mode == 
argon2id && n == 0 && slice < syncPoints/2) { + if index%blockLength == 0 { + in[6]++ + processBlock(&addresses, &in, &zero) + processBlock(&addresses, &addresses, &zero) + } + random = addresses[index%blockLength] + } else { + random = B[prev][0] + } + newOffset := indexAlpha(random, lanes, segments, threads, n, slice, lane, index) + processBlockXOR(&B[offset], &B[prev], &B[newOffset]) + index, offset = index+1, offset+1 + } + wg.Done() + } + + for n := uint32(0); n < time; n++ { + for slice := uint32(0); slice < syncPoints; slice++ { + var wg sync.WaitGroup + for lane := uint32(0); lane < threads; lane++ { + wg.Add(1) + go processSegment(n, slice, lane, &wg) + } + wg.Wait() + } + } + +} + +func extractKey(B []block, memory, threads, keyLen uint32) []byte { + lanes := memory / threads + for lane := uint32(0); lane < threads-1; lane++ { + for i, v := range B[(lane*lanes)+lanes-1] { + B[memory-1][i] ^= v + } + } + + var block [1024]byte + for i, v := range B[memory-1] { + binary.LittleEndian.PutUint64(block[i*8:], v) + } + key := make([]byte, keyLen) + blake2bHash(key, block[:]) + return key +} + +func indexAlpha(rand uint64, lanes, segments, threads, n, slice, lane, index uint32) uint32 { + refLane := uint32(rand>>32) % threads + if n == 0 && slice == 0 { + refLane = lane + } + m, s := 3*segments, ((slice+1)%syncPoints)*segments + if lane == refLane { + m += index + } + if n == 0 { + m, s = slice*segments, 0 + if slice == 0 || lane == refLane { + m += index + } + } + if index == 0 || lane == refLane { + m-- + } + return phi(rand, uint64(m), uint64(s), refLane, lanes) +} + +func phi(rand, m, s uint64, lane, lanes uint32) uint32 { + p := rand & 0xFFFFFFFF + p = (p * p) >> 32 + p = (p * m) >> 32 + return lane*lanes + uint32((s+m-(p+1))%uint64(lanes)) +} diff --git a/vendor/golang.org/x/crypto/argon2/blake2b.go b/vendor/golang.org/x/crypto/argon2/blake2b.go new file mode 100644 index 000000000..10f46948d --- /dev/null +++ b/vendor/golang.org/x/crypto/argon2/blake2b.go @@ -0,0 +1,53 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package argon2 + +import ( + "encoding/binary" + "hash" + + "golang.org/x/crypto/blake2b" +) + +// blake2bHash computes an arbitrary long hash value of in +// and writes the hash to out. +func blake2bHash(out []byte, in []byte) { + var b2 hash.Hash + if n := len(out); n < blake2b.Size { + b2, _ = blake2b.New(n, nil) + } else { + b2, _ = blake2b.New512(nil) + } + + var buffer [blake2b.Size]byte + binary.LittleEndian.PutUint32(buffer[:4], uint32(len(out))) + b2.Write(buffer[:4]) + b2.Write(in) + + if len(out) <= blake2b.Size { + b2.Sum(out[:0]) + return + } + + outLen := len(out) + b2.Sum(buffer[:0]) + b2.Reset() + copy(out, buffer[:32]) + out = out[32:] + for len(out) > blake2b.Size { + b2.Write(buffer[:]) + b2.Sum(buffer[:0]) + copy(out, buffer[:32]) + out = out[32:] + b2.Reset() + } + + if outLen%blake2b.Size > 0 { // outLen > 64 + r := ((outLen + 31) / 32) - 2 // ⌈τ /32⌉-2 + b2, _ = blake2b.New(outLen-32*r, nil) + } + b2.Write(buffer[:]) + b2.Sum(out[:0]) +} diff --git a/vendor/golang.org/x/crypto/argon2/blamka_amd64.go b/vendor/golang.org/x/crypto/argon2/blamka_amd64.go new file mode 100644 index 000000000..a014ac92a --- /dev/null +++ b/vendor/golang.org/x/crypto/argon2/blamka_amd64.go @@ -0,0 +1,61 @@ +// Copyright 2017 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build amd64 && gc && !purego +// +build amd64,gc,!purego + +package argon2 + +import "golang.org/x/sys/cpu" + +func init() { + useSSE4 = cpu.X86.HasSSE41 +} + +//go:noescape +func mixBlocksSSE2(out, a, b, c *block) + +//go:noescape +func xorBlocksSSE2(out, a, b, c *block) + +//go:noescape +func blamkaSSE4(b *block) + +func processBlockSSE(out, in1, in2 *block, xor bool) { + var t block + mixBlocksSSE2(&t, in1, in2, &t) + if useSSE4 { + blamkaSSE4(&t) + } else { + for i := 0; i < blockLength; i += 16 { + blamkaGeneric( + &t[i+0], &t[i+1], &t[i+2], &t[i+3], + &t[i+4], &t[i+5], &t[i+6], &t[i+7], + &t[i+8], &t[i+9], &t[i+10], &t[i+11], + &t[i+12], &t[i+13], &t[i+14], &t[i+15], + ) + } + for i := 0; i < blockLength/8; i += 2 { + blamkaGeneric( + &t[i], &t[i+1], &t[16+i], &t[16+i+1], + &t[32+i], &t[32+i+1], &t[48+i], &t[48+i+1], + &t[64+i], &t[64+i+1], &t[80+i], &t[80+i+1], + &t[96+i], &t[96+i+1], &t[112+i], &t[112+i+1], + ) + } + } + if xor { + xorBlocksSSE2(out, in1, in2, &t) + } else { + mixBlocksSSE2(out, in1, in2, &t) + } +} + +func processBlock(out, in1, in2 *block) { + processBlockSSE(out, in1, in2, false) +} + +func processBlockXOR(out, in1, in2 *block) { + processBlockSSE(out, in1, in2, true) +} diff --git a/vendor/golang.org/x/crypto/argon2/blamka_amd64.s b/vendor/golang.org/x/crypto/argon2/blamka_amd64.s new file mode 100644 index 000000000..b2cc05150 --- /dev/null +++ b/vendor/golang.org/x/crypto/argon2/blamka_amd64.s @@ -0,0 +1,244 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build amd64 && gc && !purego +// +build amd64,gc,!purego + +#include "textflag.h" + +DATA ·c40<>+0x00(SB)/8, $0x0201000706050403 +DATA ·c40<>+0x08(SB)/8, $0x0a09080f0e0d0c0b +GLOBL ·c40<>(SB), (NOPTR+RODATA), $16 + +DATA ·c48<>+0x00(SB)/8, $0x0100070605040302 +DATA ·c48<>+0x08(SB)/8, $0x09080f0e0d0c0b0a +GLOBL ·c48<>(SB), (NOPTR+RODATA), $16 + +#define SHUFFLE(v2, v3, v4, v5, v6, v7, t1, t2) \ + MOVO v4, t1; \ + MOVO v5, v4; \ + MOVO t1, v5; \ + MOVO v6, t1; \ + PUNPCKLQDQ v6, t2; \ + PUNPCKHQDQ v7, v6; \ + PUNPCKHQDQ t2, v6; \ + PUNPCKLQDQ v7, t2; \ + MOVO t1, v7; \ + MOVO v2, t1; \ + PUNPCKHQDQ t2, v7; \ + PUNPCKLQDQ v3, t2; \ + PUNPCKHQDQ t2, v2; \ + PUNPCKLQDQ t1, t2; \ + PUNPCKHQDQ t2, v3 + +#define SHUFFLE_INV(v2, v3, v4, v5, v6, v7, t1, t2) \ + MOVO v4, t1; \ + MOVO v5, v4; \ + MOVO t1, v5; \ + MOVO v2, t1; \ + PUNPCKLQDQ v2, t2; \ + PUNPCKHQDQ v3, v2; \ + PUNPCKHQDQ t2, v2; \ + PUNPCKLQDQ v3, t2; \ + MOVO t1, v3; \ + MOVO v6, t1; \ + PUNPCKHQDQ t2, v3; \ + PUNPCKLQDQ v7, t2; \ + PUNPCKHQDQ t2, v6; \ + PUNPCKLQDQ t1, t2; \ + PUNPCKHQDQ t2, v7 + +#define HALF_ROUND(v0, v1, v2, v3, v4, v5, v6, v7, t0, c40, c48) \ + MOVO v0, t0; \ + PMULULQ v2, t0; \ + PADDQ v2, v0; \ + PADDQ t0, v0; \ + PADDQ t0, v0; \ + PXOR v0, v6; \ + PSHUFD $0xB1, v6, v6; \ + MOVO v4, t0; \ + PMULULQ v6, t0; \ + PADDQ v6, v4; \ + PADDQ t0, v4; \ + PADDQ t0, v4; \ + PXOR v4, v2; \ + PSHUFB c40, v2; \ + MOVO v0, t0; \ + PMULULQ v2, t0; \ + PADDQ v2, v0; \ + PADDQ t0, v0; \ + PADDQ t0, v0; \ + PXOR v0, v6; \ + PSHUFB c48, v6; \ + MOVO v4, t0; \ + PMULULQ v6, t0; \ + PADDQ v6, v4; \ + PADDQ t0, v4; \ + PADDQ t0, v4; \ + PXOR v4, v2; \ + MOVO v2, t0; \ + PADDQ v2, t0; \ + PSRLQ $63, v2; \ + PXOR t0, v2; \ + MOVO v1, t0; \ + PMULULQ v3, t0; \ + PADDQ v3, v1; \ + PADDQ t0, v1; \ + PADDQ t0, v1; \ + PXOR 
v1, v7; \ + PSHUFD $0xB1, v7, v7; \ + MOVO v5, t0; \ + PMULULQ v7, t0; \ + PADDQ v7, v5; \ + PADDQ t0, v5; \ + PADDQ t0, v5; \ + PXOR v5, v3; \ + PSHUFB c40, v3; \ + MOVO v1, t0; \ + PMULULQ v3, t0; \ + PADDQ v3, v1; \ + PADDQ t0, v1; \ + PADDQ t0, v1; \ + PXOR v1, v7; \ + PSHUFB c48, v7; \ + MOVO v5, t0; \ + PMULULQ v7, t0; \ + PADDQ v7, v5; \ + PADDQ t0, v5; \ + PADDQ t0, v5; \ + PXOR v5, v3; \ + MOVO v3, t0; \ + PADDQ v3, t0; \ + PSRLQ $63, v3; \ + PXOR t0, v3 + +#define LOAD_MSG_0(block, off) \ + MOVOU 8*(off+0)(block), X0; \ + MOVOU 8*(off+2)(block), X1; \ + MOVOU 8*(off+4)(block), X2; \ + MOVOU 8*(off+6)(block), X3; \ + MOVOU 8*(off+8)(block), X4; \ + MOVOU 8*(off+10)(block), X5; \ + MOVOU 8*(off+12)(block), X6; \ + MOVOU 8*(off+14)(block), X7 + +#define STORE_MSG_0(block, off) \ + MOVOU X0, 8*(off+0)(block); \ + MOVOU X1, 8*(off+2)(block); \ + MOVOU X2, 8*(off+4)(block); \ + MOVOU X3, 8*(off+6)(block); \ + MOVOU X4, 8*(off+8)(block); \ + MOVOU X5, 8*(off+10)(block); \ + MOVOU X6, 8*(off+12)(block); \ + MOVOU X7, 8*(off+14)(block) + +#define LOAD_MSG_1(block, off) \ + MOVOU 8*off+0*8(block), X0; \ + MOVOU 8*off+16*8(block), X1; \ + MOVOU 8*off+32*8(block), X2; \ + MOVOU 8*off+48*8(block), X3; \ + MOVOU 8*off+64*8(block), X4; \ + MOVOU 8*off+80*8(block), X5; \ + MOVOU 8*off+96*8(block), X6; \ + MOVOU 8*off+112*8(block), X7 + +#define STORE_MSG_1(block, off) \ + MOVOU X0, 8*off+0*8(block); \ + MOVOU X1, 8*off+16*8(block); \ + MOVOU X2, 8*off+32*8(block); \ + MOVOU X3, 8*off+48*8(block); \ + MOVOU X4, 8*off+64*8(block); \ + MOVOU X5, 8*off+80*8(block); \ + MOVOU X6, 8*off+96*8(block); \ + MOVOU X7, 8*off+112*8(block) + +#define BLAMKA_ROUND_0(block, off, t0, t1, c40, c48) \ + LOAD_MSG_0(block, off); \ + HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, t0, c40, c48); \ + SHUFFLE(X2, X3, X4, X5, X6, X7, t0, t1); \ + HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, t0, c40, c48); \ + SHUFFLE_INV(X2, X3, X4, X5, X6, X7, t0, t1); \ + STORE_MSG_0(block, off) + +#define BLAMKA_ROUND_1(block, off, t0, t1, c40, c48) \ + LOAD_MSG_1(block, off); \ + HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, t0, c40, c48); \ + SHUFFLE(X2, X3, X4, X5, X6, X7, t0, t1); \ + HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, t0, c40, c48); \ + SHUFFLE_INV(X2, X3, X4, X5, X6, X7, t0, t1); \ + STORE_MSG_1(block, off) + +// func blamkaSSE4(b *block) +TEXT ·blamkaSSE4(SB), 4, $0-8 + MOVQ b+0(FP), AX + + MOVOU ·c40<>(SB), X10 + MOVOU ·c48<>(SB), X11 + + BLAMKA_ROUND_0(AX, 0, X8, X9, X10, X11) + BLAMKA_ROUND_0(AX, 16, X8, X9, X10, X11) + BLAMKA_ROUND_0(AX, 32, X8, X9, X10, X11) + BLAMKA_ROUND_0(AX, 48, X8, X9, X10, X11) + BLAMKA_ROUND_0(AX, 64, X8, X9, X10, X11) + BLAMKA_ROUND_0(AX, 80, X8, X9, X10, X11) + BLAMKA_ROUND_0(AX, 96, X8, X9, X10, X11) + BLAMKA_ROUND_0(AX, 112, X8, X9, X10, X11) + + BLAMKA_ROUND_1(AX, 0, X8, X9, X10, X11) + BLAMKA_ROUND_1(AX, 2, X8, X9, X10, X11) + BLAMKA_ROUND_1(AX, 4, X8, X9, X10, X11) + BLAMKA_ROUND_1(AX, 6, X8, X9, X10, X11) + BLAMKA_ROUND_1(AX, 8, X8, X9, X10, X11) + BLAMKA_ROUND_1(AX, 10, X8, X9, X10, X11) + BLAMKA_ROUND_1(AX, 12, X8, X9, X10, X11) + BLAMKA_ROUND_1(AX, 14, X8, X9, X10, X11) + RET + +// func mixBlocksSSE2(out, a, b, c *block) +TEXT ·mixBlocksSSE2(SB), 4, $0-32 + MOVQ out+0(FP), DX + MOVQ a+8(FP), AX + MOVQ b+16(FP), BX + MOVQ a+24(FP), CX + MOVQ $128, BP + +loop: + MOVOU 0(AX), X0 + MOVOU 0(BX), X1 + MOVOU 0(CX), X2 + PXOR X1, X0 + PXOR X2, X0 + MOVOU X0, 0(DX) + ADDQ $16, AX + ADDQ $16, BX + ADDQ $16, CX + ADDQ $16, DX + SUBQ $2, BP + JA loop + RET + +// func xorBlocksSSE2(out, a, b, c *block) 
+TEXT ·xorBlocksSSE2(SB), 4, $0-32 + MOVQ out+0(FP), DX + MOVQ a+8(FP), AX + MOVQ b+16(FP), BX + MOVQ a+24(FP), CX + MOVQ $128, BP + +loop: + MOVOU 0(AX), X0 + MOVOU 0(BX), X1 + MOVOU 0(CX), X2 + MOVOU 0(DX), X3 + PXOR X1, X0 + PXOR X2, X0 + PXOR X3, X0 + MOVOU X0, 0(DX) + ADDQ $16, AX + ADDQ $16, BX + ADDQ $16, CX + ADDQ $16, DX + SUBQ $2, BP + JA loop + RET diff --git a/vendor/golang.org/x/crypto/argon2/blamka_generic.go b/vendor/golang.org/x/crypto/argon2/blamka_generic.go new file mode 100644 index 000000000..a481b2243 --- /dev/null +++ b/vendor/golang.org/x/crypto/argon2/blamka_generic.go @@ -0,0 +1,163 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package argon2 + +var useSSE4 bool + +func processBlockGeneric(out, in1, in2 *block, xor bool) { + var t block + for i := range t { + t[i] = in1[i] ^ in2[i] + } + for i := 0; i < blockLength; i += 16 { + blamkaGeneric( + &t[i+0], &t[i+1], &t[i+2], &t[i+3], + &t[i+4], &t[i+5], &t[i+6], &t[i+7], + &t[i+8], &t[i+9], &t[i+10], &t[i+11], + &t[i+12], &t[i+13], &t[i+14], &t[i+15], + ) + } + for i := 0; i < blockLength/8; i += 2 { + blamkaGeneric( + &t[i], &t[i+1], &t[16+i], &t[16+i+1], + &t[32+i], &t[32+i+1], &t[48+i], &t[48+i+1], + &t[64+i], &t[64+i+1], &t[80+i], &t[80+i+1], + &t[96+i], &t[96+i+1], &t[112+i], &t[112+i+1], + ) + } + if xor { + for i := range t { + out[i] ^= in1[i] ^ in2[i] ^ t[i] + } + } else { + for i := range t { + out[i] = in1[i] ^ in2[i] ^ t[i] + } + } +} + +func blamkaGeneric(t00, t01, t02, t03, t04, t05, t06, t07, t08, t09, t10, t11, t12, t13, t14, t15 *uint64) { + v00, v01, v02, v03 := *t00, *t01, *t02, *t03 + v04, v05, v06, v07 := *t04, *t05, *t06, *t07 + v08, v09, v10, v11 := *t08, *t09, *t10, *t11 + v12, v13, v14, v15 := *t12, *t13, *t14, *t15 + + v00 += v04 + 2*uint64(uint32(v00))*uint64(uint32(v04)) + v12 ^= v00 + v12 = v12>>32 | v12<<32 + v08 += v12 + 2*uint64(uint32(v08))*uint64(uint32(v12)) + v04 ^= v08 + v04 = v04>>24 | v04<<40 + + v00 += v04 + 2*uint64(uint32(v00))*uint64(uint32(v04)) + v12 ^= v00 + v12 = v12>>16 | v12<<48 + v08 += v12 + 2*uint64(uint32(v08))*uint64(uint32(v12)) + v04 ^= v08 + v04 = v04>>63 | v04<<1 + + v01 += v05 + 2*uint64(uint32(v01))*uint64(uint32(v05)) + v13 ^= v01 + v13 = v13>>32 | v13<<32 + v09 += v13 + 2*uint64(uint32(v09))*uint64(uint32(v13)) + v05 ^= v09 + v05 = v05>>24 | v05<<40 + + v01 += v05 + 2*uint64(uint32(v01))*uint64(uint32(v05)) + v13 ^= v01 + v13 = v13>>16 | v13<<48 + v09 += v13 + 2*uint64(uint32(v09))*uint64(uint32(v13)) + v05 ^= v09 + v05 = v05>>63 | v05<<1 + + v02 += v06 + 2*uint64(uint32(v02))*uint64(uint32(v06)) + v14 ^= v02 + v14 = v14>>32 | v14<<32 + v10 += v14 + 2*uint64(uint32(v10))*uint64(uint32(v14)) + v06 ^= v10 + v06 = v06>>24 | v06<<40 + + v02 += v06 + 2*uint64(uint32(v02))*uint64(uint32(v06)) + v14 ^= v02 + v14 = v14>>16 | v14<<48 + v10 += v14 + 2*uint64(uint32(v10))*uint64(uint32(v14)) + v06 ^= v10 + v06 = v06>>63 | v06<<1 + + v03 += v07 + 2*uint64(uint32(v03))*uint64(uint32(v07)) + v15 ^= v03 + v15 = v15>>32 | v15<<32 + v11 += v15 + 2*uint64(uint32(v11))*uint64(uint32(v15)) + v07 ^= v11 + v07 = v07>>24 | v07<<40 + + v03 += v07 + 2*uint64(uint32(v03))*uint64(uint32(v07)) + v15 ^= v03 + v15 = v15>>16 | v15<<48 + v11 += v15 + 2*uint64(uint32(v11))*uint64(uint32(v15)) + v07 ^= v11 + v07 = v07>>63 | v07<<1 + + v00 += v05 + 2*uint64(uint32(v00))*uint64(uint32(v05)) + v15 ^= v00 + v15 = v15>>32 | v15<<32 + v10 += v15 + 
2*uint64(uint32(v10))*uint64(uint32(v15)) + v05 ^= v10 + v05 = v05>>24 | v05<<40 + + v00 += v05 + 2*uint64(uint32(v00))*uint64(uint32(v05)) + v15 ^= v00 + v15 = v15>>16 | v15<<48 + v10 += v15 + 2*uint64(uint32(v10))*uint64(uint32(v15)) + v05 ^= v10 + v05 = v05>>63 | v05<<1 + + v01 += v06 + 2*uint64(uint32(v01))*uint64(uint32(v06)) + v12 ^= v01 + v12 = v12>>32 | v12<<32 + v11 += v12 + 2*uint64(uint32(v11))*uint64(uint32(v12)) + v06 ^= v11 + v06 = v06>>24 | v06<<40 + + v01 += v06 + 2*uint64(uint32(v01))*uint64(uint32(v06)) + v12 ^= v01 + v12 = v12>>16 | v12<<48 + v11 += v12 + 2*uint64(uint32(v11))*uint64(uint32(v12)) + v06 ^= v11 + v06 = v06>>63 | v06<<1 + + v02 += v07 + 2*uint64(uint32(v02))*uint64(uint32(v07)) + v13 ^= v02 + v13 = v13>>32 | v13<<32 + v08 += v13 + 2*uint64(uint32(v08))*uint64(uint32(v13)) + v07 ^= v08 + v07 = v07>>24 | v07<<40 + + v02 += v07 + 2*uint64(uint32(v02))*uint64(uint32(v07)) + v13 ^= v02 + v13 = v13>>16 | v13<<48 + v08 += v13 + 2*uint64(uint32(v08))*uint64(uint32(v13)) + v07 ^= v08 + v07 = v07>>63 | v07<<1 + + v03 += v04 + 2*uint64(uint32(v03))*uint64(uint32(v04)) + v14 ^= v03 + v14 = v14>>32 | v14<<32 + v09 += v14 + 2*uint64(uint32(v09))*uint64(uint32(v14)) + v04 ^= v09 + v04 = v04>>24 | v04<<40 + + v03 += v04 + 2*uint64(uint32(v03))*uint64(uint32(v04)) + v14 ^= v03 + v14 = v14>>16 | v14<<48 + v09 += v14 + 2*uint64(uint32(v09))*uint64(uint32(v14)) + v04 ^= v09 + v04 = v04>>63 | v04<<1 + + *t00, *t01, *t02, *t03 = v00, v01, v02, v03 + *t04, *t05, *t06, *t07 = v04, v05, v06, v07 + *t08, *t09, *t10, *t11 = v08, v09, v10, v11 + *t12, *t13, *t14, *t15 = v12, v13, v14, v15 +} diff --git a/vendor/golang.org/x/crypto/argon2/blamka_ref.go b/vendor/golang.org/x/crypto/argon2/blamka_ref.go new file mode 100644 index 000000000..167c59d2d --- /dev/null +++ b/vendor/golang.org/x/crypto/argon2/blamka_ref.go @@ -0,0 +1,16 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build !amd64 || purego || !gc +// +build !amd64 purego !gc + +package argon2 + +func processBlock(out, in1, in2 *block) { + processBlockGeneric(out, in1, in2, false) +} + +func processBlockXOR(out, in1, in2 *block) { + processBlockGeneric(out, in1, in2, true) +} diff --git a/vendor/golang.org/x/crypto/hkdf/hkdf.go b/vendor/golang.org/x/crypto/hkdf/hkdf.go new file mode 100644 index 000000000..dda3f143b --- /dev/null +++ b/vendor/golang.org/x/crypto/hkdf/hkdf.go @@ -0,0 +1,93 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package hkdf implements the HMAC-based Extract-and-Expand Key Derivation +// Function (HKDF) as defined in RFC 5869. +// +// HKDF is a cryptographic key derivation function (KDF) with the goal of +// expanding limited input keying material into one or more cryptographically +// strong secret keys. +package hkdf // import "golang.org/x/crypto/hkdf" + +import ( + "crypto/hmac" + "errors" + "hash" + "io" +) + +// Extract generates a pseudorandom key for use with Expand from an input secret +// and an optional independent salt. +// +// Only use this function if you need to reuse the extracted key with multiple +// Expand invocations and different context values. Most common scenarios, +// including the generation of multiple keys, should use New instead. 
+func Extract(hash func() hash.Hash, secret, salt []byte) []byte { + if salt == nil { + salt = make([]byte, hash().Size()) + } + extractor := hmac.New(hash, salt) + extractor.Write(secret) + return extractor.Sum(nil) +} + +type hkdf struct { + expander hash.Hash + size int + + info []byte + counter byte + + prev []byte + buf []byte +} + +func (f *hkdf) Read(p []byte) (int, error) { + // Check whether enough data can be generated + need := len(p) + remains := len(f.buf) + int(255-f.counter+1)*f.size + if remains < need { + return 0, errors.New("hkdf: entropy limit reached") + } + // Read any leftover from the buffer + n := copy(p, f.buf) + p = p[n:] + + // Fill the rest of the buffer + for len(p) > 0 { + f.expander.Reset() + f.expander.Write(f.prev) + f.expander.Write(f.info) + f.expander.Write([]byte{f.counter}) + f.prev = f.expander.Sum(f.prev[:0]) + f.counter++ + + // Copy the new batch into p + f.buf = f.prev + n = copy(p, f.buf) + p = p[n:] + } + // Save leftovers for next run + f.buf = f.buf[n:] + + return need, nil +} + +// Expand returns a Reader, from which keys can be read, using the given +// pseudorandom key and optional context info, skipping the extraction step. +// +// The pseudorandomKey should have been generated by Extract, or be a uniformly +// random or pseudorandom cryptographically strong key. See RFC 5869, Section +// 3.3. Most common scenarios will want to use New instead. +func Expand(hash func() hash.Hash, pseudorandomKey, info []byte) io.Reader { + expander := hmac.New(hash, pseudorandomKey) + return &hkdf{expander, expander.Size(), info, 1, nil, nil} +} + +// New returns a Reader, from which keys can be read, using the given hash, +// secret, salt and context info. Salt and info can be nil. +func New(hash func() hash.Hash, secret, salt, info []byte) io.Reader { + prk := Extract(hash, secret, salt) + return Expand(hash, prk, info) +} diff --git a/vendor/golang.org/x/text/AUTHORS b/vendor/golang.org/x/text/AUTHORS deleted file mode 100644 index 15167cd74..000000000 --- a/vendor/golang.org/x/text/AUTHORS +++ /dev/null @@ -1,3 +0,0 @@ -# This source code refers to The Go Authors for copyright purposes. -# The master list of authors is in the main Go distribution, -# visible at http://tip.golang.org/AUTHORS. diff --git a/vendor/golang.org/x/text/CONTRIBUTORS b/vendor/golang.org/x/text/CONTRIBUTORS deleted file mode 100644 index 1c4577e96..000000000 --- a/vendor/golang.org/x/text/CONTRIBUTORS +++ /dev/null @@ -1,3 +0,0 @@ -# This source code was written by the Go contributors. -# The master list of contributors is in the main Go distribution, -# visible at http://tip.golang.org/CONTRIBUTORS. diff --git a/vendor/golang.org/x/text/cases/trieval.go b/vendor/golang.org/x/text/cases/trieval.go index 99e039628..4e4d13fe5 100644 --- a/vendor/golang.org/x/text/cases/trieval.go +++ b/vendor/golang.org/x/text/cases/trieval.go @@ -14,19 +14,19 @@ package cases // // The per-rune values have the following format: // -// if (exception) { -// 15..4 unsigned exception index -// } else { -// 15..8 XOR pattern or index to XOR pattern for case mapping -// Only 13..8 are used for XOR patterns. -// 7 inverseFold (fold to upper, not to lower) -// 6 index: interpret the XOR pattern as an index -// or isMid if case mode is cIgnorableUncased. -// 5..4 CCC: zero (normal or break), above or other -// } -// 3 exception: interpret this value as an exception index -// (TODO: is this bit necessary? Probably implied from case mode.) 
-// 2..0 case mode +// if (exception) { +// 15..4 unsigned exception index +// } else { +// 15..8 XOR pattern or index to XOR pattern for case mapping +// Only 13..8 are used for XOR patterns. +// 7 inverseFold (fold to upper, not to lower) +// 6 index: interpret the XOR pattern as an index +// or isMid if case mode is cIgnorableUncased. +// 5..4 CCC: zero (normal or break), above or other +// } +// 3 exception: interpret this value as an exception index +// (TODO: is this bit necessary? Probably implied from case mode.) +// 2..0 case mode // // For the non-exceptional cases, a rune must be either uncased, lowercase or // uppercase. If the rune is cased, the XOR pattern maps either a lowercase @@ -128,37 +128,40 @@ const ( // The entry is pointed to by the exception index in an entry. It has the // following format: // -// Header -// byte 0: -// 7..6 unused -// 5..4 CCC type (same bits as entry) -// 3 unused -// 2..0 length of fold +// Header: // -// byte 1: -// 7..6 unused -// 5..3 length of 1st mapping of case type -// 2..0 length of 2nd mapping of case type +// byte 0: +// 7..6 unused +// 5..4 CCC type (same bits as entry) +// 3 unused +// 2..0 length of fold // -// case 1st 2nd -// lower -> upper, title -// upper -> lower, title -// title -> lower, upper +// byte 1: +// 7..6 unused +// 5..3 length of 1st mapping of case type +// 2..0 length of 2nd mapping of case type +// +// case 1st 2nd +// lower -> upper, title +// upper -> lower, title +// title -> lower, upper // // Lengths with the value 0x7 indicate no value and implies no change. // A length of 0 indicates a mapping to zero-length string. // // Body bytes: -// case folding bytes -// lowercase mapping bytes -// uppercase mapping bytes -// titlecase mapping bytes -// closure mapping bytes (for NFKC_Casefold). (TODO) +// +// case folding bytes +// lowercase mapping bytes +// uppercase mapping bytes +// titlecase mapping bytes +// closure mapping bytes (for NFKC_Casefold). (TODO) // // Fallbacks: -// missing fold -> lower -// missing title -> upper -// all missing -> original rune +// +// missing fold -> lower +// missing title -> upper +// all missing -> original rune // // exceptions starts with a dummy byte to enforce that there is no zero index // value. 
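The golang.org/x/crypto/hkdf package added above is consumed purely through its exported New/Extract/Expand functions. As a minimal usage sketch (not part of the patch; SHA-256, the placeholder secret/salt/info values, and the 32-byte key length are all assumptions for illustration), a typical caller does:

package main

import (
	"crypto/sha256"
	"fmt"
	"io"

	"golang.org/x/crypto/hkdf"
)

func main() {
	secret := []byte("input keying material") // placeholder IKM, not a real key
	salt := []byte("optional salt")           // may be nil; Extract then substitutes a zero-filled, hash-sized salt
	info := []byte("context label")           // optional context that binds derived keys to a purpose

	// New performs Extract and returns an Expand reader; each Read yields more key material.
	kdf := hkdf.New(sha256.New, secret, salt, info)

	key := make([]byte, 32)
	if _, err := io.ReadFull(kdf, key); err != nil { // errors only once the 255*HashSize output limit is exceeded
		panic(err)
	}
	fmt.Printf("derived key: %x\n", key)
}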
diff --git a/vendor/golang.org/x/text/encoding/htmlindex/tables.go b/vendor/golang.org/x/text/encoding/htmlindex/tables.go index f074e2c6d..9e6daa896 100644 --- a/vendor/golang.org/x/text/encoding/htmlindex/tables.go +++ b/vendor/golang.org/x/text/encoding/htmlindex/tables.go @@ -93,8 +93,11 @@ var canonical = [numEncodings]string{ var nameMap = map[string]htmlEncoding{ "unicode-1-1-utf-8": utf8, + "unicode11utf8": utf8, + "unicode20utf8": utf8, "utf-8": utf8, "utf8": utf8, + "x-unicode20utf8": utf8, "866": ibm866, "cp866": ibm866, "csibm866": ibm866, @@ -307,7 +310,13 @@ var nameMap = map[string]htmlEncoding{ "iso-2022-cn-ext": replacement, "iso-2022-kr": replacement, "replacement": replacement, + "unicodefffe": utf16be, "utf-16be": utf16be, + "csunicode": utf16le, + "iso-10646-ucs-2": utf16le, + "ucs-2": utf16le, + "unicode": utf16le, + "unicodefeff": utf16le, "utf-16": utf16le, "utf-16le": utf16le, "x-user-defined": xUserDefined, diff --git a/vendor/golang.org/x/text/encoding/internal/identifier/mib.go b/vendor/golang.org/x/text/encoding/internal/identifier/mib.go index fc7df1bc7..351fb86e2 100644 --- a/vendor/golang.org/x/text/encoding/internal/identifier/mib.go +++ b/vendor/golang.org/x/text/encoding/internal/identifier/mib.go @@ -905,6 +905,14 @@ const ( // https://www.unicode.org/notes/tn6/ BOCU1 MIB = 1020 + // UTF7IMAP is the MIB identifier with IANA name UTF-7-IMAP. + // + // Note: This charset is used to encode Unicode in IMAP mailbox names; + // see section 5.1.3 of rfc3501 . It should never be used + // outside this context. A name has been assigned so that charset processing + // implementations can refer to it in a consistent way. + UTF7IMAP MIB = 1021 + // Windows30Latin1 is the MIB identifier with IANA name ISO-8859-1-Windows-3.0-Latin-1. // // Extended ISO 8859-1 Latin-1 for Windows 3.0. diff --git a/vendor/golang.org/x/text/encoding/simplifiedchinese/gbk.go b/vendor/golang.org/x/text/encoding/simplifiedchinese/gbk.go index b89c45b03..0e0fabfd6 100644 --- a/vendor/golang.org/x/text/encoding/simplifiedchinese/gbk.go +++ b/vendor/golang.org/x/text/encoding/simplifiedchinese/gbk.go @@ -55,6 +55,8 @@ loop: // Microsoft's Code Page 936 extends GBK 1.0 to encode the euro sign U+20AC // as 0x80. The HTML5 specification at http://encoding.spec.whatwg.org/#gbk // says to treat "gbk" as Code Page 936. + // GBK’s decoder is gb18030’s decoder. https://encoding.spec.whatwg.org/#gbk-decoder + // If byte is 0x80, return code point U+20AC. https://encoding.spec.whatwg.org/#gb18030-decoder case c0 == 0x80: r, size = '€', 1 @@ -180,7 +182,9 @@ func (e gbkEncoder) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err // Microsoft's Code Page 936 extends GBK 1.0 to encode the euro sign U+20AC // as 0x80. The HTML5 specification at http://encoding.spec.whatwg.org/#gbk // says to treat "gbk" as Code Page 936. - if r == '€' { + // GBK’s encoder is gb18030’s encoder with its _is GBK_ set to true. https://encoding.spec.whatwg.org/#gbk-encoder + // If _is GBK_ is true and code point is U+20AC, return byte 0x80. 
https://encoding.spec.whatwg.org/#gb18030-encoder + if !e.gb18030 && r == '€' { r = 0x80 goto write1 } diff --git a/vendor/golang.org/x/text/internal/language/compact/tables.go b/vendor/golang.org/x/text/internal/language/compact/tables.go index fe7ad9ea7..32af9de59 100644 --- a/vendor/golang.org/x/text/internal/language/compact/tables.go +++ b/vendor/golang.org/x/text/internal/language/compact/tables.go @@ -966,7 +966,7 @@ var coreTags = []language.CompactCoreInfo{ // 773 elements 0x3fd00000, 0x3fd00072, 0x3fd000da, 0x3fd0010c, 0x3ff00000, 0x3ff000d1, 0x40100000, 0x401000c3, 0x40200000, 0x4020004c, 0x40700000, 0x40800000, - 0x4085a000, 0x4085a0ba, 0x408e3000, 0x408e30ba, + 0x4085a000, 0x4085a0ba, 0x408e8000, 0x408e80ba, 0x40c00000, 0x40c000b3, 0x41200000, 0x41200111, 0x41600000, 0x4160010f, 0x41c00000, 0x41d00000, // Entry 280 - 29F @@ -994,7 +994,7 @@ var coreTags = []language.CompactCoreInfo{ // 773 elements 0x4ae00130, 0x4b400000, 0x4b400099, 0x4b4000e8, 0x4bc00000, 0x4bc05000, 0x4bc05024, 0x4bc20000, 0x4bc20137, 0x4bc5a000, 0x4bc5a137, 0x4be00000, - 0x4be5a000, 0x4be5a0b4, 0x4beeb000, 0x4beeb0b4, + 0x4be5a000, 0x4be5a0b4, 0x4bef1000, 0x4bef10b4, 0x4c000000, 0x4c300000, 0x4c30013e, 0x4c900000, // Entry 2E0 - 2FF 0x4c900001, 0x4cc00000, 0x4cc0012f, 0x4ce00000, @@ -1012,4 +1012,4 @@ var coreTags = []language.CompactCoreInfo{ // 773 elements const specialTagsStr string = "ca-ES-valencia en-US-u-va-posix" -// Total table size 3147 bytes (3KiB); checksum: BE816D44 +// Total table size 3147 bytes (3KiB); checksum: 6772C83C diff --git a/vendor/golang.org/x/text/internal/language/lookup.go b/vendor/golang.org/x/text/internal/language/lookup.go index 6294b8152..9309dc276 100644 --- a/vendor/golang.org/x/text/internal/language/lookup.go +++ b/vendor/golang.org/x/text/internal/language/lookup.go @@ -328,7 +328,7 @@ func (r Region) IsPrivateUse() bool { return r.typ()&iso3166UserAssigned != 0 } -type Script uint8 +type Script uint16 // getScriptID returns the script id for string s. It assumes that s // is of the format [A-Z][a-z]{3}. diff --git a/vendor/golang.org/x/text/internal/language/parse.go b/vendor/golang.org/x/text/internal/language/parse.go index 47ee0fed1..aad1e0acf 100644 --- a/vendor/golang.org/x/text/internal/language/parse.go +++ b/vendor/golang.org/x/text/internal/language/parse.go @@ -270,7 +270,7 @@ func parse(scan *scanner, s string) (t Tag, err error) { } else if n >= 4 { return Und, ErrSyntax } else { // the usual case - t, end = parseTag(scan) + t, end = parseTag(scan, true) if n := len(scan.token); n == 1 { t.pExt = uint16(end) end = parseExtensions(scan) @@ -296,7 +296,8 @@ func parse(scan *scanner, s string) (t Tag, err error) { // parseTag parses language, script, region and variants. // It returns a Tag and the end position in the input that was parsed. -func parseTag(scan *scanner) (t Tag, end int) { +// If doNorm is true, then - will be normalized to . +func parseTag(scan *scanner, doNorm bool) (t Tag, end int) { var e error // TODO: set an error if an unknown lang, script or region is encountered. t.LangID, e = getLangID(scan.token) @@ -307,14 +308,17 @@ func parseTag(scan *scanner) (t Tag, end int) { for len(scan.token) == 3 && isAlpha(scan.token[0]) { // From http://tools.ietf.org/html/bcp47, - tags are equivalent // to a tag of the form . 
- lang, e := getLangID(scan.token) - if lang != 0 { - t.LangID = lang - copy(scan.b[langStart:], lang.String()) - scan.b[langStart+3] = '-' - scan.start = langStart + 4 + if doNorm { + lang, e := getLangID(scan.token) + if lang != 0 { + t.LangID = lang + langStr := lang.String() + copy(scan.b[langStart:], langStr) + scan.b[langStart+len(langStr)] = '-' + scan.start = langStart + len(langStr) + 1 + } + scan.gobble(e) } - scan.gobble(e) end = scan.scan() } if len(scan.token) == 4 && isAlpha(scan.token[0]) { @@ -559,7 +563,7 @@ func parseExtension(scan *scanner) int { case 't': // https://www.ietf.org/rfc/rfc6497.txt scan.scan() if n := len(scan.token); n >= 2 && n <= 3 && isAlpha(scan.token[1]) { - _, end = parseTag(scan) + _, end = parseTag(scan, false) scan.toLower(start, end) } for len(scan.token) == 2 && !isAlpha(scan.token[1]) { diff --git a/vendor/golang.org/x/text/internal/language/tables.go b/vendor/golang.org/x/text/internal/language/tables.go index a19480c5b..fb6b58378 100644 --- a/vendor/golang.org/x/text/internal/language/tables.go +++ b/vendor/golang.org/x/text/internal/language/tables.go @@ -7,9 +7,9 @@ import "golang.org/x/text/internal/tag" // CLDRVersion is the CLDR version from which the tables in this package are derived. const CLDRVersion = "32" -const NumLanguages = 8717 +const NumLanguages = 8752 -const NumScripts = 251 +const NumScripts = 258 const NumRegions = 357 @@ -121,9 +121,10 @@ const langPrivateEnd = 0x3179 // lang holds an alphabetically sorted list of ISO-639 language identifiers. // All entries are 4 bytes. The index of the identifier (divided by 4) is the language tag. // For 2-byte language identifiers, the two successive bytes have the following meaning: -// - if the first letter of the 2- and 3-letter ISO codes are the same: -// the second and third letter of the 3-letter ISO code. -// - otherwise: a 0 and a by 2 bits right-shifted index into altLangISO3. +// - if the first letter of the 2- and 3-letter ISO codes are the same: +// the second and third letter of the 3-letter ISO code. +// - otherwise: a 0 and a by 2 bits right-shifted index into altLangISO3. +// // For 3-byte language identifiers the 4th byte is 0. 
const lang tag.Index = "" + // Size: 5324 bytes "---\x00aaaraai\x00aak\x00aau\x00abbkabi\x00abq\x00abr\x00abt\x00aby\x00a" + @@ -265,7 +266,7 @@ var langNoIndex = [2197]uint8{ 0xad, 0x03, 0xff, 0xff, 0xcf, 0x05, 0x84, 0x62, 0xe9, 0xbf, 0xfd, 0xbf, 0xbf, 0xf7, 0xfd, 0x77, 0x0f, 0xff, 0xef, 0x6f, 0xff, 0xfb, 0xdf, 0xe2, - 0xc9, 0xf8, 0x7f, 0x7e, 0x4d, 0xb8, 0x0a, 0x6a, + 0xc9, 0xf8, 0x7f, 0x7e, 0x4d, 0xbc, 0x0a, 0x6a, 0x7c, 0xea, 0xe3, 0xfa, 0x7a, 0xbf, 0x67, 0xff, // Entry 40 - 7F 0xff, 0xff, 0xff, 0xdf, 0x2a, 0x54, 0x91, 0xc0, @@ -277,7 +278,7 @@ var langNoIndex = [2197]uint8{ 0xa8, 0xff, 0x1f, 0x67, 0x7d, 0xeb, 0xef, 0xce, 0xff, 0xff, 0x9f, 0xff, 0xb7, 0xef, 0xfe, 0xcf, // Entry 80 - BF - 0xdb, 0xff, 0xf3, 0xcd, 0xfb, 0x2f, 0xff, 0xff, + 0xdb, 0xff, 0xf3, 0xcd, 0xfb, 0x6f, 0xff, 0xff, 0xbb, 0xee, 0xf7, 0xbd, 0xdb, 0xff, 0x5f, 0xf7, 0xfd, 0xf2, 0xfd, 0xff, 0x5e, 0x2f, 0x3b, 0xba, 0x7e, 0xff, 0xff, 0xfe, 0xf7, 0xff, 0xdd, 0xff, @@ -290,15 +291,15 @@ var langNoIndex = [2197]uint8{ 0x1b, 0x14, 0x08, 0xf3, 0x2b, 0xe7, 0x17, 0x56, 0x05, 0x7d, 0x0e, 0x1c, 0x37, 0x7b, 0xf3, 0xef, 0x97, 0xff, 0x5d, 0x38, 0x64, 0x08, 0x00, 0x10, - 0xbc, 0x85, 0xaf, 0xdf, 0xff, 0xff, 0x73, 0x35, - 0x3e, 0x87, 0xc7, 0xdf, 0xff, 0x01, 0x81, 0x00, + 0xbc, 0x85, 0xaf, 0xdf, 0xff, 0xff, 0x7b, 0x35, + 0x3e, 0xc7, 0xc7, 0xdf, 0xff, 0x01, 0x81, 0x00, 0xb0, 0x05, 0x80, 0x00, 0x00, 0x00, 0x00, 0x03, 0x40, 0x00, 0x40, 0x92, 0x21, 0x50, 0xb1, 0x5d, // Entry 100 - 13F 0xfd, 0xdc, 0xbe, 0x5e, 0x00, 0x00, 0x02, 0x64, 0x0d, 0x19, 0x41, 0xdf, 0x79, 0x22, 0x00, 0x00, 0x00, 0x5e, 0x64, 0xdc, 0x24, 0xe5, 0xd9, 0xe3, - 0xfe, 0xff, 0xfd, 0xcb, 0x9f, 0x14, 0x01, 0x0c, + 0xfe, 0xff, 0xfd, 0xcb, 0x9f, 0x14, 0x41, 0x0c, 0x86, 0x00, 0xd1, 0x00, 0xf0, 0xc7, 0x67, 0x5f, 0x56, 0x99, 0x5e, 0xb5, 0x6c, 0xaf, 0x03, 0x00, 0x02, 0x00, 0x00, 0x00, 0xc0, 0x37, 0xda, 0x56, @@ -309,9 +310,9 @@ var langNoIndex = [2197]uint8{ 0x0a, 0x00, 0x01, 0x00, 0x00, 0x10, 0x11, 0x09, 0x00, 0x00, 0x60, 0x10, 0x00, 0x00, 0x00, 0x10, 0x00, 0x00, 0x44, 0x00, 0x00, 0x10, 0x00, 0x04, - 0x08, 0x00, 0x00, 0x04, 0x00, 0x80, 0x28, 0x04, + 0x08, 0x00, 0x00, 0x05, 0x00, 0x80, 0x28, 0x04, 0x00, 0x00, 0x40, 0xd5, 0x2d, 0x00, 0x64, 0x35, - 0x24, 0x52, 0xf4, 0xd4, 0xbd, 0x62, 0xc9, 0x03, + 0x24, 0x52, 0xf4, 0xd5, 0xbf, 0x62, 0xc9, 0x03, // Entry 180 - 1BF 0x00, 0x80, 0x00, 0x40, 0x00, 0x00, 0x00, 0x00, 0x00, 0x04, 0x13, 0x39, 0x01, 0xdd, 0x57, 0x98, @@ -333,20 +334,20 @@ var langNoIndex = [2197]uint8{ // Entry 200 - 23F 0xdf, 0xc3, 0x83, 0x82, 0xc0, 0xfb, 0x57, 0x27, 0xed, 0x55, 0xe7, 0x01, 0x00, 0x20, 0xb2, 0xc5, - 0xa4, 0x45, 0x25, 0x9b, 0x02, 0xdf, 0xe0, 0xdf, - 0x03, 0x44, 0x08, 0x90, 0x01, 0x04, 0x01, 0xe3, + 0xa4, 0x45, 0x25, 0x9b, 0x02, 0xdf, 0xe1, 0xdf, + 0x03, 0x44, 0x08, 0x90, 0x01, 0x04, 0x81, 0xe3, 0x92, 0x54, 0xdb, 0x28, 0xd3, 0x5f, 0xfe, 0x6d, 0x79, 0xed, 0x1c, 0x7d, 0x04, 0x08, 0x00, 0x01, 0x21, 0x12, 0x64, 0x5f, 0xdd, 0x0e, 0x85, 0x4f, 0x40, 0x40, 0x00, 0x04, 0xf1, 0xfd, 0x3d, 0x54, // Entry 240 - 27F 0xe8, 0x03, 0xb4, 0x27, 0x23, 0x0d, 0x00, 0x00, - 0x20, 0x7b, 0x78, 0x02, 0x05, 0x84, 0x00, 0xf0, + 0x20, 0x7b, 0x78, 0x02, 0x07, 0x84, 0x00, 0xf0, 0xbb, 0x7e, 0x5a, 0x00, 0x18, 0x04, 0x81, 0x00, 0x00, 0x00, 0x80, 0x10, 0x90, 0x1c, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x10, 0x40, 0x00, 0x04, 0x08, 0xa0, 0x70, 0xa5, 0x0c, 0x40, 0x00, 0x00, - 0x11, 0x24, 0x04, 0x68, 0x00, 0x20, 0x70, 0xff, + 0x91, 0x24, 0x04, 0x68, 0x00, 0x20, 0x70, 0xff, 0x7b, 0x7f, 0x70, 0x00, 0x05, 0x9b, 0xdd, 0x66, // Entry 280 - 2BF 0x03, 0x00, 0x11, 0x00, 0x00, 0x00, 0x40, 0x05, @@ -365,12 +366,12 @@ 
var langNoIndex = [2197]uint8{ 0xa7, 0x81, 0x47, 0x97, 0xfb, 0x00, 0x10, 0x00, 0x08, 0x00, 0x80, 0x00, 0x40, 0x04, 0x00, 0x01, 0x02, 0x00, 0x01, 0x40, 0x80, 0x00, 0x00, 0x08, - 0xd8, 0xeb, 0xf6, 0x39, 0xc4, 0x89, 0x12, 0x00, + 0xd8, 0xeb, 0xf6, 0x39, 0xc4, 0x8d, 0x12, 0x00, // Entry 300 - 33F 0x00, 0x0c, 0x04, 0x01, 0x20, 0x20, 0xdd, 0xa0, 0x01, 0x00, 0x00, 0x00, 0x12, 0x00, 0x00, 0x00, 0x04, 0x10, 0xd0, 0x9d, 0x95, 0x13, 0x04, 0x80, - 0x00, 0x01, 0xd0, 0x12, 0x40, 0x00, 0x10, 0xb0, + 0x00, 0x01, 0xd0, 0x16, 0x40, 0x00, 0x10, 0xb0, 0x10, 0x62, 0x4c, 0xd2, 0x02, 0x01, 0x4a, 0x00, 0x46, 0x04, 0x00, 0x08, 0x02, 0x00, 0x20, 0x80, 0x00, 0x80, 0x06, 0x00, 0x08, 0x00, 0x00, 0x00, @@ -397,9 +398,9 @@ var langNoIndex = [2197]uint8{ 0x02, 0x30, 0x9f, 0x7a, 0x16, 0xbd, 0x7f, 0x57, 0xf2, 0xff, 0x31, 0xff, 0xf2, 0x1e, 0x90, 0xf7, 0xf1, 0xf9, 0x45, 0x80, 0x01, 0x02, 0x00, 0x00, - 0x40, 0x54, 0x9f, 0x8a, 0xd9, 0xf9, 0x2e, 0x11, + 0x40, 0x54, 0x9f, 0x8a, 0xdb, 0xf9, 0x2e, 0x11, 0x86, 0x51, 0xc0, 0xf3, 0xfb, 0x47, 0x40, 0x01, - 0x05, 0xd1, 0x50, 0x5c, 0x00, 0x00, 0x00, 0x10, + 0x05, 0xd1, 0x50, 0x5c, 0x00, 0x40, 0x00, 0x10, 0x04, 0x02, 0x00, 0x00, 0x0a, 0x00, 0x17, 0xd2, 0xb9, 0xfd, 0xfc, 0xba, 0xfe, 0xef, 0xc7, 0xbe, // Entry 400 - 43F @@ -421,19 +422,19 @@ var langNoIndex = [2197]uint8{ 0xcd, 0xff, 0xfb, 0xff, 0xdf, 0xd7, 0xea, 0xff, 0xe5, 0x5f, 0x6d, 0x0f, 0xa7, 0x51, 0x06, 0xc4, // Entry 480 - 4BF - 0x13, 0x50, 0x5d, 0xaf, 0xa6, 0xff, 0x99, 0xfb, + 0x93, 0x50, 0x5d, 0xaf, 0xa6, 0xff, 0x99, 0xfb, 0x63, 0x1d, 0x53, 0xff, 0xef, 0xb7, 0x35, 0x20, 0x14, 0x00, 0x55, 0x51, 0x82, 0x65, 0xf5, 0x41, 0xe2, 0xff, 0xfc, 0xdf, 0x02, 0x05, 0xc5, 0x05, - 0x00, 0x22, 0x00, 0x74, 0x69, 0x10, 0x08, 0x04, + 0x00, 0x22, 0x00, 0x74, 0x69, 0x10, 0x08, 0x05, 0x41, 0x00, 0x01, 0x06, 0x00, 0x00, 0x00, 0x00, 0x00, 0x51, 0x20, 0x05, 0x04, 0x01, 0x00, 0x00, - 0x06, 0x01, 0x20, 0x00, 0x18, 0x01, 0x92, 0xb1, + 0x06, 0x01, 0x20, 0x00, 0x18, 0x01, 0x92, 0xf1, // Entry 4C0 - 4FF - 0xfd, 0x47, 0x49, 0x06, 0x95, 0x06, 0x57, 0xed, - 0xfb, 0x4c, 0x1c, 0x6b, 0x83, 0x04, 0x62, 0x40, + 0xfd, 0x47, 0x69, 0x06, 0x95, 0x06, 0x57, 0xed, + 0xfb, 0x4d, 0x1c, 0x6b, 0x83, 0x04, 0x62, 0x40, 0x00, 0x11, 0x42, 0x00, 0x00, 0x00, 0x54, 0x83, - 0xb8, 0x4f, 0x10, 0x8c, 0x89, 0x46, 0xde, 0xf7, + 0xb8, 0x4f, 0x10, 0x8e, 0x89, 0x46, 0xde, 0xf7, 0x13, 0x31, 0x00, 0x20, 0x00, 0x00, 0x00, 0x90, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0a, 0x10, 0x00, 0x01, 0x00, 0x00, 0xf0, 0x5b, 0xf4, 0xbe, 0x3d, @@ -470,7 +471,7 @@ var langNoIndex = [2197]uint8{ 0xaa, 0x10, 0x5d, 0x98, 0x52, 0x00, 0x80, 0x20, 0x00, 0x00, 0x00, 0x00, 0x40, 0x00, 0x02, 0x02, 0x19, 0x00, 0x10, 0x02, 0x10, 0x61, 0x5a, 0x9d, - 0x31, 0x00, 0x00, 0x00, 0x01, 0x10, 0x02, 0x20, + 0x31, 0x00, 0x00, 0x00, 0x01, 0x18, 0x02, 0x20, 0x00, 0x00, 0x01, 0x00, 0x42, 0x00, 0x20, 0x00, 0x00, 0x1f, 0xdf, 0xd2, 0xb9, 0xff, 0xfd, 0x3f, 0x1f, 0x98, 0xcf, 0x9c, 0xff, 0xaf, 0x5f, 0xfe, @@ -479,9 +480,9 @@ var langNoIndex = [2197]uint8{ 0xb7, 0xf6, 0xfb, 0xb3, 0xc7, 0xff, 0x6f, 0xf1, 0x73, 0xb1, 0x7f, 0x9f, 0x7f, 0xbd, 0xfc, 0xb7, 0xee, 0x1c, 0xfa, 0xcb, 0xef, 0xdd, 0xf9, 0xbd, - 0x6e, 0xae, 0x55, 0xfd, 0x6e, 0x81, 0x76, 0x1f, + 0x6e, 0xae, 0x55, 0xfd, 0x6e, 0x81, 0x76, 0x9f, 0xd4, 0x77, 0xf5, 0x7d, 0xfb, 0xff, 0xeb, 0xfe, - 0xbe, 0x5f, 0x46, 0x1b, 0xe9, 0x5f, 0x50, 0x18, + 0xbe, 0x5f, 0x46, 0x5b, 0xe9, 0x5f, 0x50, 0x18, 0x02, 0xfa, 0xf7, 0x9d, 0x15, 0x97, 0x05, 0x0f, // Entry 640 - 67F 0x75, 0xc4, 0x7d, 0x81, 0x92, 0xf5, 0x57, 0x6c, @@ -495,14 +496,14 @@ var langNoIndex = [2197]uint8{ // Entry 680 - 6BF 0x97, 0x9d, 0xbf, 0x9f, 
0xf7, 0xc7, 0xfd, 0x37, 0xce, 0x7f, 0x04, 0x1d, 0x73, 0x7f, 0xf8, 0xda, - 0x5d, 0xce, 0x7d, 0x06, 0xb9, 0xea, 0x69, 0xa0, + 0x5d, 0xce, 0x7d, 0x06, 0xb9, 0xea, 0x79, 0xa0, 0x1a, 0x20, 0x00, 0x30, 0x02, 0x04, 0x24, 0x08, 0x04, 0x00, 0x00, 0x40, 0xd4, 0x02, 0x04, 0x00, 0x00, 0x04, 0x00, 0x04, 0x00, 0x20, 0x01, 0x06, 0x50, 0x00, 0x08, 0x00, 0x00, 0x00, 0x24, 0x00, 0x04, 0x00, 0x10, 0xdc, 0x58, 0xd7, 0x0d, 0x0f, // Entry 6C0 - 6FF - 0x14, 0x4d, 0xf1, 0x16, 0x44, 0xd1, 0x42, 0x08, + 0x14, 0x4d, 0xf1, 0x16, 0x44, 0xd5, 0x42, 0x08, 0x40, 0x00, 0x00, 0x40, 0x00, 0x08, 0x00, 0x00, 0x00, 0xdc, 0xfb, 0xcb, 0x0e, 0x58, 0x48, 0x41, 0x24, 0x20, 0x04, 0x00, 0x30, 0x12, 0x40, 0x00, @@ -513,7 +514,7 @@ var langNoIndex = [2197]uint8{ // Entry 700 - 73F 0x00, 0x00, 0x00, 0x00, 0x0a, 0x00, 0x00, 0x00, 0x80, 0x86, 0xc2, 0x00, 0x00, 0x00, 0x00, 0x01, - 0xdf, 0x18, 0x00, 0x00, 0x02, 0xf0, 0xfd, 0x79, + 0xff, 0x18, 0x02, 0x00, 0x02, 0xf0, 0xfd, 0x79, 0x3b, 0x00, 0x25, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x40, 0x00, 0x00, 0x03, 0x00, 0x09, 0x20, 0x00, 0x00, 0x01, 0x00, @@ -525,7 +526,7 @@ var langNoIndex = [2197]uint8{ 0xcd, 0xf9, 0x5c, 0x00, 0x01, 0x00, 0x30, 0x04, 0x04, 0x55, 0x00, 0x01, 0x04, 0xf4, 0x3f, 0x4a, 0x01, 0x00, 0x00, 0xb0, 0x80, 0x20, 0x55, 0x75, - 0x97, 0x7c, 0x9f, 0x31, 0xcc, 0x68, 0xd1, 0x03, + 0x97, 0x7c, 0xdf, 0x31, 0xcc, 0x68, 0xd1, 0x03, 0xd5, 0x57, 0x27, 0x14, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x2c, 0xf7, 0xcb, 0x1f, 0x14, 0x60, // Entry 780 - 7BF @@ -538,7 +539,7 @@ var langNoIndex = [2197]uint8{ 0xe8, 0x30, 0x90, 0x6a, 0x92, 0x00, 0x00, 0x02, 0xff, 0xef, 0xff, 0x4b, 0x85, 0x53, 0xf4, 0xed, // Entry 7C0 - 7FF - 0xdd, 0xbf, 0x72, 0x1d, 0xc7, 0x0c, 0xd5, 0x42, + 0xdd, 0xbf, 0xf2, 0x5d, 0xc7, 0x0c, 0xd5, 0x42, 0xfc, 0xff, 0xf7, 0x1f, 0x00, 0x80, 0x40, 0x56, 0xcc, 0x16, 0x9e, 0xea, 0x35, 0x7d, 0xef, 0xff, 0xbd, 0xa4, 0xaf, 0x01, 0x44, 0x18, 0x01, 0x4d, @@ -552,15 +553,15 @@ var langNoIndex = [2197]uint8{ 0x40, 0x9c, 0x44, 0xdf, 0xf5, 0x8f, 0x66, 0xb3, 0x55, 0x20, 0xd4, 0xc1, 0xd8, 0x30, 0x3d, 0x80, 0x00, 0x00, 0x00, 0x04, 0xd4, 0x11, 0xc5, 0x84, - 0x2e, 0x50, 0x00, 0x22, 0x50, 0x6e, 0xbd, 0x93, + 0x2f, 0x50, 0x00, 0x22, 0x50, 0x6e, 0xbd, 0x93, 0x07, 0x00, 0x20, 0x10, 0x84, 0xb2, 0x45, 0x10, 0x06, 0x44, 0x00, 0x00, 0x12, 0x02, 0x11, 0x00, // Entry 840 - 87F - 0xf0, 0xfb, 0xfd, 0x7f, 0x05, 0x00, 0x12, 0x81, + 0xf0, 0xfb, 0xfd, 0x7f, 0x05, 0x00, 0x16, 0x81, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0c, 0x02, 0x00, 0x00, 0x00, 0x00, 0x03, 0x30, 0x02, 0x28, 0x84, 0x00, 0x21, 0xc0, 0x23, 0x24, 0x00, 0x00, - 0x00, 0xcb, 0xe4, 0x3a, 0x42, 0x88, 0x14, 0xf1, + 0x00, 0xcb, 0xe4, 0x3a, 0x46, 0x88, 0x14, 0xf1, 0xef, 0xff, 0x7f, 0x12, 0x01, 0x01, 0x84, 0x50, 0x07, 0xfc, 0xff, 0xff, 0x0f, 0x01, 0x00, 0x40, 0x10, 0x38, 0x01, 0x01, 0x1c, 0x12, 0x40, 0xe1, @@ -582,8 +583,8 @@ var altLangIndex = [6]uint16{ } // AliasMap maps langIDs to their suggested replacements. 
-// Size: 704 bytes, 176 elements -var AliasMap = [176]FromTo{ +// Size: 716 bytes, 179 elements +var AliasMap = [179]FromTo{ 0: {From: 0x82, To: 0x88}, 1: {From: 0x187, To: 0x1ae}, 2: {From: 0x1f3, To: 0x1e1}, @@ -612,172 +613,176 @@ var AliasMap = [176]FromTo{ 25: {From: 0x80c, To: 0x5a}, 26: {From: 0x815, To: 0x8d}, 27: {From: 0x87e, To: 0x810}, - 28: {From: 0x8c3, To: 0xee3}, - 29: {From: 0x9ef, To: 0x331}, - 30: {From: 0xa36, To: 0x2c5}, - 31: {From: 0xa3d, To: 0xbf}, - 32: {From: 0xabe, To: 0x3322}, - 33: {From: 0xb38, To: 0x529}, - 34: {From: 0xb75, To: 0x265a}, - 35: {From: 0xb7e, To: 0xbc3}, - 36: {From: 0xb9b, To: 0x44e}, - 37: {From: 0xbbc, To: 0x4229}, - 38: {From: 0xbbf, To: 0x529}, - 39: {From: 0xbfe, To: 0x2da7}, - 40: {From: 0xc2e, To: 0x3181}, - 41: {From: 0xcb9, To: 0xf3}, - 42: {From: 0xd08, To: 0xfa}, - 43: {From: 0xdc8, To: 0x11a}, - 44: {From: 0xdd7, To: 0x32d}, - 45: {From: 0xdf8, To: 0xdfb}, - 46: {From: 0xdfe, To: 0x531}, - 47: {From: 0xe01, To: 0xdf3}, - 48: {From: 0xedf, To: 0x205a}, - 49: {From: 0xee9, To: 0x222e}, - 50: {From: 0xeee, To: 0x2e9a}, - 51: {From: 0xf39, To: 0x367}, - 52: {From: 0x10d0, To: 0x140}, - 53: {From: 0x1104, To: 0x2d0}, - 54: {From: 0x11a0, To: 0x1ec}, - 55: {From: 0x1279, To: 0x21}, - 56: {From: 0x1424, To: 0x15e}, - 57: {From: 0x1470, To: 0x14e}, - 58: {From: 0x151f, To: 0xd9b}, - 59: {From: 0x1523, To: 0x390}, - 60: {From: 0x1532, To: 0x19f}, - 61: {From: 0x1580, To: 0x210}, - 62: {From: 0x1583, To: 0x10d}, - 63: {From: 0x15a3, To: 0x3caf}, - 64: {From: 0x1630, To: 0x222e}, - 65: {From: 0x166a, To: 0x19b}, - 66: {From: 0x16c8, To: 0x136}, - 67: {From: 0x1700, To: 0x29f8}, - 68: {From: 0x1718, To: 0x194}, - 69: {From: 0x1727, To: 0xf3f}, - 70: {From: 0x177a, To: 0x178}, - 71: {From: 0x1809, To: 0x17b6}, - 72: {From: 0x1816, To: 0x18f3}, - 73: {From: 0x188a, To: 0x436}, - 74: {From: 0x1979, To: 0x1d01}, - 75: {From: 0x1a74, To: 0x2bb0}, - 76: {From: 0x1a8a, To: 0x1f8}, - 77: {From: 0x1b5a, To: 0x1fa}, - 78: {From: 0x1b86, To: 0x1515}, - 79: {From: 0x1d64, To: 0x2c9b}, - 80: {From: 0x2038, To: 0x37b1}, - 81: {From: 0x203d, To: 0x20dd}, - 82: {From: 0x205a, To: 0x30b}, - 83: {From: 0x20e3, To: 0x274}, - 84: {From: 0x20ee, To: 0x263}, - 85: {From: 0x20f2, To: 0x22d}, - 86: {From: 0x20f9, To: 0x256}, - 87: {From: 0x210f, To: 0x21eb}, - 88: {From: 0x2135, To: 0x27d}, - 89: {From: 0x2160, To: 0x913}, - 90: {From: 0x2199, To: 0x121}, - 91: {From: 0x21ce, To: 0x1561}, - 92: {From: 0x21e6, To: 0x504}, - 93: {From: 0x21f4, To: 0x49f}, - 94: {From: 0x21fb, To: 0x269}, - 95: {From: 0x222d, To: 0x121}, - 96: {From: 0x2237, To: 0x121}, - 97: {From: 0x2262, To: 0x92a}, - 98: {From: 0x2316, To: 0x3226}, - 99: {From: 0x236a, To: 0x2835}, - 100: {From: 0x2382, To: 0x3365}, - 101: {From: 0x2472, To: 0x2c7}, - 102: {From: 0x24e4, To: 0x2ff}, - 103: {From: 0x24f0, To: 0x2fa}, - 104: {From: 0x24fa, To: 0x31f}, - 105: {From: 0x2550, To: 0xb5b}, - 106: {From: 0x25a9, To: 0xe2}, - 107: {From: 0x263e, To: 0x2d0}, - 108: {From: 0x26c9, To: 0x26b4}, - 109: {From: 0x26f9, To: 0x3c8}, - 110: {From: 0x2727, To: 0x3caf}, - 111: {From: 0x2755, To: 0x6a4}, - 112: {From: 0x2765, To: 0x26b4}, - 113: {From: 0x2789, To: 0x4358}, - 114: {From: 0x27c9, To: 0x2001}, - 115: {From: 0x28ea, To: 0x27b1}, - 116: {From: 0x28ef, To: 0x2837}, - 117: {From: 0x2914, To: 0x351}, - 118: {From: 0x2986, To: 0x2da7}, - 119: {From: 0x29f0, To: 0x96b}, - 120: {From: 0x2b1a, To: 0x38d}, - 121: {From: 0x2bfc, To: 0x395}, - 122: {From: 0x2c3f, To: 0x3caf}, - 123: {From: 0x2cfc, To: 0x3be}, - 124: 
{From: 0x2d13, To: 0x597}, - 125: {From: 0x2d47, To: 0x148}, - 126: {From: 0x2d48, To: 0x148}, - 127: {From: 0x2dff, To: 0x2f1}, - 128: {From: 0x2e08, To: 0x19cc}, - 129: {From: 0x2e1a, To: 0x2d95}, - 130: {From: 0x2e21, To: 0x292}, - 131: {From: 0x2e54, To: 0x7d}, - 132: {From: 0x2e65, To: 0x2282}, - 133: {From: 0x2ea0, To: 0x2e9b}, - 134: {From: 0x2eef, To: 0x2ed7}, - 135: {From: 0x3193, To: 0x3c4}, - 136: {From: 0x3366, To: 0x338e}, - 137: {From: 0x342a, To: 0x3dc}, - 138: {From: 0x34ee, To: 0x18d0}, - 139: {From: 0x35c8, To: 0x2c9b}, - 140: {From: 0x35e6, To: 0x412}, - 141: {From: 0x3658, To: 0x246}, - 142: {From: 0x3676, To: 0x3f4}, - 143: {From: 0x36fd, To: 0x445}, - 144: {From: 0x37c0, To: 0x121}, - 145: {From: 0x3816, To: 0x38f2}, - 146: {From: 0x382a, To: 0x2b48}, - 147: {From: 0x382b, To: 0x2c9b}, - 148: {From: 0x382f, To: 0xa9}, - 149: {From: 0x3832, To: 0x3228}, - 150: {From: 0x386c, To: 0x39a6}, - 151: {From: 0x3892, To: 0x3fc0}, - 152: {From: 0x38a5, To: 0x39d7}, - 153: {From: 0x38b4, To: 0x1fa4}, - 154: {From: 0x38b5, To: 0x2e9a}, - 155: {From: 0x395c, To: 0x47e}, - 156: {From: 0x3b4e, To: 0xd91}, - 157: {From: 0x3b78, To: 0x137}, - 158: {From: 0x3c99, To: 0x4bc}, - 159: {From: 0x3fbd, To: 0x100}, - 160: {From: 0x4208, To: 0xa91}, - 161: {From: 0x42be, To: 0x573}, - 162: {From: 0x42f9, To: 0x3f60}, - 163: {From: 0x4378, To: 0x25a}, - 164: {From: 0x43b8, To: 0xe6c}, - 165: {From: 0x43cd, To: 0x10f}, - 166: {From: 0x44af, To: 0x3322}, - 167: {From: 0x44e3, To: 0x512}, - 168: {From: 0x45ca, To: 0x2409}, - 169: {From: 0x45dd, To: 0x26dc}, - 170: {From: 0x4610, To: 0x48ae}, - 171: {From: 0x46ae, To: 0x46a0}, - 172: {From: 0x473e, To: 0x4745}, - 173: {From: 0x4817, To: 0x3503}, - 174: {From: 0x4916, To: 0x31f}, - 175: {From: 0x49a7, To: 0x523}, + 28: {From: 0x8a8, To: 0x8b7}, + 29: {From: 0x8c3, To: 0xee3}, + 30: {From: 0x8fa, To: 0x1dc}, + 31: {From: 0x9ef, To: 0x331}, + 32: {From: 0xa36, To: 0x2c5}, + 33: {From: 0xa3d, To: 0xbf}, + 34: {From: 0xabe, To: 0x3322}, + 35: {From: 0xb38, To: 0x529}, + 36: {From: 0xb75, To: 0x265a}, + 37: {From: 0xb7e, To: 0xbc3}, + 38: {From: 0xb9b, To: 0x44e}, + 39: {From: 0xbbc, To: 0x4229}, + 40: {From: 0xbbf, To: 0x529}, + 41: {From: 0xbfe, To: 0x2da7}, + 42: {From: 0xc2e, To: 0x3181}, + 43: {From: 0xcb9, To: 0xf3}, + 44: {From: 0xd08, To: 0xfa}, + 45: {From: 0xdc8, To: 0x11a}, + 46: {From: 0xdd7, To: 0x32d}, + 47: {From: 0xdf8, To: 0xdfb}, + 48: {From: 0xdfe, To: 0x531}, + 49: {From: 0xe01, To: 0xdf3}, + 50: {From: 0xedf, To: 0x205a}, + 51: {From: 0xee9, To: 0x222e}, + 52: {From: 0xeee, To: 0x2e9a}, + 53: {From: 0xf39, To: 0x367}, + 54: {From: 0x10d0, To: 0x140}, + 55: {From: 0x1104, To: 0x2d0}, + 56: {From: 0x11a0, To: 0x1ec}, + 57: {From: 0x1279, To: 0x21}, + 58: {From: 0x1424, To: 0x15e}, + 59: {From: 0x1470, To: 0x14e}, + 60: {From: 0x151f, To: 0xd9b}, + 61: {From: 0x1523, To: 0x390}, + 62: {From: 0x1532, To: 0x19f}, + 63: {From: 0x1580, To: 0x210}, + 64: {From: 0x1583, To: 0x10d}, + 65: {From: 0x15a3, To: 0x3caf}, + 66: {From: 0x1630, To: 0x222e}, + 67: {From: 0x166a, To: 0x19b}, + 68: {From: 0x16c8, To: 0x136}, + 69: {From: 0x1700, To: 0x29f8}, + 70: {From: 0x1718, To: 0x194}, + 71: {From: 0x1727, To: 0xf3f}, + 72: {From: 0x177a, To: 0x178}, + 73: {From: 0x1809, To: 0x17b6}, + 74: {From: 0x1816, To: 0x18f3}, + 75: {From: 0x188a, To: 0x436}, + 76: {From: 0x1979, To: 0x1d01}, + 77: {From: 0x1a74, To: 0x2bb0}, + 78: {From: 0x1a8a, To: 0x1f8}, + 79: {From: 0x1b5a, To: 0x1fa}, + 80: {From: 0x1b86, To: 0x1515}, + 81: {From: 0x1d64, To: 0x2c9b}, + 
82: {From: 0x2038, To: 0x37b1}, + 83: {From: 0x203d, To: 0x20dd}, + 84: {From: 0x205a, To: 0x30b}, + 85: {From: 0x20e3, To: 0x274}, + 86: {From: 0x20ee, To: 0x263}, + 87: {From: 0x20f2, To: 0x22d}, + 88: {From: 0x20f9, To: 0x256}, + 89: {From: 0x210f, To: 0x21eb}, + 90: {From: 0x2135, To: 0x27d}, + 91: {From: 0x2160, To: 0x913}, + 92: {From: 0x2199, To: 0x121}, + 93: {From: 0x21ce, To: 0x1561}, + 94: {From: 0x21e6, To: 0x504}, + 95: {From: 0x21f4, To: 0x49f}, + 96: {From: 0x21fb, To: 0x269}, + 97: {From: 0x222d, To: 0x121}, + 98: {From: 0x2237, To: 0x121}, + 99: {From: 0x2262, To: 0x92a}, + 100: {From: 0x2316, To: 0x3226}, + 101: {From: 0x236a, To: 0x2835}, + 102: {From: 0x2382, To: 0x3365}, + 103: {From: 0x2472, To: 0x2c7}, + 104: {From: 0x24e4, To: 0x2ff}, + 105: {From: 0x24f0, To: 0x2fa}, + 106: {From: 0x24fa, To: 0x31f}, + 107: {From: 0x2550, To: 0xb5b}, + 108: {From: 0x25a9, To: 0xe2}, + 109: {From: 0x263e, To: 0x2d0}, + 110: {From: 0x26c9, To: 0x26b4}, + 111: {From: 0x26f9, To: 0x3c8}, + 112: {From: 0x2727, To: 0x3caf}, + 113: {From: 0x2755, To: 0x6a4}, + 114: {From: 0x2765, To: 0x26b4}, + 115: {From: 0x2789, To: 0x4358}, + 116: {From: 0x27c9, To: 0x2001}, + 117: {From: 0x28ea, To: 0x27b1}, + 118: {From: 0x28ef, To: 0x2837}, + 119: {From: 0x2914, To: 0x351}, + 120: {From: 0x2986, To: 0x2da7}, + 121: {From: 0x29f0, To: 0x96b}, + 122: {From: 0x2b1a, To: 0x38d}, + 123: {From: 0x2bfc, To: 0x395}, + 124: {From: 0x2c3f, To: 0x3caf}, + 125: {From: 0x2ce1, To: 0x2201}, + 126: {From: 0x2cfc, To: 0x3be}, + 127: {From: 0x2d13, To: 0x597}, + 128: {From: 0x2d47, To: 0x148}, + 129: {From: 0x2d48, To: 0x148}, + 130: {From: 0x2dff, To: 0x2f1}, + 131: {From: 0x2e08, To: 0x19cc}, + 132: {From: 0x2e1a, To: 0x2d95}, + 133: {From: 0x2e21, To: 0x292}, + 134: {From: 0x2e54, To: 0x7d}, + 135: {From: 0x2e65, To: 0x2282}, + 136: {From: 0x2ea0, To: 0x2e9b}, + 137: {From: 0x2eef, To: 0x2ed7}, + 138: {From: 0x3193, To: 0x3c4}, + 139: {From: 0x3366, To: 0x338e}, + 140: {From: 0x342a, To: 0x3dc}, + 141: {From: 0x34ee, To: 0x18d0}, + 142: {From: 0x35c8, To: 0x2c9b}, + 143: {From: 0x35e6, To: 0x412}, + 144: {From: 0x3658, To: 0x246}, + 145: {From: 0x3676, To: 0x3f4}, + 146: {From: 0x36fd, To: 0x445}, + 147: {From: 0x37c0, To: 0x121}, + 148: {From: 0x3816, To: 0x38f2}, + 149: {From: 0x382a, To: 0x2b48}, + 150: {From: 0x382b, To: 0x2c9b}, + 151: {From: 0x382f, To: 0xa9}, + 152: {From: 0x3832, To: 0x3228}, + 153: {From: 0x386c, To: 0x39a6}, + 154: {From: 0x3892, To: 0x3fc0}, + 155: {From: 0x38a5, To: 0x39d7}, + 156: {From: 0x38b4, To: 0x1fa4}, + 157: {From: 0x38b5, To: 0x2e9a}, + 158: {From: 0x395c, To: 0x47e}, + 159: {From: 0x3b4e, To: 0xd91}, + 160: {From: 0x3b78, To: 0x137}, + 161: {From: 0x3c99, To: 0x4bc}, + 162: {From: 0x3fbd, To: 0x100}, + 163: {From: 0x4208, To: 0xa91}, + 164: {From: 0x42be, To: 0x573}, + 165: {From: 0x42f9, To: 0x3f60}, + 166: {From: 0x4378, To: 0x25a}, + 167: {From: 0x43b8, To: 0xe6c}, + 168: {From: 0x43cd, To: 0x10f}, + 169: {From: 0x44af, To: 0x3322}, + 170: {From: 0x44e3, To: 0x512}, + 171: {From: 0x45ca, To: 0x2409}, + 172: {From: 0x45dd, To: 0x26dc}, + 173: {From: 0x4610, To: 0x48ae}, + 174: {From: 0x46ae, To: 0x46a0}, + 175: {From: 0x473e, To: 0x4745}, + 176: {From: 0x4817, To: 0x3503}, + 177: {From: 0x4916, To: 0x31f}, + 178: {From: 0x49a7, To: 0x523}, } -// Size: 176 bytes, 176 elements -var AliasTypes = [176]AliasType{ +// Size: 179 bytes, 179 elements +var AliasTypes = [179]AliasType{ // Entry 0 - 3F 1, 0, 0, 0, 0, 0, 0, 1, 2, 2, 0, 1, 0, 0, 1, 2, - 1, 1, 2, 0, 0, 1, 0, 1, 2, 1, 1, 
0, 0, 2, 1, 1, - 0, 2, 0, 0, 1, 0, 1, 0, 0, 1, 2, 1, 1, 1, 1, 0, - 0, 0, 0, 2, 1, 1, 1, 1, 2, 1, 0, 1, 1, 2, 2, 0, + 1, 1, 2, 0, 0, 1, 0, 1, 2, 1, 1, 0, 0, 0, 0, 2, + 1, 1, 0, 2, 0, 0, 1, 0, 1, 0, 0, 1, 2, 1, 1, 1, + 1, 0, 0, 0, 0, 2, 1, 1, 1, 1, 2, 1, 0, 1, 1, 2, // Entry 40 - 7F - 0, 1, 2, 0, 1, 0, 1, 1, 1, 1, 0, 0, 2, 1, 0, 0, - 0, 0, 1, 1, 1, 1, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, - 0, 1, 0, 0, 0, 1, 2, 2, 2, 0, 1, 1, 0, 1, 0, 0, - 0, 0, 0, 0, 0, 1, 0, 0, 1, 1, 0, 1, 0, 2, 1, 1, + 2, 0, 0, 1, 2, 0, 1, 0, 1, 1, 1, 1, 0, 0, 2, 1, + 0, 0, 0, 0, 1, 1, 1, 1, 1, 0, 1, 0, 0, 0, 0, 0, + 0, 0, 0, 1, 0, 0, 0, 1, 2, 2, 2, 0, 1, 1, 0, 1, + 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, // Entry 80 - BF - 0, 0, 1, 0, 0, 0, 0, 1, 1, 2, 0, 0, 2, 1, 1, 1, - 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 1, 1, 0, 1, 2, - 0, 0, 0, 1, 0, 1, 0, 1, 0, 0, 0, 0, 1, 0, 1, 1, + 2, 1, 1, 0, 0, 1, 0, 0, 0, 0, 1, 1, 2, 0, 0, 2, + 1, 1, 1, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 1, 1, + 0, 1, 2, 0, 0, 0, 1, 0, 1, 0, 1, 0, 0, 0, 0, 1, + 0, 1, 1, } const ( @@ -785,17 +790,17 @@ const ( _Hani = 57 _Hans = 59 _Hant = 60 - _Qaaa = 143 - _Qaai = 151 - _Qabx = 192 - _Zinh = 245 - _Zyyy = 250 - _Zzzz = 251 + _Qaaa = 147 + _Qaai = 155 + _Qabx = 196 + _Zinh = 252 + _Zyyy = 257 + _Zzzz = 258 ) // script is an alphabetically sorted list of ISO 15924 codes. The index // of the script in the string, divided by 4, is the internal scriptID. -const script tag.Index = "" + // Size: 1012 bytes +const script tag.Index = "" + // Size: 1040 bytes "----AdlmAfakAghbAhomArabAranArmiArmnAvstBaliBamuBassBatkBengBhksBlisBopo" + "BrahBraiBugiBuhdCakmCansCariChamCherChrsCirtCoptCpmnCprtCyrlCyrsDevaDiak" + "DogrDsrtDuplEgydEgyhEgypElbaElymEthiGeokGeorGlagGongGonmGothGranGrekGujr" + @@ -803,14 +808,14 @@ const script tag.Index = "" + // Size: 1012 bytes "JavaJpanJurcKaliKanaKharKhmrKhojKitlKitsKndaKoreKpelKthiLanaLaooLatfLatg" + "LatnLekeLepcLimbLinaLinbLisuLomaLyciLydiMahjMakaMandManiMarcMayaMedfMend" + "MercMeroMlymModiMongMoonMrooMteiMultMymrNandNarbNbatNewaNkdbNkgbNkooNshu" + - "OgamOlckOrkhOryaOsgeOsmaPalmPaucPermPhagPhliPhlpPhlvPhnxPiqdPlrdPrtiQaaa" + - "QaabQaacQaadQaaeQaafQaagQaahQaaiQaajQaakQaalQaamQaanQaaoQaapQaaqQaarQaas" + - "QaatQaauQaavQaawQaaxQaayQaazQabaQabbQabcQabdQabeQabfQabgQabhQabiQabjQabk" + - "QablQabmQabnQaboQabpQabqQabrQabsQabtQabuQabvQabwQabxRjngRohgRoroRunrSamr" + - "SaraSarbSaurSgnwShawShrdShuiSiddSindSinhSogdSogoSoraSoyoSundSyloSyrcSyre" + - "SyrjSyrnTagbTakrTaleTaluTamlTangTavtTeluTengTfngTglgThaaThaiTibtTirhToto" + - "UgarVaiiVispWaraWchoWoleXpeoXsuxYeziYiiiZanbZinhZmthZsyeZsymZxxxZyyyZzzz" + - "\xff\xff\xff\xff" + "OgamOlckOrkhOryaOsgeOsmaOugrPalmPaucPcunPelmPermPhagPhliPhlpPhlvPhnxPiqd" + + "PlrdPrtiPsinQaaaQaabQaacQaadQaaeQaafQaagQaahQaaiQaajQaakQaalQaamQaanQaao" + + "QaapQaaqQaarQaasQaatQaauQaavQaawQaaxQaayQaazQabaQabbQabcQabdQabeQabfQabg" + + "QabhQabiQabjQabkQablQabmQabnQaboQabpQabqQabrQabsQabtQabuQabvQabwQabxRanj" + + "RjngRohgRoroRunrSamrSaraSarbSaurSgnwShawShrdShuiSiddSindSinhSogdSogoSora" + + "SoyoSundSyloSyrcSyreSyrjSyrnTagbTakrTaleTaluTamlTangTavtTeluTengTfngTglg" + + "ThaaThaiTibtTirhTnsaTotoUgarVaiiVispVithWaraWchoWoleXpeoXsuxYeziYiiiZanb" + + "ZinhZmthZsyeZsymZxxxZyyyZzzz\xff\xff\xff\xff" // suppressScript is an index from langID to the dominant script for that language, // if it exists. If a script is given, it should be suppressed from the language tag. 
@@ -858,7 +863,7 @@ var suppressScript = [1330]uint8{ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x5a, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0xe5, 0x00, 0x00, 0x00, 0x00, 0xe7, 0x00, 0x00, + 0xea, 0x00, 0x00, 0x00, 0x00, 0xec, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x34, 0x00, 0x00, 0x5a, 0x00, 0x00, 0x5a, 0x00, 0x5a, 0x00, // Entry 140 - 17F @@ -962,7 +967,7 @@ var suppressScript = [1330]uint8{ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // Entry 400 - 43F 0x00, 0x00, 0x5a, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0xcf, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0xd4, 0x00, 0x00, 0x00, 0x00, 0x00, 0x5a, 0x00, 0x00, 0x00, 0x5a, 0x00, 0x00, 0x00, 0x00, 0x5a, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, @@ -972,10 +977,10 @@ var suppressScript = [1330]uint8{ // Entry 440 - 47F 0x00, 0x00, 0x00, 0x00, 0x5a, 0x5a, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0xde, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0xe3, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0xe1, 0x00, 0x5a, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0xe6, 0x00, 0x00, 0x00, 0x2c, + 0x00, 0xe6, 0x00, 0x5a, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0xeb, 0x00, 0x00, 0x00, 0x2c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x5a, 0x00, 0x00, 0x5a, 0x00, 0x00, 0x00, 0x5a, 0x00, // Entry 480 - 4BF @@ -1086,9 +1091,9 @@ var regionTypes = [358]uint8{ // regionISO holds a list of alphabetically sorted 2-letter ISO region codes. // Each 2-letter codes is followed by two bytes with the following meaning: -// - [A-Z}{2}: the first letter of the 2-letter code plus these two -// letters form the 3-letter ISO code. -// - 0, n: index into altRegionISO3. +// - [A-Z}{2}: the first letter of the 2-letter code plus these two +// letters form the 3-letter ISO code. +// - 0, n: index into altRegionISO3. const regionISO tag.Index = "" + // Size: 1308 bytes "AAAAACSCADNDAEREAFFGAGTGAIIAALLBAMRMANNTAOGOAQTAARRGASSMATUTAUUSAWBWAXLA" + "AZZEBAIHBBRBBDGDBEELBFFABGGRBHHRBIDIBJENBLLMBMMUBNRNBOOLBQESBRRABSHSBTTN" + @@ -1206,7 +1211,9 @@ var m49 = [358]int16{ // m49Index gives indexes into fromM49 based on the three most significant bits // of a 10-bit UN.M49 code. To search an UN.M49 code in fromM49, search in -// fromM49[m49Index[msb39(code)]:m49Index[msb3(code)+1]] +// +// fromM49[m49Index[msb39(code)]:m49Index[msb3(code)+1]] +// // for an entry where the first 7 bits match the 7 lsb of the UN.M49 code. // The region code is stored in the 9 lsb of the indexed value. 
// Size: 18 bytes, 9 elements @@ -1268,117 +1275,118 @@ var fromM49 = [333]uint16{ 0xc759, 0xc95a, 0xcb5b, 0xcd5c, 0xcf65, } -// Size: 1995 bytes +// Size: 2014 bytes var variantIndex = map[string]uint8{ "1606nict": 0x0, "1694acad": 0x1, "1901": 0x2, "1959acad": 0x3, - "1994": 0x60, + "1994": 0x61, "1996": 0x4, "abl1943": 0x5, "akuapem": 0x6, - "alalc97": 0x62, + "alalc97": 0x63, "aluku": 0x7, "ao1990": 0x8, "aranes": 0x9, "arevela": 0xa, "arevmda": 0xb, - "asante": 0xc, - "auvern": 0xd, - "baku1926": 0xe, - "balanka": 0xf, - "barla": 0x10, - "basiceng": 0x11, - "bauddha": 0x12, - "biscayan": 0x13, - "biske": 0x5b, - "bohoric": 0x14, - "boont": 0x15, - "bornholm": 0x16, - "cisaup": 0x17, - "colb1945": 0x18, - "cornu": 0x19, - "creiss": 0x1a, - "dajnko": 0x1b, - "ekavsk": 0x1c, - "emodeng": 0x1d, - "fonipa": 0x63, - "fonkirsh": 0x64, - "fonnapa": 0x65, - "fonupa": 0x66, - "fonxsamp": 0x67, - "gascon": 0x1e, - "grclass": 0x1f, - "grital": 0x20, - "grmistr": 0x21, - "hepburn": 0x22, - "heploc": 0x61, - "hognorsk": 0x23, - "hsistemo": 0x24, - "ijekavsk": 0x25, - "itihasa": 0x26, - "ivanchov": 0x27, - "jauer": 0x28, - "jyutping": 0x29, - "kkcor": 0x2a, - "kociewie": 0x2b, - "kscor": 0x2c, - "laukika": 0x2d, - "lemosin": 0x2e, - "lengadoc": 0x2f, - "lipaw": 0x5c, - "luna1918": 0x30, - "metelko": 0x31, - "monoton": 0x32, - "ndyuka": 0x33, - "nedis": 0x34, - "newfound": 0x35, - "nicard": 0x36, - "njiva": 0x5d, - "nulik": 0x37, - "osojs": 0x5e, - "oxendict": 0x38, - "pahawh2": 0x39, - "pahawh3": 0x3a, - "pahawh4": 0x3b, - "pamaka": 0x3c, - "peano": 0x3d, - "petr1708": 0x3e, - "pinyin": 0x3f, - "polyton": 0x40, - "provenc": 0x41, - "puter": 0x42, - "rigik": 0x43, - "rozaj": 0x44, - "rumgr": 0x45, - "scotland": 0x46, - "scouse": 0x47, - "simple": 0x68, - "solba": 0x5f, - "sotav": 0x48, - "spanglis": 0x49, - "surmiran": 0x4a, - "sursilv": 0x4b, - "sutsilv": 0x4c, - "tarask": 0x4d, - "tongyong": 0x4e, - "tunumiit": 0x4f, - "uccor": 0x50, - "ucrcor": 0x51, - "ulster": 0x52, - "unifon": 0x53, - "vaidika": 0x54, - "valencia": 0x55, - "vallader": 0x56, - "vecdruka": 0x57, - "vivaraup": 0x58, - "wadegile": 0x59, - "xsistemo": 0x5a, + "arkaika": 0xc, + "asante": 0xd, + "auvern": 0xe, + "baku1926": 0xf, + "balanka": 0x10, + "barla": 0x11, + "basiceng": 0x12, + "bauddha": 0x13, + "biscayan": 0x14, + "biske": 0x5c, + "bohoric": 0x15, + "boont": 0x16, + "bornholm": 0x17, + "cisaup": 0x18, + "colb1945": 0x19, + "cornu": 0x1a, + "creiss": 0x1b, + "dajnko": 0x1c, + "ekavsk": 0x1d, + "emodeng": 0x1e, + "fonipa": 0x64, + "fonkirsh": 0x65, + "fonnapa": 0x66, + "fonupa": 0x67, + "fonxsamp": 0x68, + "gascon": 0x1f, + "grclass": 0x20, + "grital": 0x21, + "grmistr": 0x22, + "hepburn": 0x23, + "heploc": 0x62, + "hognorsk": 0x24, + "hsistemo": 0x25, + "ijekavsk": 0x26, + "itihasa": 0x27, + "ivanchov": 0x28, + "jauer": 0x29, + "jyutping": 0x2a, + "kkcor": 0x2b, + "kociewie": 0x2c, + "kscor": 0x2d, + "laukika": 0x2e, + "lemosin": 0x2f, + "lengadoc": 0x30, + "lipaw": 0x5d, + "luna1918": 0x31, + "metelko": 0x32, + "monoton": 0x33, + "ndyuka": 0x34, + "nedis": 0x35, + "newfound": 0x36, + "nicard": 0x37, + "njiva": 0x5e, + "nulik": 0x38, + "osojs": 0x5f, + "oxendict": 0x39, + "pahawh2": 0x3a, + "pahawh3": 0x3b, + "pahawh4": 0x3c, + "pamaka": 0x3d, + "peano": 0x3e, + "petr1708": 0x3f, + "pinyin": 0x40, + "polyton": 0x41, + "provenc": 0x42, + "puter": 0x43, + "rigik": 0x44, + "rozaj": 0x45, + "rumgr": 0x46, + "scotland": 0x47, + "scouse": 0x48, + "simple": 0x69, + "solba": 0x60, + "sotav": 0x49, + "spanglis": 0x4a, + "surmiran": 
0x4b, + "sursilv": 0x4c, + "sutsilv": 0x4d, + "tarask": 0x4e, + "tongyong": 0x4f, + "tunumiit": 0x50, + "uccor": 0x51, + "ucrcor": 0x52, + "ulster": 0x53, + "unifon": 0x54, + "vaidika": 0x55, + "valencia": 0x56, + "vallader": 0x57, + "vecdruka": 0x58, + "vivaraup": 0x59, + "wadegile": 0x5a, + "xsistemo": 0x5b, } // variantNumSpecialized is the number of specialized variants in variants. -const variantNumSpecialized = 98 +const variantNumSpecialized = 99 // nRegionGroups is the number of region groups. const nRegionGroups = 33 @@ -1390,8 +1398,8 @@ type likelyLangRegion struct { // likelyScript is a lookup table, indexed by scriptID, for the most likely // languages and regions given a script. -// Size: 1012 bytes, 253 elements -var likelyScript = [253]likelyLangRegion{ +// Size: 1040 bytes, 260 elements +var likelyScript = [260]likelyLangRegion{ 1: {lang: 0x14e, region: 0x84}, 3: {lang: 0x2a2, region: 0x106}, 4: {lang: 0x1f, region: 0x99}, @@ -1489,57 +1497,57 @@ var likelyScript = [253]likelyLangRegion{ 129: {lang: 0x395, region: 0x99}, 130: {lang: 0x399, region: 0x135}, 131: {lang: 0x429, region: 0x115}, - 132: {lang: 0x3b, region: 0x11c}, - 133: {lang: 0xfd, region: 0xc4}, - 134: {lang: 0x27d, region: 0x106}, - 135: {lang: 0x2c9, region: 0x53}, - 136: {lang: 0x39f, region: 0x9c}, - 137: {lang: 0x39f, region: 0x53}, - 139: {lang: 0x3ad, region: 0xb0}, - 141: {lang: 0x1c6, region: 0x53}, - 142: {lang: 0x4fd, region: 0x9c}, - 193: {lang: 0x3cb, region: 0x95}, - 196: {lang: 0x372, region: 0x10c}, - 197: {lang: 0x420, region: 0x97}, - 199: {lang: 0x4ff, region: 0x15e}, - 200: {lang: 0x3f0, region: 0x99}, - 201: {lang: 0x45, region: 0x135}, - 202: {lang: 0x139, region: 0x7b}, - 203: {lang: 0x3e9, region: 0x99}, - 205: {lang: 0x3e9, region: 0x99}, - 206: {lang: 0x3fa, region: 0x99}, - 207: {lang: 0x40c, region: 0xb3}, - 210: {lang: 0x433, region: 0x99}, - 211: {lang: 0xef, region: 0xc5}, - 212: {lang: 0x43e, region: 0x95}, - 213: {lang: 0x44d, region: 0x35}, - 214: {lang: 0x44e, region: 0x9b}, - 218: {lang: 0x45a, region: 0xe7}, - 219: {lang: 0x11a, region: 0x99}, - 220: {lang: 0x45e, region: 0x53}, - 221: {lang: 0x232, region: 0x53}, - 222: {lang: 0x450, region: 0x99}, - 223: {lang: 0x4a5, region: 0x53}, - 224: {lang: 0x9f, region: 0x13e}, - 225: {lang: 0x461, region: 0x99}, - 227: {lang: 0x528, region: 0xba}, - 228: {lang: 0x153, region: 0xe7}, - 229: {lang: 0x128, region: 0xcd}, - 230: {lang: 0x46b, region: 0x123}, - 231: {lang: 0xa9, region: 0x53}, - 232: {lang: 0x2ce, region: 0x99}, - 234: {lang: 0x4ad, region: 0x11c}, - 235: {lang: 0x4be, region: 0xb4}, - 237: {lang: 0x1ce, region: 0x99}, - 240: {lang: 0x3a9, region: 0x9c}, - 241: {lang: 0x22, region: 0x9b}, - 243: {lang: 0x1ea, region: 0x53}, - 244: {lang: 0xef, region: 0xc5}, + 133: {lang: 0x3b, region: 0x11c}, + 134: {lang: 0xfd, region: 0xc4}, + 137: {lang: 0x27d, region: 0x106}, + 138: {lang: 0x2c9, region: 0x53}, + 139: {lang: 0x39f, region: 0x9c}, + 140: {lang: 0x39f, region: 0x53}, + 142: {lang: 0x3ad, region: 0xb0}, + 144: {lang: 0x1c6, region: 0x53}, + 145: {lang: 0x4fd, region: 0x9c}, + 198: {lang: 0x3cb, region: 0x95}, + 201: {lang: 0x372, region: 0x10c}, + 202: {lang: 0x420, region: 0x97}, + 204: {lang: 0x4ff, region: 0x15e}, + 205: {lang: 0x3f0, region: 0x99}, + 206: {lang: 0x45, region: 0x135}, + 207: {lang: 0x139, region: 0x7b}, + 208: {lang: 0x3e9, region: 0x99}, + 210: {lang: 0x3e9, region: 0x99}, + 211: {lang: 0x3fa, region: 0x99}, + 212: {lang: 0x40c, region: 0xb3}, + 215: {lang: 0x433, region: 0x99}, + 216: {lang: 
0xef, region: 0xc5}, + 217: {lang: 0x43e, region: 0x95}, + 218: {lang: 0x44d, region: 0x35}, + 219: {lang: 0x44e, region: 0x9b}, + 223: {lang: 0x45a, region: 0xe7}, + 224: {lang: 0x11a, region: 0x99}, + 225: {lang: 0x45e, region: 0x53}, + 226: {lang: 0x232, region: 0x53}, + 227: {lang: 0x450, region: 0x99}, + 228: {lang: 0x4a5, region: 0x53}, + 229: {lang: 0x9f, region: 0x13e}, + 230: {lang: 0x461, region: 0x99}, + 232: {lang: 0x528, region: 0xba}, + 233: {lang: 0x153, region: 0xe7}, + 234: {lang: 0x128, region: 0xcd}, + 235: {lang: 0x46b, region: 0x123}, + 236: {lang: 0xa9, region: 0x53}, + 237: {lang: 0x2ce, region: 0x99}, + 240: {lang: 0x4ad, region: 0x11c}, + 241: {lang: 0x4be, region: 0xb4}, + 244: {lang: 0x1ce, region: 0x99}, + 247: {lang: 0x3a9, region: 0x9c}, + 248: {lang: 0x22, region: 0x9b}, + 250: {lang: 0x1ea, region: 0x53}, + 251: {lang: 0xef, region: 0xc5}, } type likelyScriptRegion struct { region uint16 - script uint8 + script uint16 flags uint8 } @@ -1547,7 +1555,7 @@ type likelyScriptRegion struct { // scripts and regions given incomplete information. If more entries exist for a // given language, region and script are the index and size respectively // of the list in likelyLangList. -// Size: 5320 bytes, 1330 elements +// Size: 7980 bytes, 1330 elements var likelyLang = [1330]likelyScriptRegion{ 0: {region: 0x135, script: 0x5a, flags: 0x0}, 1: {region: 0x6f, script: 0x5a, flags: 0x0}, @@ -1583,7 +1591,7 @@ var likelyLang = [1330]likelyScriptRegion{ 31: {region: 0x99, script: 0x4, flags: 0x0}, 32: {region: 0x165, script: 0x5a, flags: 0x0}, 33: {region: 0x80, script: 0x5a, flags: 0x0}, - 34: {region: 0x9b, script: 0xf1, flags: 0x0}, + 34: {region: 0x9b, script: 0xf8, flags: 0x0}, 35: {region: 0x165, script: 0x5a, flags: 0x0}, 36: {region: 0x165, script: 0x5a, flags: 0x0}, 37: {region: 0x14d, script: 0x5a, flags: 0x0}, @@ -1616,7 +1624,7 @@ var likelyLang = [1330]likelyScriptRegion{ 66: {region: 0x6b, script: 0x5, flags: 0x0}, 67: {region: 0x99, script: 0xe, flags: 0x0}, 68: {region: 0x12f, script: 0x5a, flags: 0x0}, - 69: {region: 0x135, script: 0xc9, flags: 0x0}, + 69: {region: 0x135, script: 0xce, flags: 0x0}, 70: {region: 0x165, script: 0x5a, flags: 0x0}, 71: {region: 0x165, script: 0x5a, flags: 0x0}, 72: {region: 0x6e, script: 0x5a, flags: 0x0}, @@ -1670,7 +1678,7 @@ var likelyLang = [1330]likelyScriptRegion{ 120: {region: 0x165, script: 0x5a, flags: 0x0}, 121: {region: 0x12f, script: 0x5a, flags: 0x0}, 122: {region: 0x52, script: 0x5a, flags: 0x0}, - 123: {region: 0x99, script: 0xde, flags: 0x0}, + 123: {region: 0x99, script: 0xe3, flags: 0x0}, 124: {region: 0xe8, script: 0x5, flags: 0x0}, 125: {region: 0x99, script: 0x22, flags: 0x0}, 126: {region: 0x38, script: 0x20, flags: 0x0}, @@ -1705,7 +1713,7 @@ var likelyLang = [1330]likelyScriptRegion{ 156: {region: 0x165, script: 0x5a, flags: 0x0}, 157: {region: 0xe7, script: 0x5a, flags: 0x0}, 158: {region: 0x165, script: 0x5a, flags: 0x0}, - 159: {region: 0x13e, script: 0xe0, flags: 0x0}, + 159: {region: 0x13e, script: 0xe5, flags: 0x0}, 160: {region: 0xc3, script: 0x5a, flags: 0x0}, 161: {region: 0x165, script: 0x5a, flags: 0x0}, 162: {region: 0x165, script: 0x5a, flags: 0x0}, @@ -1715,7 +1723,7 @@ var likelyLang = [1330]likelyScriptRegion{ 166: {region: 0x165, script: 0x5a, flags: 0x0}, 167: {region: 0x165, script: 0x5a, flags: 0x0}, 168: {region: 0x165, script: 0x5a, flags: 0x0}, - 169: {region: 0x53, script: 0xe7, flags: 0x0}, + 169: {region: 0x53, script: 0xec, flags: 0x0}, 170: {region: 0x165, script: 0x5a, flags: 
0x0}, 171: {region: 0x165, script: 0x5a, flags: 0x0}, 172: {region: 0x165, script: 0x5a, flags: 0x0}, @@ -1785,7 +1793,7 @@ var likelyLang = [1330]likelyScriptRegion{ 236: {region: 0x165, script: 0x5a, flags: 0x0}, 237: {region: 0x165, script: 0x5a, flags: 0x0}, 238: {region: 0x165, script: 0x5a, flags: 0x0}, - 239: {region: 0xc5, script: 0xd3, flags: 0x0}, + 239: {region: 0xc5, script: 0xd8, flags: 0x0}, 240: {region: 0x78, script: 0x5a, flags: 0x0}, 241: {region: 0x6b, script: 0x1d, flags: 0x0}, 242: {region: 0xe7, script: 0x5a, flags: 0x0}, @@ -1799,7 +1807,7 @@ var likelyLang = [1330]likelyScriptRegion{ 250: {region: 0x5e, script: 0x5a, flags: 0x0}, 251: {region: 0xe9, script: 0x5a, flags: 0x0}, 252: {region: 0x49, script: 0x17, flags: 0x0}, - 253: {region: 0xc4, script: 0x85, flags: 0x0}, + 253: {region: 0xc4, script: 0x86, flags: 0x0}, 254: {region: 0x8, script: 0x2, flags: 0x1}, 255: {region: 0x106, script: 0x20, flags: 0x0}, 256: {region: 0x7b, script: 0x5a, flags: 0x0}, @@ -1842,12 +1850,12 @@ var likelyLang = [1330]likelyScriptRegion{ 293: {region: 0x165, script: 0x5a, flags: 0x0}, 294: {region: 0x165, script: 0x5a, flags: 0x0}, 295: {region: 0x165, script: 0x5a, flags: 0x0}, - 296: {region: 0xcd, script: 0xe5, flags: 0x0}, + 296: {region: 0xcd, script: 0xea, flags: 0x0}, 297: {region: 0x165, script: 0x5a, flags: 0x0}, 298: {region: 0x165, script: 0x5a, flags: 0x0}, 299: {region: 0x114, script: 0x5a, flags: 0x0}, 300: {region: 0x37, script: 0x5a, flags: 0x0}, - 301: {region: 0x43, script: 0xe7, flags: 0x0}, + 301: {region: 0x43, script: 0xec, flags: 0x0}, 302: {region: 0x165, script: 0x5a, flags: 0x0}, 303: {region: 0xa4, script: 0x5a, flags: 0x0}, 304: {region: 0x80, script: 0x5a, flags: 0x0}, @@ -1957,7 +1965,7 @@ var likelyLang = [1330]likelyScriptRegion{ 408: {region: 0x165, script: 0x2c, flags: 0x0}, 409: {region: 0x165, script: 0x5a, flags: 0x0}, 410: {region: 0x99, script: 0x22, flags: 0x0}, - 411: {region: 0x99, script: 0xe1, flags: 0x0}, + 411: {region: 0x99, script: 0xe6, flags: 0x0}, 412: {region: 0x95, script: 0x5a, flags: 0x0}, 413: {region: 0xd9, script: 0x5a, flags: 0x0}, 414: {region: 0x130, script: 0x32, flags: 0x0}, @@ -2000,7 +2008,7 @@ var likelyLang = [1330]likelyScriptRegion{ 451: {region: 0xe7, script: 0x5a, flags: 0x0}, 452: {region: 0x165, script: 0x5a, flags: 0x0}, 453: {region: 0x12b, script: 0x40, flags: 0x0}, - 454: {region: 0x53, script: 0x8d, flags: 0x0}, + 454: {region: 0x53, script: 0x90, flags: 0x0}, 455: {region: 0x165, script: 0x5a, flags: 0x0}, 456: {region: 0xe8, script: 0x5, flags: 0x0}, 457: {region: 0x99, script: 0x22, flags: 0x0}, @@ -2035,7 +2043,7 @@ var likelyLang = [1330]likelyScriptRegion{ 487: {region: 0xd6, script: 0x5a, flags: 0x0}, 488: {region: 0x165, script: 0x5a, flags: 0x0}, 489: {region: 0x165, script: 0x5a, flags: 0x0}, - 490: {region: 0x53, script: 0xf3, flags: 0x0}, + 490: {region: 0x53, script: 0xfa, flags: 0x0}, 491: {region: 0x165, script: 0x5a, flags: 0x0}, 492: {region: 0x135, script: 0x5a, flags: 0x0}, 493: {region: 0x165, script: 0x5a, flags: 0x0}, @@ -2095,7 +2103,7 @@ var likelyLang = [1330]likelyScriptRegion{ 547: {region: 0x12f, script: 0x5a, flags: 0x0}, 548: {region: 0x122, script: 0x5, flags: 0x0}, 549: {region: 0x165, script: 0x5a, flags: 0x0}, - 550: {region: 0x123, script: 0xe6, flags: 0x0}, + 550: {region: 0x123, script: 0xeb, flags: 0x0}, 551: {region: 0x5a, script: 0x5a, flags: 0x0}, 552: {region: 0x52, script: 0x5a, flags: 0x0}, 553: {region: 0x165, script: 0x5a, flags: 0x0}, @@ -2107,7 +2115,7 @@ var 
likelyLang = [1330]likelyScriptRegion{ 559: {region: 0x165, script: 0x5a, flags: 0x0}, 560: {region: 0x41, script: 0x5a, flags: 0x0}, 561: {region: 0x99, script: 0x5a, flags: 0x0}, - 562: {region: 0x53, script: 0xdd, flags: 0x0}, + 562: {region: 0x53, script: 0xe2, flags: 0x0}, 563: {region: 0x99, script: 0x22, flags: 0x0}, 564: {region: 0xc3, script: 0x5a, flags: 0x0}, 565: {region: 0x165, script: 0x5a, flags: 0x0}, @@ -2191,7 +2199,7 @@ var likelyLang = [1330]likelyScriptRegion{ 643: {region: 0x165, script: 0x5a, flags: 0x0}, 644: {region: 0x165, script: 0x5a, flags: 0x0}, 645: {region: 0x165, script: 0x2c, flags: 0x0}, - 646: {region: 0x123, script: 0xe6, flags: 0x0}, + 646: {region: 0x123, script: 0xeb, flags: 0x0}, 647: {region: 0xe8, script: 0x5, flags: 0x0}, 648: {region: 0x165, script: 0x5a, flags: 0x0}, 649: {region: 0x165, script: 0x5a, flags: 0x0}, @@ -2211,7 +2219,7 @@ var likelyLang = [1330]likelyScriptRegion{ 663: {region: 0x165, script: 0x5a, flags: 0x0}, 664: {region: 0x95, script: 0x5a, flags: 0x0}, 665: {region: 0x165, script: 0x5a, flags: 0x0}, - 666: {region: 0x53, script: 0xe6, flags: 0x0}, + 666: {region: 0x53, script: 0xeb, flags: 0x0}, 667: {region: 0x165, script: 0x5a, flags: 0x0}, 668: {region: 0x165, script: 0x5a, flags: 0x0}, 669: {region: 0x165, script: 0x5a, flags: 0x0}, @@ -2235,7 +2243,7 @@ var likelyLang = [1330]likelyScriptRegion{ 687: {region: 0x135, script: 0x5a, flags: 0x0}, 688: {region: 0x165, script: 0x5a, flags: 0x0}, 689: {region: 0x165, script: 0x5a, flags: 0x0}, - 690: {region: 0x99, script: 0xe1, flags: 0x0}, + 690: {region: 0x99, script: 0xe6, flags: 0x0}, 691: {region: 0x9e, script: 0x5a, flags: 0x0}, 692: {region: 0x165, script: 0x5a, flags: 0x0}, 693: {region: 0x4b, script: 0x5a, flags: 0x0}, @@ -2257,7 +2265,7 @@ var likelyLang = [1330]likelyScriptRegion{ 709: {region: 0xa4, script: 0x5a, flags: 0x0}, 710: {region: 0x9c, script: 0x5, flags: 0x0}, 711: {region: 0xb8, script: 0x5a, flags: 0x0}, - 712: {region: 0x123, script: 0xe6, flags: 0x0}, + 712: {region: 0x123, script: 0xeb, flags: 0x0}, 713: {region: 0x53, script: 0x3b, flags: 0x0}, 714: {region: 0x12b, script: 0x5a, flags: 0x0}, 715: {region: 0x95, script: 0x5a, flags: 0x0}, @@ -2424,7 +2432,7 @@ var likelyLang = [1330]likelyScriptRegion{ 879: {region: 0xda, script: 0x5a, flags: 0x0}, 880: {region: 0x123, script: 0x56, flags: 0x0}, 881: {region: 0x99, script: 0x22, flags: 0x0}, - 882: {region: 0x10c, script: 0xc4, flags: 0x0}, + 882: {region: 0x10c, script: 0xc9, flags: 0x0}, 883: {region: 0x165, script: 0x5a, flags: 0x0}, 884: {region: 0x165, script: 0x5a, flags: 0x0}, 885: {region: 0x84, script: 0x7c, flags: 0x0}, @@ -2478,11 +2486,11 @@ var likelyLang = [1330]likelyScriptRegion{ 934: {region: 0x135, script: 0x5a, flags: 0x0}, 935: {region: 0x49, script: 0x5a, flags: 0x0}, 936: {region: 0x165, script: 0x5a, flags: 0x0}, - 937: {region: 0x9c, script: 0xf0, flags: 0x0}, + 937: {region: 0x9c, script: 0xf7, flags: 0x0}, 938: {region: 0x165, script: 0x5a, flags: 0x0}, 939: {region: 0x60, script: 0x5a, flags: 0x0}, 940: {region: 0x165, script: 0x5, flags: 0x0}, - 941: {region: 0xb0, script: 0x8b, flags: 0x0}, + 941: {region: 0xb0, script: 0x8e, flags: 0x0}, 943: {region: 0x165, script: 0x5a, flags: 0x0}, 944: {region: 0x165, script: 0x5a, flags: 0x0}, 945: {region: 0x99, script: 0x12, flags: 0x0}, @@ -2548,7 +2556,7 @@ var likelyLang = [1330]likelyScriptRegion{ 1005: {region: 0x95, script: 0x5a, flags: 0x0}, 1006: {region: 0x99, script: 0x5a, flags: 0x0}, 1007: {region: 0x114, script: 
0x5a, flags: 0x0}, - 1008: {region: 0x99, script: 0xc8, flags: 0x0}, + 1008: {region: 0x99, script: 0xcd, flags: 0x0}, 1009: {region: 0x165, script: 0x5a, flags: 0x0}, 1010: {region: 0x165, script: 0x5a, flags: 0x0}, 1011: {region: 0x12f, script: 0x5a, flags: 0x0}, @@ -2571,11 +2579,11 @@ var likelyLang = [1330]likelyScriptRegion{ 1028: {region: 0xb6, script: 0x5a, flags: 0x0}, 1029: {region: 0x165, script: 0x2c, flags: 0x0}, 1030: {region: 0x165, script: 0x5a, flags: 0x0}, - 1032: {region: 0xba, script: 0xe3, flags: 0x0}, + 1032: {region: 0xba, script: 0xe8, flags: 0x0}, 1033: {region: 0x165, script: 0x5a, flags: 0x0}, 1034: {region: 0xc4, script: 0x75, flags: 0x0}, 1035: {region: 0x165, script: 0x5, flags: 0x0}, - 1036: {region: 0xb3, script: 0xcf, flags: 0x0}, + 1036: {region: 0xb3, script: 0xd4, flags: 0x0}, 1037: {region: 0x6f, script: 0x5a, flags: 0x0}, 1038: {region: 0x165, script: 0x5a, flags: 0x0}, 1039: {region: 0x165, script: 0x5a, flags: 0x0}, @@ -2594,7 +2602,7 @@ var likelyLang = [1330]likelyScriptRegion{ 1052: {region: 0x10c, script: 0x5a, flags: 0x0}, 1054: {region: 0x10c, script: 0x5a, flags: 0x0}, 1055: {region: 0x72, script: 0x5a, flags: 0x0}, - 1056: {region: 0x97, script: 0xc5, flags: 0x0}, + 1056: {region: 0x97, script: 0xca, flags: 0x0}, 1057: {region: 0x165, script: 0x5a, flags: 0x0}, 1058: {region: 0x72, script: 0x5a, flags: 0x0}, 1059: {region: 0x164, script: 0x5a, flags: 0x0}, @@ -2606,14 +2614,14 @@ var likelyLang = [1330]likelyScriptRegion{ 1065: {region: 0x115, script: 0x5a, flags: 0x0}, 1066: {region: 0x165, script: 0x5a, flags: 0x0}, 1067: {region: 0x165, script: 0x5a, flags: 0x0}, - 1068: {region: 0x123, script: 0xe6, flags: 0x0}, + 1068: {region: 0x123, script: 0xeb, flags: 0x0}, 1069: {region: 0x165, script: 0x5a, flags: 0x0}, 1070: {region: 0x165, script: 0x5a, flags: 0x0}, 1071: {region: 0x165, script: 0x5a, flags: 0x0}, 1072: {region: 0x165, script: 0x5a, flags: 0x0}, 1073: {region: 0x27, script: 0x5a, flags: 0x0}, 1074: {region: 0x37, script: 0x5, flags: 0x1}, - 1075: {region: 0x99, script: 0xd2, flags: 0x0}, + 1075: {region: 0x99, script: 0xd7, flags: 0x0}, 1076: {region: 0x116, script: 0x5a, flags: 0x0}, 1077: {region: 0x114, script: 0x5a, flags: 0x0}, 1078: {region: 0x99, script: 0x22, flags: 0x0}, @@ -2640,9 +2648,9 @@ var likelyLang = [1330]likelyScriptRegion{ 1099: {region: 0x95, script: 0x5a, flags: 0x0}, 1100: {region: 0x165, script: 0x5a, flags: 0x0}, 1101: {region: 0x35, script: 0xe, flags: 0x0}, - 1102: {region: 0x9b, script: 0xd6, flags: 0x0}, + 1102: {region: 0x9b, script: 0xdb, flags: 0x0}, 1103: {region: 0xe9, script: 0x5a, flags: 0x0}, - 1104: {region: 0x99, script: 0xde, flags: 0x0}, + 1104: {region: 0x99, script: 0xe3, flags: 0x0}, 1105: {region: 0xdb, script: 0x22, flags: 0x0}, 1106: {region: 0x165, script: 0x5a, flags: 0x0}, 1107: {region: 0x165, script: 0x5a, flags: 0x0}, @@ -2656,10 +2664,10 @@ var likelyLang = [1330]likelyScriptRegion{ 1115: {region: 0x165, script: 0x5a, flags: 0x0}, 1116: {region: 0x165, script: 0x5a, flags: 0x0}, 1117: {region: 0x99, script: 0x52, flags: 0x0}, - 1118: {region: 0x53, script: 0xdc, flags: 0x0}, + 1118: {region: 0x53, script: 0xe1, flags: 0x0}, 1119: {region: 0xdb, script: 0x22, flags: 0x0}, 1120: {region: 0xdb, script: 0x22, flags: 0x0}, - 1121: {region: 0x99, script: 0xe1, flags: 0x0}, + 1121: {region: 0x99, script: 0xe6, flags: 0x0}, 1122: {region: 0x165, script: 0x5a, flags: 0x0}, 1123: {region: 0x112, script: 0x5a, flags: 0x0}, 1124: {region: 0x131, script: 0x5a, flags: 0x0}, @@ -2669,7 
+2677,7 @@ var likelyLang = [1330]likelyScriptRegion{ 1128: {region: 0x165, script: 0x5a, flags: 0x0}, 1129: {region: 0x165, script: 0x5a, flags: 0x0}, 1130: {region: 0x165, script: 0x5a, flags: 0x0}, - 1131: {region: 0x123, script: 0xe6, flags: 0x0}, + 1131: {region: 0x123, script: 0xeb, flags: 0x0}, 1132: {region: 0xdb, script: 0x22, flags: 0x0}, 1133: {region: 0xdb, script: 0x22, flags: 0x0}, 1134: {region: 0xdb, script: 0x22, flags: 0x0}, @@ -2708,14 +2716,14 @@ var likelyLang = [1330]likelyScriptRegion{ 1167: {region: 0x87, script: 0x34, flags: 0x0}, 1168: {region: 0xdb, script: 0x22, flags: 0x0}, 1169: {region: 0xe7, script: 0x5a, flags: 0x0}, - 1170: {region: 0x43, script: 0xe7, flags: 0x0}, + 1170: {region: 0x43, script: 0xec, flags: 0x0}, 1171: {region: 0x165, script: 0x5a, flags: 0x0}, 1172: {region: 0x106, script: 0x20, flags: 0x0}, 1173: {region: 0x165, script: 0x5a, flags: 0x0}, 1174: {region: 0x165, script: 0x5a, flags: 0x0}, 1175: {region: 0x131, script: 0x5a, flags: 0x0}, 1176: {region: 0x165, script: 0x5a, flags: 0x0}, - 1177: {region: 0x123, script: 0xe6, flags: 0x0}, + 1177: {region: 0x123, script: 0xeb, flags: 0x0}, 1178: {region: 0x32, script: 0x5a, flags: 0x0}, 1179: {region: 0x165, script: 0x5a, flags: 0x0}, 1180: {region: 0x165, script: 0x5a, flags: 0x0}, @@ -2726,7 +2734,7 @@ var likelyLang = [1330]likelyScriptRegion{ 1185: {region: 0x165, script: 0x5a, flags: 0x0}, 1187: {region: 0x165, script: 0x5a, flags: 0x0}, 1188: {region: 0xd4, script: 0x5a, flags: 0x0}, - 1189: {region: 0x53, script: 0xdf, flags: 0x0}, + 1189: {region: 0x53, script: 0xe4, flags: 0x0}, 1190: {region: 0xe5, script: 0x5a, flags: 0x0}, 1191: {region: 0x165, script: 0x5a, flags: 0x0}, 1192: {region: 0x106, script: 0x20, flags: 0x0}, @@ -2734,7 +2742,7 @@ var likelyLang = [1330]likelyScriptRegion{ 1194: {region: 0x165, script: 0x5a, flags: 0x0}, 1195: {region: 0x106, script: 0x20, flags: 0x0}, 1196: {region: 0x3f, script: 0x4, flags: 0x1}, - 1197: {region: 0x11c, script: 0xea, flags: 0x0}, + 1197: {region: 0x11c, script: 0xf0, flags: 0x0}, 1198: {region: 0x130, script: 0x20, flags: 0x0}, 1199: {region: 0x75, script: 0x5a, flags: 0x0}, 1200: {region: 0x2a, script: 0x5a, flags: 0x0}, @@ -2750,7 +2758,7 @@ var likelyLang = [1330]likelyScriptRegion{ 1211: {region: 0x165, script: 0x5a, flags: 0x0}, 1212: {region: 0x46, script: 0x4, flags: 0x1}, 1213: {region: 0x165, script: 0x5a, flags: 0x0}, - 1214: {region: 0xb4, script: 0xeb, flags: 0x0}, + 1214: {region: 0xb4, script: 0xf1, flags: 0x0}, 1215: {region: 0x165, script: 0x5a, flags: 0x0}, 1216: {region: 0x161, script: 0x5a, flags: 0x0}, 1217: {region: 0x9e, script: 0x5a, flags: 0x0}, @@ -2773,7 +2781,7 @@ var likelyLang = [1330]likelyScriptRegion{ 1234: {region: 0x165, script: 0x5a, flags: 0x0}, 1235: {region: 0xe7, script: 0x5a, flags: 0x0}, 1236: {region: 0x2f, script: 0x5a, flags: 0x0}, - 1237: {region: 0x99, script: 0xe1, flags: 0x0}, + 1237: {region: 0x99, script: 0xe6, flags: 0x0}, 1238: {region: 0x99, script: 0x22, flags: 0x0}, 1239: {region: 0x165, script: 0x5a, flags: 0x0}, 1240: {region: 0x165, script: 0x5a, flags: 0x0}, @@ -2813,9 +2821,9 @@ var likelyLang = [1330]likelyScriptRegion{ 1274: {region: 0x99, script: 0x22, flags: 0x0}, 1275: {region: 0x131, script: 0x5a, flags: 0x0}, 1276: {region: 0x165, script: 0x5a, flags: 0x0}, - 1277: {region: 0x9c, script: 0x8e, flags: 0x0}, + 1277: {region: 0x9c, script: 0x91, flags: 0x0}, 1278: {region: 0x165, script: 0x5a, flags: 0x0}, - 1279: {region: 0x15e, script: 0xc7, flags: 0x0}, + 1279: {region: 
0x15e, script: 0xcc, flags: 0x0}, 1280: {region: 0x165, script: 0x5a, flags: 0x0}, 1281: {region: 0x165, script: 0x5a, flags: 0x0}, 1282: {region: 0xdb, script: 0x22, flags: 0x0}, @@ -2855,7 +2863,7 @@ var likelyLang = [1330]likelyScriptRegion{ 1316: {region: 0x10b, script: 0x5a, flags: 0x0}, 1318: {region: 0xa8, script: 0x5, flags: 0x0}, 1319: {region: 0xd9, script: 0x5a, flags: 0x0}, - 1320: {region: 0xba, script: 0xe3, flags: 0x0}, + 1320: {region: 0xba, script: 0xe8, flags: 0x0}, 1321: {region: 0x4d, script: 0x14, flags: 0x1}, 1322: {region: 0x53, script: 0x7d, flags: 0x0}, 1323: {region: 0x165, script: 0x5a, flags: 0x0}, @@ -2867,11 +2875,11 @@ var likelyLang = [1330]likelyScriptRegion{ } // likelyLangList holds lists info associated with likelyLang. -// Size: 388 bytes, 97 elements +// Size: 582 bytes, 97 elements var likelyLangList = [97]likelyScriptRegion{ 0: {region: 0x9c, script: 0x7, flags: 0x0}, 1: {region: 0xa1, script: 0x78, flags: 0x2}, - 2: {region: 0x11c, script: 0x84, flags: 0x2}, + 2: {region: 0x11c, script: 0x85, flags: 0x2}, 3: {region: 0x32, script: 0x5a, flags: 0x0}, 4: {region: 0x9b, script: 0x5, flags: 0x4}, 5: {region: 0x9c, script: 0x5, flags: 0x4}, @@ -2880,7 +2888,7 @@ var likelyLangList = [97]likelyScriptRegion{ 8: {region: 0x106, script: 0x20, flags: 0x0}, 9: {region: 0x38, script: 0x2f, flags: 0x2}, 10: {region: 0x135, script: 0x5a, flags: 0x0}, - 11: {region: 0x7b, script: 0xca, flags: 0x2}, + 11: {region: 0x7b, script: 0xcf, flags: 0x2}, 12: {region: 0x114, script: 0x5a, flags: 0x0}, 13: {region: 0x84, script: 0x1, flags: 0x2}, 14: {region: 0x5d, script: 0x1f, flags: 0x0}, @@ -2916,14 +2924,14 @@ var likelyLangList = [97]likelyScriptRegion{ 44: {region: 0x99, script: 0x36, flags: 0x0}, 45: {region: 0xe8, script: 0x5, flags: 0x4}, 46: {region: 0xe8, script: 0x5, flags: 0x2}, - 47: {region: 0x9c, script: 0x88, flags: 0x0}, - 48: {region: 0x53, script: 0x89, flags: 0x2}, - 49: {region: 0xba, script: 0xe3, flags: 0x0}, + 47: {region: 0x9c, script: 0x8b, flags: 0x0}, + 48: {region: 0x53, script: 0x8c, flags: 0x2}, + 49: {region: 0xba, script: 0xe8, flags: 0x0}, 50: {region: 0xd9, script: 0x5a, flags: 0x4}, 51: {region: 0xe8, script: 0x5, flags: 0x0}, 52: {region: 0x99, script: 0x22, flags: 0x2}, 53: {region: 0x99, script: 0x4f, flags: 0x2}, - 54: {region: 0x99, script: 0xce, flags: 0x2}, + 54: {region: 0x99, script: 0xd3, flags: 0x2}, 55: {region: 0x105, script: 0x20, flags: 0x0}, 56: {region: 0xbd, script: 0x5a, flags: 0x4}, 57: {region: 0x104, script: 0x5a, flags: 0x4}, @@ -2970,7 +2978,7 @@ var likelyLangList = [97]likelyScriptRegion{ type likelyLangScript struct { lang uint16 - script uint8 + script uint16 flags uint8 } @@ -2979,7 +2987,7 @@ type likelyLangScript struct { // for a given regionID, lang and script are the index and size respectively // of the list in likelyRegionList. // TODO: exclude containers and user-definable regions from the list. 
-// Size: 1432 bytes, 358 elements +// Size: 2148 bytes, 358 elements var likelyRegion = [358]likelyLangScript{ 34: {lang: 0xd7, script: 0x5a, flags: 0x0}, 35: {lang: 0x3a, script: 0x5, flags: 0x0}, @@ -3086,7 +3094,7 @@ var likelyRegion = [358]likelyLangScript{ 175: {lang: 0x27, script: 0x2, flags: 0x1}, 176: {lang: 0x3a, script: 0x5, flags: 0x0}, 178: {lang: 0x10d, script: 0x5a, flags: 0x0}, - 179: {lang: 0x40c, script: 0xcf, flags: 0x0}, + 179: {lang: 0x40c, script: 0xd4, flags: 0x0}, 181: {lang: 0x43b, script: 0x5a, flags: 0x0}, 182: {lang: 0x2c0, script: 0x5a, flags: 0x0}, 183: {lang: 0x15e, script: 0x5a, flags: 0x0}, @@ -3107,7 +3115,7 @@ var likelyRegion = [358]likelyLangScript{ 201: {lang: 0x35, script: 0x2, flags: 0x1}, 203: {lang: 0x320, script: 0x5a, flags: 0x0}, 204: {lang: 0x37, script: 0x3, flags: 0x1}, - 205: {lang: 0x128, script: 0xe5, flags: 0x0}, + 205: {lang: 0x128, script: 0xea, flags: 0x0}, 207: {lang: 0x13e, script: 0x5a, flags: 0x0}, 208: {lang: 0x31f, script: 0x5a, flags: 0x0}, 209: {lang: 0x3c0, script: 0x5a, flags: 0x0}, @@ -3185,7 +3193,7 @@ var likelyRegion = [358]likelyLangScript{ } // likelyRegionList holds lists info associated with likelyRegion. -// Size: 372 bytes, 93 elements +// Size: 558 bytes, 93 elements var likelyRegionList = [93]likelyLangScript{ 0: {lang: 0x148, script: 0x5, flags: 0x0}, 1: {lang: 0x476, script: 0x5a, flags: 0x0}, @@ -3195,12 +3203,12 @@ var likelyRegionList = [93]likelyLangScript{ 5: {lang: 0x274, script: 0x5a, flags: 0x0}, 6: {lang: 0xb7, script: 0x5a, flags: 0x0}, 7: {lang: 0x432, script: 0x20, flags: 0x0}, - 8: {lang: 0x12d, script: 0xe7, flags: 0x0}, + 8: {lang: 0x12d, script: 0xec, flags: 0x0}, 9: {lang: 0x351, script: 0x22, flags: 0x0}, 10: {lang: 0x529, script: 0x3b, flags: 0x0}, 11: {lang: 0x4ac, script: 0x5, flags: 0x0}, 12: {lang: 0x523, script: 0x5a, flags: 0x0}, - 13: {lang: 0x29a, script: 0xe6, flags: 0x0}, + 13: {lang: 0x29a, script: 0xeb, flags: 0x0}, 14: {lang: 0x136, script: 0x34, flags: 0x0}, 15: {lang: 0x48a, script: 0x5a, flags: 0x0}, 16: {lang: 0x3a, script: 0x5, flags: 0x0}, @@ -3223,11 +3231,11 @@ var likelyRegionList = [93]likelyLangScript{ 33: {lang: 0x476, script: 0x5a, flags: 0x0}, 34: {lang: 0x24a, script: 0x4e, flags: 0x0}, 35: {lang: 0xe6, script: 0x5, flags: 0x0}, - 36: {lang: 0x226, script: 0xe6, flags: 0x0}, + 36: {lang: 0x226, script: 0xeb, flags: 0x0}, 37: {lang: 0x3a, script: 0x5, flags: 0x0}, 38: {lang: 0x15e, script: 0x5a, flags: 0x0}, 39: {lang: 0x2b8, script: 0x57, flags: 0x0}, - 40: {lang: 0x226, script: 0xe6, flags: 0x0}, + 40: {lang: 0x226, script: 0xeb, flags: 0x0}, 41: {lang: 0x3a, script: 0x5, flags: 0x0}, 42: {lang: 0x15e, script: 0x5a, flags: 0x0}, 43: {lang: 0x3dc, script: 0x5a, flags: 0x0}, @@ -3260,7 +3268,7 @@ var likelyRegionList = [93]likelyLangScript{ 70: {lang: 0x15e, script: 0x5a, flags: 0x0}, 71: {lang: 0x15e, script: 0x5a, flags: 0x0}, 72: {lang: 0x35, script: 0x5, flags: 0x0}, - 73: {lang: 0x46b, script: 0xe6, flags: 0x0}, + 73: {lang: 0x46b, script: 0xeb, flags: 0x0}, 74: {lang: 0x2ec, script: 0x5, flags: 0x0}, 75: {lang: 0x30f, script: 0x75, flags: 0x0}, 76: {lang: 0x467, script: 0x20, flags: 0x0}, @@ -3285,7 +3293,7 @@ var likelyRegionList = [93]likelyLangScript{ type likelyTag struct { lang uint16 region uint16 - script uint8 + script uint16 } // Size: 198 bytes, 33 elements @@ -3446,8 +3454,8 @@ var regionInclusionNext = [73]uint8{ type parentRel struct { lang uint16 - script uint8 - maxScript uint8 + script uint16 + maxScript uint16 toRegion uint16 fromRegion 
[]uint16 } @@ -3461,4 +3469,4 @@ var parents = [5]parentRel{ 4: {lang: 0x529, script: 0x3c, maxScript: 0x3c, toRegion: 0x8d, fromRegion: []uint16{0xc6}}, } -// Total table size 26398 bytes (25KiB); checksum: 1C859EA7 +// Total table size 30244 bytes (29KiB); checksum: B6B15F30 diff --git a/vendor/golang.org/x/text/internal/utf8internal/utf8internal.go b/vendor/golang.org/x/text/internal/utf8internal/utf8internal.go index 575cea870..e5c53b1b3 100644 --- a/vendor/golang.org/x/text/internal/utf8internal/utf8internal.go +++ b/vendor/golang.org/x/text/internal/utf8internal/utf8internal.go @@ -74,7 +74,7 @@ type AcceptRange struct { // AcceptRanges is a slice of AcceptRange values. For a given byte sequence b // -// AcceptRanges[First[b[0]]>>AcceptShift] +// AcceptRanges[First[b[0]]>>AcceptShift] // // will give the value of AcceptRange for the multi-byte UTF-8 sequence starting // at b[0]. diff --git a/vendor/golang.org/x/text/language/doc.go b/vendor/golang.org/x/text/language/doc.go index 8afecd50e..212b77c90 100644 --- a/vendor/golang.org/x/text/language/doc.go +++ b/vendor/golang.org/x/text/language/doc.go @@ -10,18 +10,17 @@ // and provides the user with the best experience // (see https://blog.golang.org/matchlang). // -// -// Matching preferred against supported languages +// # Matching preferred against supported languages // // A Matcher for an application that supports English, Australian English, // Danish, and standard Mandarin can be created as follows: // -// var matcher = language.NewMatcher([]language.Tag{ -// language.English, // The first language is used as fallback. -// language.MustParse("en-AU"), -// language.Danish, -// language.Chinese, -// }) +// var matcher = language.NewMatcher([]language.Tag{ +// language.English, // The first language is used as fallback. +// language.MustParse("en-AU"), +// language.Danish, +// language.Chinese, +// }) // // This list of supported languages is typically implied by the languages for // which there exists translations of the user interface. @@ -30,14 +29,14 @@ // language tags. // The MatchString finds best matches for such strings: // -// handler(w http.ResponseWriter, r *http.Request) { -// lang, _ := r.Cookie("lang") -// accept := r.Header.Get("Accept-Language") -// tag, _ := language.MatchStrings(matcher, lang.String(), accept) +// handler(w http.ResponseWriter, r *http.Request) { +// lang, _ := r.Cookie("lang") +// accept := r.Header.Get("Accept-Language") +// tag, _ := language.MatchStrings(matcher, lang.String(), accept) // -// // tag should now be used for the initialization of any -// // locale-specific service. -// } +// // tag should now be used for the initialization of any +// // locale-specific service. +// } // // The Matcher's Match method can be used to match Tags directly. // @@ -48,8 +47,7 @@ // For instance, it will know that a reader of Bokmål Danish can read Norwegian // and will know that Cantonese ("yue") is a good match for "zh-HK". // -// -// Using match results +// # Using match results // // To guarantee a consistent user experience to the user it is important to // use the same language tag for the selection of any locale-specific services. @@ -58,9 +56,9 @@ // More subtly confusing is using the wrong sorting order or casing // algorithm for a certain language. // -// All the packages in x/text that provide locale-specific services -// (e.g. collate, cases) should be initialized with the tag that was -// obtained at the start of an interaction with the user. 
+// All the packages in x/text that provide locale-specific services +// (e.g. collate, cases) should be initialized with the tag that was +// obtained at the start of an interaction with the user. // // Note that Tag that is returned by Match and MatchString may differ from any // of the supported languages, as it may contain carried over settings from @@ -70,8 +68,7 @@ // Match and MatchString both return the index of the matched supported tag // to simplify associating such data with the matched tag. // -// -// Canonicalization +// # Canonicalization // // If one uses the Matcher to compare languages one does not need to // worry about canonicalization. @@ -92,10 +89,9 @@ // equivalence relations. The CanonType type can be used to alter the // canonicalization form. // -// References +// # References // // BCP 47 - Tags for Identifying Languages http://tools.ietf.org/html/bcp47 -// package language // import "golang.org/x/text/language" // TODO: explanation on how to match languages for your own locale-specific diff --git a/vendor/golang.org/x/text/language/match.go b/vendor/golang.org/x/text/language/match.go index f73492134..ee45f4947 100644 --- a/vendor/golang.org/x/text/language/match.go +++ b/vendor/golang.org/x/text/language/match.go @@ -545,7 +545,7 @@ type bestMatch struct { // match as the preferred match. // // If pin is true and have and tag are a strong match, it will henceforth only -// consider matches for this language. This corresponds to the nothing that most +// consider matches for this language. This corresponds to the idea that most // users have a strong preference for the first defined language. A user can // still prefer a second language over a dialect of the preferred language by // explicitly specifying dialects, e.g. "en, nl, en-GB". 
In this case pin should diff --git a/vendor/golang.org/x/text/language/parse.go b/vendor/golang.org/x/text/language/parse.go index 59b041008..b982d9e42 100644 --- a/vendor/golang.org/x/text/language/parse.go +++ b/vendor/golang.org/x/text/language/parse.go @@ -147,6 +147,7 @@ func update(b *language.Builder, part ...interface{}) (err error) { } var errInvalidWeight = errors.New("ParseAcceptLanguage: invalid weight") +var errTagListTooLarge = errors.New("tag list exceeds max length") // ParseAcceptLanguage parses the contents of an Accept-Language header as // defined in http://www.ietf.org/rfc/rfc2616.txt and returns a list of Tags and @@ -164,6 +165,10 @@ func ParseAcceptLanguage(s string) (tag []Tag, q []float32, err error) { } }() + if strings.Count(s, "-") > 1000 { + return nil, nil, errTagListTooLarge + } + var entry string for s != "" { if entry, s = split(s, ','); entry == "" { diff --git a/vendor/golang.org/x/text/language/tables.go b/vendor/golang.org/x/text/language/tables.go index 96b57f610..34a732b69 100644 --- a/vendor/golang.org/x/text/language/tables.go +++ b/vendor/golang.org/x/text/language/tables.go @@ -39,12 +39,12 @@ const ( _Hani = 57 _Hans = 59 _Hant = 60 - _Qaaa = 143 - _Qaai = 151 - _Qabx = 192 - _Zinh = 245 - _Zyyy = 250 - _Zzzz = 251 + _Qaaa = 147 + _Qaai = 155 + _Qabx = 196 + _Zinh = 252 + _Zyyy = 257 + _Zzzz = 258 ) var regionToGroups = []uint8{ // 358 elements @@ -265,9 +265,9 @@ var matchScript = []scriptIntelligibility{ // 26 elements 13: {wantLang: 0x39d, haveLang: 0x139, wantScript: 0x36, haveScript: 0x5a, distance: 0xa}, 14: {wantLang: 0x3be, haveLang: 0x139, wantScript: 0x5, haveScript: 0x5a, distance: 0xa}, 15: {wantLang: 0x3fa, haveLang: 0x139, wantScript: 0x5, haveScript: 0x5a, distance: 0xa}, - 16: {wantLang: 0x40c, haveLang: 0x139, wantScript: 0xcf, haveScript: 0x5a, distance: 0xa}, - 17: {wantLang: 0x450, haveLang: 0x139, wantScript: 0xde, haveScript: 0x5a, distance: 0xa}, - 18: {wantLang: 0x461, haveLang: 0x139, wantScript: 0xe1, haveScript: 0x5a, distance: 0xa}, + 16: {wantLang: 0x40c, haveLang: 0x139, wantScript: 0xd4, haveScript: 0x5a, distance: 0xa}, + 17: {wantLang: 0x450, haveLang: 0x139, wantScript: 0xe3, haveScript: 0x5a, distance: 0xa}, + 18: {wantLang: 0x461, haveLang: 0x139, wantScript: 0xe6, haveScript: 0x5a, distance: 0xa}, 19: {wantLang: 0x46f, haveLang: 0x139, wantScript: 0x2c, haveScript: 0x5a, distance: 0xa}, 20: {wantLang: 0x476, haveLang: 0x3e2, wantScript: 0x5a, haveScript: 0x20, distance: 0xa}, 21: {wantLang: 0x4b4, haveLang: 0x139, wantScript: 0x5, haveScript: 0x5a, distance: 0xa}, diff --git a/vendor/golang.org/x/text/unicode/bidi/core.go b/vendor/golang.org/x/text/unicode/bidi/core.go index e4c081101..9d2ae547b 100644 --- a/vendor/golang.org/x/text/unicode/bidi/core.go +++ b/vendor/golang.org/x/text/unicode/bidi/core.go @@ -193,14 +193,14 @@ func (p *paragraph) run() { // // At the end of this function: // -// - The member variable matchingPDI is set to point to the index of the -// matching PDI character for each isolate initiator character. If there is -// no matching PDI, it is set to the length of the input text. For other -// characters, it is set to -1. -// - The member variable matchingIsolateInitiator is set to point to the -// index of the matching isolate initiator character for each PDI character. -// If there is no matching isolate initiator, or the character is not a PDI, -// it is set to -1. 
+// - The member variable matchingPDI is set to point to the index of the +// matching PDI character for each isolate initiator character. If there is +// no matching PDI, it is set to the length of the input text. For other +// characters, it is set to -1. +// - The member variable matchingIsolateInitiator is set to point to the +// index of the matching isolate initiator character for each PDI character. +// If there is no matching isolate initiator, or the character is not a PDI, +// it is set to -1. func (p *paragraph) determineMatchingIsolates() { p.matchingPDI = make([]int, p.Len()) p.matchingIsolateInitiator = make([]int, p.Len()) @@ -435,7 +435,7 @@ func maxLevel(a, b level) level { } // Rule X10, second bullet: Determine the start-of-sequence (sos) and end-of-sequence (eos) types, -// either L or R, for each isolating run sequence. +// either L or R, for each isolating run sequence. func (p *paragraph) isolatingRunSequence(indexes []int) *isolatingRunSequence { length := len(indexes) types := make([]Class, length) @@ -495,9 +495,9 @@ func (s *isolatingRunSequence) resolveWeakTypes() { if t == NSM { s.types[i] = precedingCharacterType } else { - if t.in(LRI, RLI, FSI, PDI) { - precedingCharacterType = ON - } + // if t.in(LRI, RLI, FSI, PDI) { + // precedingCharacterType = ON + // } precedingCharacterType = t } } @@ -905,7 +905,7 @@ func (p *paragraph) getLevels(linebreaks []int) []level { // Lines are concatenated from left to right. So for example, the fifth // character from the left on the third line is // -// getReordering(linebreaks)[linebreaks[1] + 4] +// getReordering(linebreaks)[linebreaks[1] + 4] // // (linebreaks[1] is the position after the last character of the second // line, which is also the index of the first character on the third line, diff --git a/vendor/golang.org/x/text/unicode/norm/forminfo.go b/vendor/golang.org/x/text/unicode/norm/forminfo.go index 526c7033a..d69ccb4f9 100644 --- a/vendor/golang.org/x/text/unicode/norm/forminfo.go +++ b/vendor/golang.org/x/text/unicode/norm/forminfo.go @@ -110,10 +110,11 @@ func (p Properties) BoundaryAfter() bool { } // We pack quick check data in 4 bits: -// 5: Combines forward (0 == false, 1 == true) -// 4..3: NFC_QC Yes(00), No (10), or Maybe (11) -// 2: NFD_QC Yes (0) or No (1). No also means there is a decomposition. -// 1..0: Number of trailing non-starters. +// +// 5: Combines forward (0 == false, 1 == true) +// 4..3: NFC_QC Yes(00), No (10), or Maybe (11) +// 2: NFD_QC Yes (0) or No (1). No also means there is a decomposition. +// 1..0: Number of trailing non-starters. // // When all 4 bits are zero, the character is inert, meaning it is never // influenced by normalization. diff --git a/vendor/golang.org/x/text/unicode/norm/normalize.go b/vendor/golang.org/x/text/unicode/norm/normalize.go index 95efcf26e..4747ad07a 100644 --- a/vendor/golang.org/x/text/unicode/norm/normalize.go +++ b/vendor/golang.org/x/text/unicode/norm/normalize.go @@ -18,16 +18,17 @@ import ( // A Form denotes a canonical representation of Unicode code points. // The Unicode-defined normalization and equivalence forms are: // -// NFC Unicode Normalization Form C -// NFD Unicode Normalization Form D -// NFKC Unicode Normalization Form KC -// NFKD Unicode Normalization Form KD +// NFC Unicode Normalization Form C +// NFD Unicode Normalization Form D +// NFKC Unicode Normalization Form KC +// NFKD Unicode Normalization Form KD // // For a Form f, this documentation uses the notation f(x) to mean // the bytes or string x converted to the given form. 
// A position n in x is called a boundary if conversion to the form can // proceed independently on both sides: -// f(x) == append(f(x[0:n]), f(x[n:])...) +// +// f(x) == append(f(x[0:n]), f(x[n:])...) // // References: https://unicode.org/reports/tr15/ and // https://unicode.org/notes/tn5/. diff --git a/vendor/golang.org/x/text/unicode/norm/tables13.0.0.go b/vendor/golang.org/x/text/unicode/norm/tables13.0.0.go index 96a130d30..9115ef257 100644 --- a/vendor/golang.org/x/text/unicode/norm/tables13.0.0.go +++ b/vendor/golang.org/x/text/unicode/norm/tables13.0.0.go @@ -7315,7 +7315,7 @@ const recompMapPacked = "" + "\x00V\x03\x03\x00\x00\x1e|" + // 0x00560303: 0x00001E7C "\x00v\x03\x03\x00\x00\x1e}" + // 0x00760303: 0x00001E7D "\x00V\x03#\x00\x00\x1e~" + // 0x00560323: 0x00001E7E - "\x00v\x03#\x00\x00\x1e\u007f" + // 0x00760323: 0x00001E7F + "\x00v\x03#\x00\x00\x1e\x7f" + // 0x00760323: 0x00001E7F "\x00W\x03\x00\x00\x00\x1e\x80" + // 0x00570300: 0x00001E80 "\x00w\x03\x00\x00\x00\x1e\x81" + // 0x00770300: 0x00001E81 "\x00W\x03\x01\x00\x00\x1e\x82" + // 0x00570301: 0x00001E82 @@ -7342,7 +7342,7 @@ const recompMapPacked = "" + "\x00t\x03\b\x00\x00\x1e\x97" + // 0x00740308: 0x00001E97 "\x00w\x03\n\x00\x00\x1e\x98" + // 0x0077030A: 0x00001E98 "\x00y\x03\n\x00\x00\x1e\x99" + // 0x0079030A: 0x00001E99 - "\x01\u007f\x03\a\x00\x00\x1e\x9b" + // 0x017F0307: 0x00001E9B + "\x01\x7f\x03\a\x00\x00\x1e\x9b" + // 0x017F0307: 0x00001E9B "\x00A\x03#\x00\x00\x1e\xa0" + // 0x00410323: 0x00001EA0 "\x00a\x03#\x00\x00\x1e\xa1" + // 0x00610323: 0x00001EA1 "\x00A\x03\t\x00\x00\x1e\xa2" + // 0x00410309: 0x00001EA2 diff --git a/vendor/golang.org/x/text/width/tables10.0.0.go b/vendor/golang.org/x/text/width/tables10.0.0.go index 186b1d4ef..cd9d91caf 100644 --- a/vendor/golang.org/x/text/width/tables10.0.0.go +++ b/vendor/golang.org/x/text/width/tables10.0.0.go @@ -1146,21 +1146,31 @@ var widthIndex = [1408]uint8{ } // inverseData contains 4-byte entries of the following format: -// <0 padding> +// +// <0 padding> +// // The last byte of the UTF-8-encoded rune is xor-ed with the last byte of the // UTF-8 encoding of the original rune. Mappings often have the following // pattern: -// A -> A (U+FF21 -> U+0041) -// B -> B (U+FF22 -> U+0042) -// ... +// +// A -> A (U+FF21 -> U+0041) +// B -> B (U+FF22 -> U+0042) +// ... +// // By xor-ing the last byte the same entry can be shared by many mappings. This // reduces the total number of distinct entries by about two thirds. // The resulting entry for the aforementioned mappings is -// { 0x01, 0xE0, 0x00, 0x00 } +// +// { 0x01, 0xE0, 0x00, 0x00 } +// // Using this entry to map U+FF21 (UTF-8 [EF BC A1]), we get -// E0 ^ A1 = 41. +// +// E0 ^ A1 = 41. +// // Similarly, for U+FF22 (UTF-8 [EF BC A2]), we get -// E0 ^ A2 = 42. +// +// E0 ^ A2 = 42. +// // Note that because of the xor-ing, the byte sequence stored in the entry is // not valid UTF-8. var inverseData = [150][4]byte{ diff --git a/vendor/golang.org/x/text/width/tables11.0.0.go b/vendor/golang.org/x/text/width/tables11.0.0.go index 990f7622f..327eaef9b 100644 --- a/vendor/golang.org/x/text/width/tables11.0.0.go +++ b/vendor/golang.org/x/text/width/tables11.0.0.go @@ -1158,21 +1158,31 @@ var widthIndex = [1408]uint8{ } // inverseData contains 4-byte entries of the following format: -// <0 padding> +// +// <0 padding> +// // The last byte of the UTF-8-encoded rune is xor-ed with the last byte of the // UTF-8 encoding of the original rune. 
Mappings often have the following // pattern: -// A -> A (U+FF21 -> U+0041) -// B -> B (U+FF22 -> U+0042) -// ... +// +// A -> A (U+FF21 -> U+0041) +// B -> B (U+FF22 -> U+0042) +// ... +// // By xor-ing the last byte the same entry can be shared by many mappings. This // reduces the total number of distinct entries by about two thirds. // The resulting entry for the aforementioned mappings is -// { 0x01, 0xE0, 0x00, 0x00 } +// +// { 0x01, 0xE0, 0x00, 0x00 } +// // Using this entry to map U+FF21 (UTF-8 [EF BC A1]), we get -// E0 ^ A1 = 41. +// +// E0 ^ A1 = 41. +// // Similarly, for U+FF22 (UTF-8 [EF BC A2]), we get -// E0 ^ A2 = 42. +// +// E0 ^ A2 = 42. +// // Note that because of the xor-ing, the byte sequence stored in the entry is // not valid UTF-8. var inverseData = [150][4]byte{ diff --git a/vendor/golang.org/x/text/width/tables12.0.0.go b/vendor/golang.org/x/text/width/tables12.0.0.go index 85296297e..5c14ade6d 100644 --- a/vendor/golang.org/x/text/width/tables12.0.0.go +++ b/vendor/golang.org/x/text/width/tables12.0.0.go @@ -1178,21 +1178,31 @@ var widthIndex = [1408]uint8{ } // inverseData contains 4-byte entries of the following format: -// <0 padding> +// +// <0 padding> +// // The last byte of the UTF-8-encoded rune is xor-ed with the last byte of the // UTF-8 encoding of the original rune. Mappings often have the following // pattern: -// A -> A (U+FF21 -> U+0041) -// B -> B (U+FF22 -> U+0042) -// ... +// +// A -> A (U+FF21 -> U+0041) +// B -> B (U+FF22 -> U+0042) +// ... +// // By xor-ing the last byte the same entry can be shared by many mappings. This // reduces the total number of distinct entries by about two thirds. // The resulting entry for the aforementioned mappings is -// { 0x01, 0xE0, 0x00, 0x00 } +// +// { 0x01, 0xE0, 0x00, 0x00 } +// // Using this entry to map U+FF21 (UTF-8 [EF BC A1]), we get -// E0 ^ A1 = 41. +// +// E0 ^ A1 = 41. +// // Similarly, for U+FF22 (UTF-8 [EF BC A2]), we get -// E0 ^ A2 = 42. +// +// E0 ^ A2 = 42. +// // Note that because of the xor-ing, the byte sequence stored in the entry is // not valid UTF-8. var inverseData = [150][4]byte{ diff --git a/vendor/golang.org/x/text/width/tables13.0.0.go b/vendor/golang.org/x/text/width/tables13.0.0.go index bac3f1aee..ab258e384 100644 --- a/vendor/golang.org/x/text/width/tables13.0.0.go +++ b/vendor/golang.org/x/text/width/tables13.0.0.go @@ -1179,21 +1179,31 @@ var widthIndex = [1408]uint8{ } // inverseData contains 4-byte entries of the following format: -// <0 padding> +// +// <0 padding> +// // The last byte of the UTF-8-encoded rune is xor-ed with the last byte of the // UTF-8 encoding of the original rune. Mappings often have the following // pattern: -// A -> A (U+FF21 -> U+0041) -// B -> B (U+FF22 -> U+0042) -// ... +// +// A -> A (U+FF21 -> U+0041) +// B -> B (U+FF22 -> U+0042) +// ... +// // By xor-ing the last byte the same entry can be shared by many mappings. This // reduces the total number of distinct entries by about two thirds. // The resulting entry for the aforementioned mappings is -// { 0x01, 0xE0, 0x00, 0x00 } +// +// { 0x01, 0xE0, 0x00, 0x00 } +// // Using this entry to map U+FF21 (UTF-8 [EF BC A1]), we get -// E0 ^ A1 = 41. +// +// E0 ^ A1 = 41. +// // Similarly, for U+FF22 (UTF-8 [EF BC A2]), we get -// E0 ^ A2 = 42. +// +// E0 ^ A2 = 42. +// // Note that because of the xor-ing, the byte sequence stored in the entry is // not valid UTF-8. 
var inverseData = [150][4]byte{ diff --git a/vendor/golang.org/x/text/width/tables9.0.0.go b/vendor/golang.org/x/text/width/tables9.0.0.go index b3db84f6f..6781f3d96 100644 --- a/vendor/golang.org/x/text/width/tables9.0.0.go +++ b/vendor/golang.org/x/text/width/tables9.0.0.go @@ -1114,21 +1114,31 @@ var widthIndex = [1408]uint8{ } // inverseData contains 4-byte entries of the following format: -// <0 padding> +// +// <0 padding> +// // The last byte of the UTF-8-encoded rune is xor-ed with the last byte of the // UTF-8 encoding of the original rune. Mappings often have the following // pattern: -// A -> A (U+FF21 -> U+0041) -// B -> B (U+FF22 -> U+0042) -// ... +// +// A -> A (U+FF21 -> U+0041) +// B -> B (U+FF22 -> U+0042) +// ... +// // By xor-ing the last byte the same entry can be shared by many mappings. This // reduces the total number of distinct entries by about two thirds. // The resulting entry for the aforementioned mappings is -// { 0x01, 0xE0, 0x00, 0x00 } +// +// { 0x01, 0xE0, 0x00, 0x00 } +// // Using this entry to map U+FF21 (UTF-8 [EF BC A1]), we get -// E0 ^ A1 = 41. +// +// E0 ^ A1 = 41. +// // Similarly, for U+FF22 (UTF-8 [EF BC A2]), we get -// E0 ^ A2 = 42. +// +// E0 ^ A2 = 42. +// // Note that because of the xor-ing, the byte sequence stored in the entry is // not valid UTF-8. var inverseData = [150][4]byte{ diff --git a/vendor/modules.txt b/vendor/modules.txt index 0e1d6542f..12521bb2c 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -230,6 +230,15 @@ github.com/golang/protobuf/ptypes/wrappers # github.com/golang/snappy v0.0.4 ## explicit github.com/golang/snappy +# github.com/google/fscrypt v0.3.3 +## explicit; go 1.11 +github.com/google/fscrypt/actions +github.com/google/fscrypt/crypto +github.com/google/fscrypt/filesystem +github.com/google/fscrypt/keyring +github.com/google/fscrypt/metadata +github.com/google/fscrypt/security +github.com/google/fscrypt/util # github.com/google/gnostic v0.5.7-v3refs ## explicit; go 1.12 github.com/google/gnostic/compiler @@ -476,6 +485,9 @@ github.com/pierrec/lz4/internal/xxh32 # github.com/pkg/errors v0.9.1 ## explicit github.com/pkg/errors +# github.com/pkg/xattr v0.4.7 +## explicit; go 1.14 +github.com/pkg/xattr # github.com/pmezard/go-difflib v1.0.0 ## explicit github.com/pmezard/go-difflib/difflib @@ -594,6 +606,7 @@ go.uber.org/zap/internal/exit go.uber.org/zap/zapcore # golang.org/x/crypto v0.0.0-20220315160706-3147a52a75dd ## explicit; go 1.17 +golang.org/x/crypto/argon2 golang.org/x/crypto/blake2b golang.org/x/crypto/blowfish golang.org/x/crypto/chacha20 @@ -602,6 +615,7 @@ golang.org/x/crypto/cryptobyte/asn1 golang.org/x/crypto/curve25519 golang.org/x/crypto/curve25519/internal/field golang.org/x/crypto/ed25519 +golang.org/x/crypto/hkdf golang.org/x/crypto/internal/poly1305 golang.org/x/crypto/internal/subtle golang.org/x/crypto/pbkdf2 @@ -637,7 +651,7 @@ golang.org/x/sys/windows # golang.org/x/term v0.0.0-20210927222741-03fcf44c2211 ## explicit; go 1.17 golang.org/x/term -# golang.org/x/text v0.3.7 +# golang.org/x/text v0.3.8 ## explicit; go 1.17 golang.org/x/text/cases golang.org/x/text/encoding