diff --git a/go.mod b/go.mod index badf4dde5..034d9de5c 100644 --- a/go.mod +++ b/go.mod @@ -14,8 +14,8 @@ require ( github.com/csi-addons/spec v0.1.2-0.20220906123848-52ce69f90900 github.com/gemalto/kmip-go v0.0.8-0.20220721195433-3fe83e2d3f26 github.com/golang/protobuf v1.5.2 - github.com/google/uuid v1.3.0 github.com/google/fscrypt v0.3.3 + github.com/google/uuid v1.3.0 github.com/grpc-ecosystem/go-grpc-middleware v1.3.0 github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 github.com/hashicorp/vault/api v1.7.2 diff --git a/go.sum b/go.sum index 34b701e0a..7f408274e 100644 --- a/go.sum +++ b/go.sum @@ -1502,6 +1502,7 @@ golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20220114195835-da31bd327af9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220319134239-a9b59b0215f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220328115105-d36c6a25d886/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220408201424-a24fb2fb8a0f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220422013727-9388b58f7150/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f h1:v4INt8xihDGvnrfjMDVXGxw9wrfxYyCjk0KbXjhR55s= diff --git a/vendor/github.com/google/fscrypt/LICENSE b/vendor/github.com/google/fscrypt/LICENSE new file mode 100644 index 000000000..d64569567 --- /dev/null +++ b/vendor/github.com/google/fscrypt/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). 
+ + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/google/fscrypt/actions/callback.go b/vendor/github.com/google/fscrypt/actions/callback.go new file mode 100644 index 000000000..f15893db8 --- /dev/null +++ b/vendor/github.com/google/fscrypt/actions/callback.go @@ -0,0 +1,132 @@ +/* + * callback.go - defines how the caller of an action function passes along a key + * to be used in this package. + * + * Copyright 2017 Google Inc. + * Author: Joe Richey (joerichey@google.com) + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not + * use this file except in compliance with the License. You may obtain a copy of + * the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the + * License for the specific language governing permissions and limitations under + * the License. + */ + +package actions + +import ( + "log" + + "github.com/pkg/errors" + + "github.com/google/fscrypt/crypto" + "github.com/google/fscrypt/filesystem" + "github.com/google/fscrypt/metadata" +) + +// ProtectorInfo is the information a caller will receive about a Protector +// before they have to return the corresponding key. This is currently a +// read-only view of metadata.ProtectorData. +type ProtectorInfo struct { + data *metadata.ProtectorData +} + +// Descriptor is the Protector's descriptor used to uniquely identify it. +func (pi *ProtectorInfo) Descriptor() string { return pi.data.GetProtectorDescriptor() } + +// Source indicates the type of the descriptor (how it should be unlocked). +func (pi *ProtectorInfo) Source() metadata.SourceType { return pi.data.GetSource() } + +// Name is used to describe custom passphrase and raw key descriptors. +func (pi *ProtectorInfo) Name() string { return pi.data.GetName() } + +// UID is used to identify the user for login passphrases. +func (pi *ProtectorInfo) UID() int64 { return pi.data.GetUid() } + +// KeyFunc is passed to a function that will require some type of key. +// The info parameter is provided so the callback knows which key to provide. +// The retry parameter indicates that a previous key provided by this callback +// was incorrect (this allows for user feedback like "incorrect passphrase"). +// +// For passphrase sources, the returned key should be a passphrase. For raw +// sources, the returned key should be a 256-bit cryptographic key. Consumers +// of the callback will wipe the returned key. An error returned by the callback +// will be propagated back to the caller. +type KeyFunc func(info ProtectorInfo, retry bool) (*crypto.Key, error) + +// getWrappingKey uses the provided callback to get the wrapping key +// corresponding to the ProtectorInfo. This runs the passphrase hash for +// passphrase sources or just relays the callback for raw sources. +func getWrappingKey(info ProtectorInfo, keyFn KeyFunc, retry bool) (*crypto.Key, error) { + // For raw key sources, we can just use the key directly. + if info.Source() == metadata.SourceType_raw_key { + return keyFn(info, retry) + } + + // Run the passphrase hash for other sources. + passphrase, err := keyFn(info, retry) + if err != nil { + return nil, err + } + defer passphrase.Wipe() + + log.Printf("running passphrase hash for protector %s", info.Descriptor()) + return crypto.PassphraseHash(passphrase, info.data.Salt, info.data.Costs) +} + +// unwrapProtectorKey uses the provided callback and ProtectorInfo to return +// the unwrapped protector key. This will repeatedly call keyFn to get the +// wrapping key until the correct key is returned by the callback or the +// callback returns an error. +func unwrapProtectorKey(info ProtectorInfo, keyFn KeyFunc) (*crypto.Key, error) { + retry := false + for { + wrappingKey, err := getWrappingKey(info, keyFn, retry) + if err != nil { + return nil, err + } + + protectorKey, err := crypto.Unwrap(wrappingKey, info.data.WrappedKey) + wrappingKey.Wipe() + + switch errors.Cause(err) { + case nil: + log.Printf("valid wrapping key for protector %s", info.Descriptor()) + return protectorKey, nil + case crypto.ErrBadAuth: + // After the first failure, we let the callback know we are retrying. 
+ log.Printf("invalid wrapping key for protector %s", info.Descriptor()) + retry = true + continue + default: + return nil, err + } + } +} + +// ProtectorOption is information about a protector relative to a Policy. +type ProtectorOption struct { + ProtectorInfo + // LinkedMount is the mountpoint for a linked protector. It is nil if + // the protector is not a linked protector (or there is a LoadError). + LinkedMount *filesystem.Mount + // LoadError is non-nil if there was an error in getting the data for + // the protector. + LoadError error +} + +// OptionFunc is passed to a function that needs to unlock a Policy. +// The callback is used to specify which protector should be used to unlock a +// Policy. The descriptor indicates which Policy we are using, while the options +// correspond to the valid Protectors protecting the Policy. +// +// The OptionFunc should either return a valid index into options, which +// corresponds to the desired protector, or an error (which will be propagated +// back to the caller). +type OptionFunc func(policyDescriptor string, options []*ProtectorOption) (int, error) diff --git a/vendor/github.com/google/fscrypt/actions/config.go b/vendor/github.com/google/fscrypt/actions/config.go new file mode 100644 index 000000000..a8eb029db --- /dev/null +++ b/vendor/github.com/google/fscrypt/actions/config.go @@ -0,0 +1,293 @@ +/* + * config.go - Actions for creating a new config file, which includes new + * hashing costs and the config file's location. + * + * Copyright 2017 Google Inc. + * Author: Joe Richey (joerichey@google.com) + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not + * use this file except in compliance with the License. You may obtain a copy of + * the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. + */ + +package actions + +import ( + "bytes" + "fmt" + "log" + "os" + "runtime" + "time" + + "golang.org/x/sys/unix" + + "github.com/google/fscrypt/crypto" + "github.com/google/fscrypt/filesystem" + "github.com/google/fscrypt/metadata" + "github.com/google/fscrypt/util" +) + +// ConfigFileLocation is the location of fscrypt's global settings. This can be +// overridden by the user of this package. +var ConfigFileLocation = "/etc/fscrypt.conf" + +// ErrBadConfig is an internal error that indicates that the config struct is invalid. +type ErrBadConfig struct { + Config *metadata.Config + UnderlyingError error +} + +func (err *ErrBadConfig) Error() string { + return fmt.Sprintf(`internal error: config is invalid: %s + + The invalid config is %s`, err.UnderlyingError, err.Config) +} + +// ErrBadConfigFile indicates that the config file is invalid. +type ErrBadConfigFile struct { + Path string + UnderlyingError error +} + +func (err *ErrBadConfigFile) Error() string { + return fmt.Sprintf("%q is invalid: %s", err.Path, err.UnderlyingError) +} + +// ErrConfigFileExists indicates that the config file already exists. +type ErrConfigFileExists struct { + Path string +} + +func (err *ErrConfigFileExists) Error() string { + return fmt.Sprintf("%q already exists", err.Path) +} + +// ErrNoConfigFile indicates that the config file doesn't exist. 
+type ErrNoConfigFile struct { + Path string +} + +func (err *ErrNoConfigFile) Error() string { + return fmt.Sprintf("%q doesn't exist", err.Path) +} + +const ( + // Permissions of the config file (global readable) + configPermissions = 0644 + // Config file should be created for writing and not already exist + createFlags = os.O_CREATE | os.O_WRONLY | os.O_EXCL + // 128 MiB is a large enough amount of memory to make the password hash + // very difficult to brute force on specialized hardware, but small + // enough to work on most GNU/Linux systems. + maxMemoryBytes = 128 * 1024 * 1024 +) + +var ( + timingPassphrase = []byte("I am a fake passphrase") + timingSalt = bytes.Repeat([]byte{42}, metadata.SaltLen) +) + +// CreateConfigFile creates a new config file at the appropriate location with +// the appropriate hashing costs and encryption parameters. The hashing will be +// configured to take as long as the specified time target. In addition, the +// version of encryption policy to use may be overridden from the default of v1. +func CreateConfigFile(target time.Duration, policyVersion int64) error { + // Create the config file before computing the hashing costs, so we fail + // immediately if the program has insufficient permissions. + configFile, err := filesystem.OpenFileOverridingUmask(ConfigFileLocation, + createFlags, configPermissions) + switch { + case os.IsExist(err): + return &ErrConfigFileExists{ConfigFileLocation} + case err != nil: + return err + } + defer configFile.Close() + + config := &metadata.Config{ + Source: metadata.DefaultSource, + Options: metadata.DefaultOptions, + } + + if policyVersion != 0 { + config.Options.PolicyVersion = policyVersion + } + + if config.HashCosts, err = getHashingCosts(target); err != nil { + return err + } + + log.Printf("Creating config at %q with %v\n", ConfigFileLocation, config) + return metadata.WriteConfig(config, configFile) +} + +// getConfig returns the current configuration struct. Any fields not specified +// in the config file use the system defaults. An error is returned if the +// config file hasn't been setup with CreateConfigFile yet or the config +// contains invalid data. 
+func getConfig() (*metadata.Config, error) { + configFile, err := os.Open(ConfigFileLocation) + switch { + case os.IsNotExist(err): + return nil, &ErrNoConfigFile{ConfigFileLocation} + case err != nil: + return nil, err + } + defer configFile.Close() + + log.Printf("Reading config from %q\n", ConfigFileLocation) + config, err := metadata.ReadConfig(configFile) + if err != nil { + return nil, &ErrBadConfigFile{ConfigFileLocation, err} + } + + // Use system defaults if not specified + if config.Source == metadata.SourceType_default { + config.Source = metadata.DefaultSource + log.Printf("Falling back to source of %q", config.Source.String()) + } + if config.Options.Padding == 0 { + config.Options.Padding = metadata.DefaultOptions.Padding + log.Printf("Falling back to padding of %d", config.Options.Padding) + } + if config.Options.Contents == metadata.EncryptionOptions_default { + config.Options.Contents = metadata.DefaultOptions.Contents + log.Printf("Falling back to contents mode of %q", config.Options.Contents) + } + if config.Options.Filenames == metadata.EncryptionOptions_default { + config.Options.Filenames = metadata.DefaultOptions.Filenames + log.Printf("Falling back to filenames mode of %q", config.Options.Filenames) + } + if config.Options.PolicyVersion == 0 { + config.Options.PolicyVersion = metadata.DefaultOptions.PolicyVersion + log.Printf("Falling back to policy version of %d", config.Options.PolicyVersion) + } + + if err := config.CheckValidity(); err != nil { + return nil, &ErrBadConfigFile{ConfigFileLocation, err} + } + + return config, nil +} + +// getHashingCosts returns hashing costs so that hashing a password will take +// approximately the target time. This is done using the total amount of RAM, +// the number of CPUs present, and by running the passphrase hash many times. +func getHashingCosts(target time.Duration) (*metadata.HashingCosts, error) { + log.Printf("Finding hashing costs that take %v\n", target) + + // Start out with the minimal possible costs that use all the CPUs. + nCPUs := int64(runtime.NumCPU()) + costs := &metadata.HashingCosts{ + Time: 1, + Memory: 8 * nCPUs, + Parallelism: nCPUs, + } + + // If even the minimal costs are not fast enough, just return the + // minimal costs and log a warning. + t, err := timeHashingCosts(costs) + if err != nil { + return nil, err + } + log.Printf("Min Costs={%v}\t-> %v\n", costs, t) + + if t > target { + log.Printf("time exceeded the target of %v.\n", target) + return costs, nil + } + + // Now we start doubling the costs until we reach the target. + memoryKiBLimit := memoryBytesLimit() / 1024 + for { + // Store a copy of the previous costs + costsPrev := *costs + tPrev := t + + // Double the memory up to the max, then double the time. + if costs.Memory < memoryKiBLimit { + costs.Memory = util.MinInt64(2*costs.Memory, memoryKiBLimit) + } else { + costs.Time *= 2 + } + + // If our hashing failed, return the last good set of costs. + if t, err = timeHashingCosts(costs); err != nil { + log.Printf("Hashing with costs={%v} failed: %v\n", costs, err) + return &costsPrev, nil + } + log.Printf("Costs={%v}\t-> %v\n", costs, t) + + // If we have reached the target time, we return a set of costs + // based on the linear interpolation between the last two times. 
+ if t >= target { + f := float64(target-tPrev) / float64(t-tPrev) + return &metadata.HashingCosts{ + Time: betweenCosts(costsPrev.Time, costs.Time, f), + Memory: betweenCosts(costsPrev.Memory, costs.Memory, f), + Parallelism: costs.Parallelism, + }, nil + } + } +} + +// memoryBytesLimit returns the maximum amount of memory we will use for +// passphrase hashing. This will never be more than a reasonable maximum (for +// compatibility) or an 8th the available system RAM. +func memoryBytesLimit() int64 { + // The sysinfo syscall only fails if given a bad address + var info unix.Sysinfo_t + err := unix.Sysinfo(&info) + util.NeverError(err) + + totalRAMBytes := int64(info.Totalram) + return util.MinInt64(totalRAMBytes/8, maxMemoryBytes) +} + +// betweenCosts returns a cost between a and b. Specifically, it returns the +// floor of a + f*(b-a). This way, f=0 returns a and f=1 returns b. +func betweenCosts(a, b int64, f float64) int64 { + return a + int64(f*float64(b-a)) +} + +// timeHashingCosts runs the passphrase hash with the specified costs and +// returns the time it takes to hash the passphrase. +func timeHashingCosts(costs *metadata.HashingCosts) (time.Duration, error) { + passphrase, err := crypto.NewKeyFromReader(bytes.NewReader(timingPassphrase)) + if err != nil { + return 0, err + } + defer passphrase.Wipe() + + // Be sure to measure CPU time, not wall time (time.Now) + begin := cpuTimeInNanoseconds() + hash, err := crypto.PassphraseHash(passphrase, timingSalt, costs) + if err == nil { + hash.Wipe() + } + end := cpuTimeInNanoseconds() + + // This uses a lot of memory, run the garbage collector + runtime.GC() + + return time.Duration((end - begin) / costs.Parallelism), nil +} + +// cpuTimeInNanoseconds returns the nanosecond count based on the process's CPU usage. +// This number has no absolute meaning, only relative meaning to other calls. +func cpuTimeInNanoseconds() int64 { + var ts unix.Timespec + err := unix.ClockGettime(unix.CLOCK_PROCESS_CPUTIME_ID, &ts) + // ClockGettime fails if given a bad address or on a VERY old system. + util.NeverError(err) + return unix.TimespecToNsec(ts) +} diff --git a/vendor/github.com/google/fscrypt/actions/context.go b/vendor/github.com/google/fscrypt/actions/context.go new file mode 100644 index 000000000..ac3f6d304 --- /dev/null +++ b/vendor/github.com/google/fscrypt/actions/context.go @@ -0,0 +1,184 @@ +/* + * context.go - top-level interface to fscrypt packages + * + * Copyright 2017 Google Inc. + * Author: Joe Richey (joerichey@google.com) + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not + * use this file except in compliance with the License. You may obtain a copy of + * the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. + */ + +// Package actions is the high-level interface to the fscrypt packages. The +// functions here roughly correspond with commands for the tool in cmd/fscrypt. +// All of the actions include a significant amount of logging, so that good +// output can be provided for cmd/fscrypt's verbose mode. 
+// The top-level actions currently include: +// - Creating a new config file +// - Creating a context on which to perform actions +// - Creating, unlocking, and modifying Protectors +// - Creating, unlocking, and modifying Policies +package actions + +import ( + "log" + "os/user" + + "github.com/pkg/errors" + + "github.com/google/fscrypt/filesystem" + "github.com/google/fscrypt/keyring" + "github.com/google/fscrypt/metadata" + "github.com/google/fscrypt/util" +) + +// ErrLocked indicates that the key hasn't been unwrapped yet. +var ErrLocked = errors.New("key needs to be unlocked first") + +// Context contains the necessary global state to perform most of fscrypt's +// actions. +type Context struct { + // Config is the struct loaded from the global config file. It can be + // modified after being loaded to customise parameters. + Config *metadata.Config + // Mount is the filesystem relative to which all Protectors and Policies + // are added, edited, removed, and applied, and to which policies using + // the filesystem keyring are provisioned. + Mount *filesystem.Mount + // TargetUser is the user for whom protectors are created, and to whose + // keyring policies using the user keyring are provisioned. It's also + // the user for whom the keys are claimed in the filesystem keyring when + // v2 policies are provisioned. + TargetUser *user.User + // TrustedUser is the user for whom policies and protectors are allowed + // to be read. Specifically, if TrustedUser is set, then only + // policies and protectors owned by TrustedUser or by root will be + // allowed to be read. If it's nil, then all policies and protectors + // the process has filesystem-level read access to will be allowed. + TrustedUser *user.User +} + +// NewContextFromPath makes a context for the filesystem containing the +// specified path and whose Config is loaded from the global config file. On +// success, the Context contains a valid Config and Mount. The target user +// defaults to the current effective user if none is specified. +func NewContextFromPath(path string, targetUser *user.User) (*Context, error) { + ctx, err := newContextFromUser(targetUser) + if err != nil { + return nil, err + } + if ctx.Mount, err = filesystem.FindMount(path); err != nil { + return nil, err + } + + log.Printf("%s is on %s filesystem %q (%s)", path, + ctx.Mount.FilesystemType, ctx.Mount.Path, ctx.Mount.Device) + return ctx, nil +} + +// NewContextFromMountpoint makes a context for the filesystem at the specified +// mountpoint and whose Config is loaded from the global config file. On +// success, the Context contains a valid Config and Mount. The target user +// defaults to the current effective user if none is specified. +func NewContextFromMountpoint(mountpoint string, targetUser *user.User) (*Context, error) { + ctx, err := newContextFromUser(targetUser) + if err != nil { + return nil, err + } + if ctx.Mount, err = filesystem.GetMount(mountpoint); err != nil { + return nil, err + } + + log.Printf("found %s filesystem %q (%s)", ctx.Mount.FilesystemType, + ctx.Mount.Path, ctx.Mount.Device) + return ctx, nil +} + +// newContextFromUser makes a context with the corresponding target user, and +// whose Config is loaded from the global config file. If the target user is +// nil, the effective user is used. 
+func newContextFromUser(targetUser *user.User) (*Context, error) { + var err error + if targetUser == nil { + if targetUser, err = util.EffectiveUser(); err != nil { + return nil, err + } + } + + ctx := &Context{TargetUser: targetUser} + if ctx.Config, err = getConfig(); err != nil { + return nil, err + } + + // By default, when running as a non-root user we only read policies and + // protectors owned by the user or root. When running as root, we allow + // reading all policies and protectors. + if !ctx.Config.GetAllowCrossUserMetadata() && !util.IsUserRoot() { + ctx.TrustedUser, err = util.EffectiveUser() + if err != nil { + return nil, err + } + } + + log.Printf("creating context for user %q", targetUser.Username) + return ctx, nil +} + +// checkContext verifies that the context contains a valid config and a mount +// which is being used with fscrypt. +func (ctx *Context) checkContext() error { + if err := ctx.Config.CheckValidity(); err != nil { + return &ErrBadConfig{ctx.Config, err} + } + return ctx.Mount.CheckSetup(ctx.TrustedUser) +} + +func (ctx *Context) getKeyringOptions() *keyring.Options { + return &keyring.Options{ + Mount: ctx.Mount, + User: ctx.TargetUser, + UseFsKeyringForV1Policies: ctx.Config.GetUseFsKeyringForV1Policies(), + } +} + +// getProtectorOption returns the ProtectorOption for the protector on the +// context's mountpoint with the specified descriptor. +func (ctx *Context) getProtectorOption(protectorDescriptor string) *ProtectorOption { + mnt, data, err := ctx.Mount.GetProtector(protectorDescriptor, ctx.TrustedUser) + if err != nil { + return &ProtectorOption{ProtectorInfo{}, nil, err} + } + + info := ProtectorInfo{data} + // No linked path if on the same mountpoint + if mnt == ctx.Mount { + return &ProtectorOption{info, nil, nil} + } + return &ProtectorOption{info, mnt, nil} +} + +// ProtectorOptions creates a slice of all the options for all of the Protectors +// on the Context's mountpoint. +func (ctx *Context) ProtectorOptions() ([]*ProtectorOption, error) { + if err := ctx.checkContext(); err != nil { + return nil, err + } + descriptors, err := ctx.Mount.ListProtectors(ctx.TrustedUser) + if err != nil { + return nil, err + } + + options := make([]*ProtectorOption, len(descriptors)) + for i, descriptor := range descriptors { + options[i] = ctx.getProtectorOption(descriptor) + } + return options, nil +} diff --git a/vendor/github.com/google/fscrypt/actions/policy.go b/vendor/github.com/google/fscrypt/actions/policy.go new file mode 100644 index 000000000..3b2017693 --- /dev/null +++ b/vendor/github.com/google/fscrypt/actions/policy.go @@ -0,0 +1,622 @@ +/* + * policy.go - functions for dealing with policies + * + * Copyright 2017 Google Inc. + * Author: Joe Richey (joerichey@google.com) + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not + * use this file except in compliance with the License. You may obtain a copy of + * the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. 
+ */ + +package actions + +import ( + "fmt" + "log" + "os" + "os/user" + + "github.com/golang/protobuf/proto" + "github.com/pkg/errors" + + "github.com/google/fscrypt/crypto" + "github.com/google/fscrypt/filesystem" + "github.com/google/fscrypt/keyring" + "github.com/google/fscrypt/metadata" + "github.com/google/fscrypt/util" +) + +// ErrAccessDeniedPossiblyV2 indicates that a directory's encryption policy +// couldn't be retrieved due to "permission denied", but it looks like it's due +// to the directory using a v2 policy but the kernel not supporting it. +type ErrAccessDeniedPossiblyV2 struct { + DirPath string +} + +func (err *ErrAccessDeniedPossiblyV2) Error() string { + return fmt.Sprintf(` + failed to get encryption policy of %s: permission denied + + This may be caused by the directory using a v2 encryption policy and the + current kernel not supporting it. If indeed the case, then this + directory can only be used on kernel v5.4 and later. You can create + directories accessible on older kernels by changing policy_version to 1 + in %s.`, + err.DirPath, ConfigFileLocation) +} + +// ErrAlreadyProtected indicates that a policy is already protected by the given +// protector. +type ErrAlreadyProtected struct { + Policy *Policy + Protector *Protector +} + +func (err *ErrAlreadyProtected) Error() string { + return fmt.Sprintf("policy %s is already protected by protector %s", + err.Policy.Descriptor(), err.Protector.Descriptor()) +} + +// ErrDifferentFilesystem indicates that a policy can't be applied to a +// directory on a different filesystem. +type ErrDifferentFilesystem struct { + PolicyMount *filesystem.Mount + PathMount *filesystem.Mount +} + +func (err *ErrDifferentFilesystem) Error() string { + return fmt.Sprintf(`cannot apply policy from filesystem %q to a + directory on filesystem %q. Policies may only protect files on the same + filesystem.`, err.PolicyMount.Path, err.PathMount.Path) +} + +// ErrMissingPolicyMetadata indicates that a directory is encrypted but its +// policy metadata cannot be found. +type ErrMissingPolicyMetadata struct { + Mount *filesystem.Mount + DirPath string + Descriptor string +} + +func (err *ErrMissingPolicyMetadata) Error() string { + return fmt.Sprintf(`filesystem %q does not contain the policy metadata + for %q. This directory has either been encrypted with another tool (such + as e4crypt), or the file %q has been deleted.`, + err.Mount.Path, err.DirPath, + err.Mount.PolicyPath(err.Descriptor)) +} + +// ErrNotProtected indicates that the given policy is not protected by the given +// protector. +type ErrNotProtected struct { + PolicyDescriptor string + ProtectorDescriptor string +} + +func (err *ErrNotProtected) Error() string { + return fmt.Sprintf(`policy %s is not protected by protector %s`, + err.PolicyDescriptor, err.ProtectorDescriptor) +} + +// ErrOnlyProtector indicates that the last protector can't be removed from a +// policy. +type ErrOnlyProtector struct { + Policy *Policy +} + +func (err *ErrOnlyProtector) Error() string { + return fmt.Sprintf(`cannot remove the only protector from policy %s. A + policy must have at least one protector.`, err.Policy.Descriptor()) +} + +// ErrPolicyMetadataMismatch indicates that the policy metadata for an encrypted +// directory is inconsistent with that directory. 
+type ErrPolicyMetadataMismatch struct { + DirPath string + Mount *filesystem.Mount + PathData *metadata.PolicyData + MountData *metadata.PolicyData +} + +func (err *ErrPolicyMetadataMismatch) Error() string { + return fmt.Sprintf(`inconsistent metadata between encrypted directory %q + and its corresponding metadata file %q. + + Directory has descriptor:%s %s + + Metadata file has descriptor:%s %s`, + err.DirPath, err.Mount.PolicyPath(err.PathData.KeyDescriptor), + err.PathData.KeyDescriptor, err.PathData.Options, + err.MountData.KeyDescriptor, err.MountData.Options) +} + +// PurgeAllPolicies removes all policy keys on the filesystem from the kernel +// keyring. In order for this to fully take effect, the filesystem may also need +// to be unmounted or caches dropped. +func PurgeAllPolicies(ctx *Context) error { + if err := ctx.checkContext(); err != nil { + return err + } + policies, err := ctx.Mount.ListPolicies(nil) + if err != nil { + return err + } + + for _, policyDescriptor := range policies { + err = keyring.RemoveEncryptionKey(policyDescriptor, ctx.getKeyringOptions(), false) + switch errors.Cause(err) { + case nil, keyring.ErrKeyNotPresent: + // We don't care if the key has already been removed + case keyring.ErrKeyFilesOpen: + log.Printf("Key for policy %s couldn't be fully removed because some files are still in-use", + policyDescriptor) + case keyring.ErrKeyAddedByOtherUsers: + log.Printf("Key for policy %s couldn't be fully removed because other user(s) have added it too", + policyDescriptor) + default: + return err + } + } + return nil +} + +// Policy represents an unlocked policy, so it contains the PolicyData as well +// as the actual protector key. These unlocked Polices can then be applied to a +// directory, or have their key material inserted into the keyring (which will +// allow encrypted files to be accessed). As with the key struct, a Policy +// should be wiped after use. +type Policy struct { + Context *Context + data *metadata.PolicyData + key *crypto.Key + created bool + ownerIfCreating *user.User + newLinkedProtectors []string +} + +// CreatePolicy creates a Policy protected by given Protector and stores the +// appropriate data on the filesystem. On error, no data is changed on the +// filesystem. +func CreatePolicy(ctx *Context, protector *Protector) (*Policy, error) { + if err := ctx.checkContext(); err != nil { + return nil, err + } + // Randomly create the underlying policy key (and wipe if we fail) + key, err := crypto.NewRandomKey(metadata.PolicyKeyLen) + if err != nil { + return nil, err + } + + keyDescriptor, err := crypto.ComputeKeyDescriptor(key, ctx.Config.Options.PolicyVersion) + if err != nil { + key.Wipe() + return nil, err + } + + policy := &Policy{ + Context: ctx, + data: &metadata.PolicyData{ + Options: ctx.Config.Options, + KeyDescriptor: keyDescriptor, + }, + key: key, + created: true, + } + + policy.ownerIfCreating, err = getOwnerOfMetadataForProtector(protector) + if err != nil { + policy.Lock() + return nil, err + } + + if err = policy.AddProtector(protector); err != nil { + policy.Lock() + return nil, err + } + + return policy, nil +} + +// GetPolicy retrieves a locked policy with a specific descriptor. The Policy is +// still locked in this case, so it must be unlocked before using certain +// methods. 
+func GetPolicy(ctx *Context, descriptor string) (*Policy, error) { + if err := ctx.checkContext(); err != nil { + return nil, err + } + data, err := ctx.Mount.GetPolicy(descriptor, ctx.TrustedUser) + if err != nil { + return nil, err + } + log.Printf("got data for %s from %q", descriptor, ctx.Mount.Path) + + return &Policy{Context: ctx, data: data}, nil +} + +// GetPolicyFromPath returns the locked policy descriptor for a file on the +// filesystem. The Policy is still locked in this case, so it must be unlocked +// before using certain methods. An error is returned if the metadata is +// inconsistent or the path is not encrypted. +func GetPolicyFromPath(ctx *Context, path string) (*Policy, error) { + if err := ctx.checkContext(); err != nil { + return nil, err + } + + // We double check that the options agree for both the data we get from + // the path, and the data we get from the mountpoint. + pathData, err := metadata.GetPolicy(path) + err = ctx.Mount.EncryptionSupportError(err) + if err != nil { + // On kernels that don't support v2 encryption policies, trying + // to open a directory with a v2 policy simply gave EACCES. This + // is ambiguous with other errors, but try to detect this case + // and show a better error message. + if os.IsPermission(err) && + filesystem.HaveReadAccessTo(path) && + !keyring.IsFsKeyringSupported(ctx.Mount) { + return nil, &ErrAccessDeniedPossiblyV2{path} + } + return nil, err + } + descriptor := pathData.KeyDescriptor + log.Printf("found policy %s for %q", descriptor, path) + + mountData, err := ctx.Mount.GetPolicy(descriptor, ctx.TrustedUser) + if err != nil { + log.Printf("getting policy metadata: %v", err) + if _, ok := err.(*filesystem.ErrPolicyNotFound); ok { + return nil, &ErrMissingPolicyMetadata{ctx.Mount, path, descriptor} + } + return nil, err + } + log.Printf("found data for policy %s on %q", descriptor, ctx.Mount.Path) + + if !proto.Equal(pathData.Options, mountData.Options) || + pathData.KeyDescriptor != mountData.KeyDescriptor { + return nil, &ErrPolicyMetadataMismatch{path, ctx.Mount, pathData, mountData} + } + log.Print("data from filesystem and path agree") + + return &Policy{Context: ctx, data: mountData}, nil +} + +// ProtectorOptions creates a slice of ProtectorOptions for the protectors +// protecting this policy. +func (policy *Policy) ProtectorOptions() []*ProtectorOption { + options := make([]*ProtectorOption, len(policy.data.WrappedPolicyKeys)) + for i, wrappedPolicyKey := range policy.data.WrappedPolicyKeys { + options[i] = policy.Context.getProtectorOption(wrappedPolicyKey.ProtectorDescriptor) + } + return options +} + +// ProtectorDescriptors creates a slice of the Protector descriptors for the +// protectors protecting this policy. +func (policy *Policy) ProtectorDescriptors() []string { + descriptors := make([]string, len(policy.data.WrappedPolicyKeys)) + for i, wrappedPolicyKey := range policy.data.WrappedPolicyKeys { + descriptors[i] = wrappedPolicyKey.ProtectorDescriptor + } + return descriptors +} + +// Descriptor returns the key descriptor for this policy. +func (policy *Policy) Descriptor() string { + return policy.data.KeyDescriptor +} + +// Options returns the encryption options of this policy. +func (policy *Policy) Options() *metadata.EncryptionOptions { + return policy.data.Options +} + +// Version returns the version of this policy. +func (policy *Policy) Version() int64 { + return policy.data.Options.PolicyVersion +} + +// Destroy removes a policy from the filesystem. 
It also removes any new +// protector links that were created for the policy. This does *not* wipe the +// policy's internal key from memory; use Lock() to do that. +func (policy *Policy) Destroy() error { + for _, protectorDescriptor := range policy.newLinkedProtectors { + policy.Context.Mount.RemoveProtector(protectorDescriptor) + } + return policy.Context.Mount.RemovePolicy(policy.Descriptor()) +} + +// Revert destroys a policy if it was created, but does nothing if it was just +// queried from the filesystem. +func (policy *Policy) Revert() error { + if !policy.created { + return nil + } + return policy.Destroy() +} + +func (policy *Policy) String() string { + return fmt.Sprintf("Policy: %s\nMountpoint: %s\nOptions: %v\nProtectors:%+v", + policy.Descriptor(), policy.Context.Mount, policy.data.Options, + policy.ProtectorDescriptors()) +} + +// Unlock unwraps the Policy's internal key. As a Protector is needed to unlock +// the Policy, callbacks to select the Policy and get the key are needed. This +// method will retry the keyFn as necessary to get the correct key for the +// selected protector. Does nothing if policy is already unlocked. +func (policy *Policy) Unlock(optionFn OptionFunc, keyFn KeyFunc) error { + if policy.key != nil { + return nil + } + options := policy.ProtectorOptions() + + // The OptionFunc indicates which option and wrapped key we should use. + idx, err := optionFn(policy.Descriptor(), options) + if err != nil { + return err + } + option := options[idx] + if option.LoadError != nil { + return option.LoadError + } + + log.Printf("protector %s selected in callback", option.Descriptor()) + protectorKey, err := unwrapProtectorKey(option.ProtectorInfo, keyFn) + if err != nil { + return err + } + defer protectorKey.Wipe() + + log.Printf("unwrapping policy %s with protector", policy.Descriptor()) + wrappedPolicyKey := policy.data.WrappedPolicyKeys[idx].WrappedKey + policy.key, err = crypto.Unwrap(protectorKey, wrappedPolicyKey) + return err +} + +// UnlockWithProtector uses an unlocked Protector to unlock a policy. An error +// is returned if the Protector is not yet unlocked or does not protect the +// policy. Does nothing if policy is already unlocked. +func (policy *Policy) UnlockWithProtector(protector *Protector) error { + if policy.key != nil { + return nil + } + if protector.key == nil { + return ErrLocked + } + idx, ok := policy.findWrappedKeyIndex(protector.Descriptor()) + if !ok { + return &ErrNotProtected{policy.Descriptor(), protector.Descriptor()} + } + + var err error + wrappedPolicyKey := policy.data.WrappedPolicyKeys[idx].WrappedKey + policy.key, err = crypto.Unwrap(protector.key, wrappedPolicyKey) + return err +} + +// Lock wipes a Policy's internal Key. It should always be called after using a +// Policy. This is often done with a defer statement. There is no effect if +// called multiple times. +func (policy *Policy) Lock() error { + err := policy.key.Wipe() + policy.key = nil + return err +} + +// UsesProtector returns if the policy is protected with the protector +func (policy *Policy) UsesProtector(protector *Protector) bool { + _, ok := policy.findWrappedKeyIndex(protector.Descriptor()) + return ok +} + +// getOwnerOfMetadataForProtector returns the User to whom the owner of any new +// policies or protector links for the given protector should be set. +// +// This will return a non-nil value only when the protector is a login protector +// and the process is running as root. 
In this scenario, root is setting up +// encryption on the user's behalf, so we need to make new policies and +// protector links owned by the user (rather than root) to allow them to be read +// by the user, just like the login protector itself which is handled elsewhere. +func getOwnerOfMetadataForProtector(protector *Protector) (*user.User, error) { + if protector.data.Source == metadata.SourceType_pam_passphrase && util.IsUserRoot() { + owner, err := util.UserFromUID(protector.data.Uid) + if err != nil { + return nil, err + } + return owner, nil + } + return nil, nil +} + +// AddProtector updates the data that is wrapping the Policy Key so that the +// provided Protector is now protecting the specified Policy. If an error is +// returned, no data has been changed. If the policy and protector are on +// different filesystems, a link will be created between them. The policy and +// protector must both be unlocked. +func (policy *Policy) AddProtector(protector *Protector) error { + if policy.UsesProtector(protector) { + return &ErrAlreadyProtected{policy, protector} + } + if policy.key == nil || protector.key == nil { + return ErrLocked + } + + // If the protector is on a different filesystem, we need to add a link + // to it on the policy's filesystem. + if policy.Context.Mount != protector.Context.Mount { + log.Printf("policy on %s\n protector on %s\n", policy.Context.Mount, protector.Context.Mount) + ownerIfCreating, err := getOwnerOfMetadataForProtector(protector) + if err != nil { + return err + } + isNewLink, err := policy.Context.Mount.AddLinkedProtector( + protector.Descriptor(), protector.Context.Mount, + protector.Context.TrustedUser, ownerIfCreating) + if err != nil { + return err + } + if isNewLink { + policy.newLinkedProtectors = append(policy.newLinkedProtectors, + protector.Descriptor()) + } + } else { + log.Printf("policy and protector both on %q", policy.Context.Mount) + } + + // Create the wrapped policy key + wrappedKey, err := crypto.Wrap(protector.key, policy.key) + if err != nil { + return err + } + + // Append the wrapped key to the data + policy.addKey(&metadata.WrappedPolicyKey{ + ProtectorDescriptor: protector.Descriptor(), + WrappedKey: wrappedKey, + }) + + if err := policy.commitData(); err != nil { + // revert the addition on failure + policy.removeKey(len(policy.data.WrappedPolicyKeys) - 1) + return err + } + return nil +} + +// RemoveProtector updates the data that is wrapping the Policy Key so that the +// protector with the given descriptor is no longer protecting the specified +// Policy. If an error is returned, no data has been changed. Note that the +// protector itself won't be removed, nor will a link to the protector be +// removed (in the case where the protector and policy are on different +// filesystems). The policy can be locked or unlocked. +func (policy *Policy) RemoveProtector(protectorDescriptor string) error { + idx, ok := policy.findWrappedKeyIndex(protectorDescriptor) + if !ok { + return &ErrNotProtected{policy.Descriptor(), protectorDescriptor} + } + + if len(policy.data.WrappedPolicyKeys) == 1 { + return &ErrOnlyProtector{policy} + } + + // Remove the wrapped key from the data + toRemove := policy.removeKey(idx) + + if err := policy.commitData(); err != nil { + // revert the removal on failure (order is irrelevant) + policy.addKey(toRemove) + return err + } + return nil +} + +// Apply sets the Policy on a specified directory. 
Currently we impose the +// additional constraint that policies and the directories they are applied to +// must reside on the same filesystem. +func (policy *Policy) Apply(path string) error { + if pathMount, err := filesystem.FindMount(path); err != nil { + return err + } else if pathMount != policy.Context.Mount { + return &ErrDifferentFilesystem{policy.Context.Mount, pathMount} + } + + err := metadata.SetPolicy(path, policy.data) + return policy.Context.Mount.EncryptionSupportError(err) +} + +// GetProvisioningStatus returns the status of this policy's key in the keyring. +func (policy *Policy) GetProvisioningStatus() keyring.KeyStatus { + status, _ := keyring.GetEncryptionKeyStatus(policy.Descriptor(), + policy.Context.getKeyringOptions()) + return status +} + +// IsProvisionedByTargetUser returns true if the policy's key is present in the +// target kernel keyring, but not if that keyring is a filesystem keyring and +// the key only been added by users other than Context.TargetUser. +func (policy *Policy) IsProvisionedByTargetUser() bool { + return policy.GetProvisioningStatus() == keyring.KeyPresent +} + +// Provision inserts the Policy key into the kernel keyring. This allows reading +// and writing of files encrypted with this directory. Requires unlocked Policy. +func (policy *Policy) Provision() error { + if policy.key == nil { + return ErrLocked + } + return keyring.AddEncryptionKey(policy.key, policy.Descriptor(), + policy.Context.getKeyringOptions()) +} + +// Deprovision removes the Policy key from the kernel keyring. This prevents +// reading and writing to the directory --- unless the target keyring is a user +// keyring, in which case caches must be dropped too. If the Policy key was +// already removed, returns keyring.ErrKeyNotPresent. +func (policy *Policy) Deprovision(allUsers bool) error { + return keyring.RemoveEncryptionKey(policy.Descriptor(), + policy.Context.getKeyringOptions(), allUsers) +} + +// NeedsUserKeyring returns true if Provision and Deprovision for this policy +// will use a user keyring (deprecated), not a filesystem keyring. +func (policy *Policy) NeedsUserKeyring() bool { + return policy.Version() == 1 && !policy.Context.Config.GetUseFsKeyringForV1Policies() +} + +// NeedsRootToProvision returns true if Provision and Deprovision will require +// root for this policy in the current configuration. +func (policy *Policy) NeedsRootToProvision() bool { + return policy.Version() == 1 && policy.Context.Config.GetUseFsKeyringForV1Policies() +} + +// CanBeAppliedWithoutProvisioning returns true if this process can apply this +// policy to a directory without first calling Provision. +func (policy *Policy) CanBeAppliedWithoutProvisioning() bool { + return policy.Version() == 1 || util.IsUserRoot() +} + +// commitData writes the Policy's current data to the filesystem. +func (policy *Policy) commitData() error { + return policy.Context.Mount.AddPolicy(policy.data, policy.ownerIfCreating) +} + +// findWrappedPolicyKey returns the index of the wrapped policy key +// corresponding to this policy and protector. The returned bool is false if no +// wrapped policy key corresponds to the specified protector, true otherwise. +func (policy *Policy) findWrappedKeyIndex(protectorDescriptor string) (int, bool) { + for idx, wrappedPolicyKey := range policy.data.WrappedPolicyKeys { + if wrappedPolicyKey.ProtectorDescriptor == protectorDescriptor { + return idx, true + } + } + return 0, false +} + +// addKey adds the wrapped policy key to end of the wrapped key data. 
+func (policy *Policy) addKey(toAdd *metadata.WrappedPolicyKey) { + policy.data.WrappedPolicyKeys = append(policy.data.WrappedPolicyKeys, toAdd) +} + +// removeKey removes the wrapped policy key at the specified index. This +// does not preserve the order of the wrapped policy key array. If no index is +// specified the last key is removed. +func (policy *Policy) removeKey(index int) *metadata.WrappedPolicyKey { + lastIdx := len(policy.data.WrappedPolicyKeys) - 1 + toRemove := policy.data.WrappedPolicyKeys[index] + + // See https://github.com/golang/go/wiki/SliceTricks + policy.data.WrappedPolicyKeys[index] = policy.data.WrappedPolicyKeys[lastIdx] + policy.data.WrappedPolicyKeys[lastIdx] = nil + policy.data.WrappedPolicyKeys = policy.data.WrappedPolicyKeys[:lastIdx] + + return toRemove +} diff --git a/vendor/github.com/google/fscrypt/actions/protector.go b/vendor/github.com/google/fscrypt/actions/protector.go new file mode 100644 index 000000000..b986eb020 --- /dev/null +++ b/vendor/github.com/google/fscrypt/actions/protector.go @@ -0,0 +1,300 @@ +/* + * protector.go - functions for dealing with protectors + * + * Copyright 2017 Google Inc. + * Author: Joe Richey (joerichey@google.com) + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not + * use this file except in compliance with the License. You may obtain a copy of + * the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. + */ + +package actions + +import ( + "fmt" + "log" + "os/user" + + "github.com/google/fscrypt/crypto" + "github.com/google/fscrypt/metadata" + "github.com/google/fscrypt/util" +) + +// LoginProtectorMountpoint is the mountpoint where login protectors are stored. +// This can be overridden by the user of this package. +var LoginProtectorMountpoint = "/" + +// ErrLoginProtectorExists indicates that a user already has a login protector. +type ErrLoginProtectorExists struct { + User *user.User +} + +func (err *ErrLoginProtectorExists) Error() string { + return fmt.Sprintf("user %q already has a login protector", err.User.Username) +} + +// ErrLoginProtectorName indicates that a name was given for a login protector. +type ErrLoginProtectorName struct { + Name string + User *user.User +} + +func (err *ErrLoginProtectorName) Error() string { + return fmt.Sprintf(`cannot assign name %q to new login protector for + user %q because login protectors are identified by user, not by name.`, + err.Name, err.User.Username) +} + +// ErrMissingProtectorName indicates that a protector name is needed. +type ErrMissingProtectorName struct { + Source metadata.SourceType +} + +func (err *ErrMissingProtectorName) Error() string { + return fmt.Sprintf("%s protectors must be named", err.Source) +} + +// ErrProtectorNameExists indicates that a protector name already exists. +type ErrProtectorNameExists struct { + Name string +} + +func (err *ErrProtectorNameExists) Error() string { + return fmt.Sprintf("there is already a protector named %q", err.Name) +} + +// checkForProtectorWithName returns an error if there is already a protector +// on the filesystem with a specific name (or if we cannot read the necessary +// data). 
+func checkForProtectorWithName(ctx *Context, name string) error { + options, err := ctx.ProtectorOptions() + if err != nil { + return err + } + for _, option := range options { + if option.Name() == name { + return &ErrProtectorNameExists{name} + } + } + return nil +} + +// checkIfUserHasLoginProtector returns an error if there is already a login +// protector on the filesystem for a specific user (or if we cannot read the +// necessary data). +func checkIfUserHasLoginProtector(ctx *Context, uid int64) error { + options, err := ctx.ProtectorOptions() + if err != nil { + return err + } + for _, option := range options { + if option.Source() == metadata.SourceType_pam_passphrase && option.UID() == uid { + return &ErrLoginProtectorExists{ctx.TargetUser} + } + } + return nil +} + +// Protector represents an unlocked protector, so it contains the ProtectorData +// as well as the actual protector key. These unlocked Protectors are necessary +// to unlock policies and create new polices. As with the key struct, a +// Protector should be wiped after use. +type Protector struct { + Context *Context + data *metadata.ProtectorData + key *crypto.Key + created bool + ownerIfCreating *user.User +} + +// CreateProtector creates an unlocked protector with a given name (name only +// needed for custom and raw protector types). The keyFn provided to create the +// Protector key will only be called once. If an error is returned, no data has +// been changed on the filesystem. +func CreateProtector(ctx *Context, name string, keyFn KeyFunc, owner *user.User) (*Protector, error) { + if err := ctx.checkContext(); err != nil { + return nil, err + } + // Sanity checks for names + if ctx.Config.Source == metadata.SourceType_pam_passphrase { + // login protectors don't need a name (we use the username instead) + if name != "" { + return nil, &ErrLoginProtectorName{name, ctx.TargetUser} + } + } else { + // non-login protectors need a name (so we can distinguish between them) + if name == "" { + return nil, &ErrMissingProtectorName{ctx.Config.Source} + } + // we don't want to duplicate naming + if err := checkForProtectorWithName(ctx, name); err != nil { + return nil, err + } + } + + var err error + protector := &Protector{ + Context: ctx, + data: &metadata.ProtectorData{ + Name: name, + Source: ctx.Config.Source, + }, + created: true, + ownerIfCreating: owner, + } + + // Extra data is needed for some SourceTypes + switch protector.data.Source { + case metadata.SourceType_pam_passphrase: + // As the pam passphrases are user specific, we also store the + // UID for this kind of source. + protector.data.Uid = int64(util.AtoiOrPanic(ctx.TargetUser.Uid)) + // Make sure we aren't duplicating protectors + if err = checkIfUserHasLoginProtector(ctx, protector.data.Uid); err != nil { + return nil, err + } + fallthrough + case metadata.SourceType_custom_passphrase: + // Our passphrase sources need costs and a random salt. 
+ if protector.data.Salt, err = crypto.NewRandomBuffer(metadata.SaltLen); err != nil { + return nil, err + } + + protector.data.Costs = ctx.Config.HashCosts + } + + // Randomly create the underlying protector key (and wipe if we fail) + if protector.key, err = crypto.NewRandomKey(metadata.InternalKeyLen); err != nil { + return nil, err + } + protector.data.ProtectorDescriptor, err = crypto.ComputeKeyDescriptor(protector.key, 1) + if err != nil { + protector.Lock() + return nil, err + } + + if err = protector.Rewrap(keyFn); err != nil { + protector.Lock() + return nil, err + } + + return protector, nil +} + +// GetProtector retrieves a Protector with a specific descriptor. The Protector +// is still locked in this case, so it must be unlocked before using certain +// methods. +func GetProtector(ctx *Context, descriptor string) (*Protector, error) { + log.Printf("Getting protector %s", descriptor) + err := ctx.checkContext() + if err != nil { + return nil, err + } + + protector := &Protector{Context: ctx} + protector.data, err = ctx.Mount.GetRegularProtector(descriptor, ctx.TrustedUser) + return protector, err +} + +// GetProtectorFromOption retrieves a protector based on a protector option. +// If the option had a load error, this function returns that error. The +// Protector is still locked in this case, so it must be unlocked before using +// certain methods. +func GetProtectorFromOption(ctx *Context, option *ProtectorOption) (*Protector, error) { + log.Printf("Getting protector %s from option", option.Descriptor()) + if err := ctx.checkContext(); err != nil { + return nil, err + } + if option.LoadError != nil { + return nil, option.LoadError + } + + // Replace the context if this is a linked protector + if option.LinkedMount != nil { + ctx = &Context{ctx.Config, option.LinkedMount, ctx.TargetUser, ctx.TrustedUser} + } + return &Protector{Context: ctx, data: option.data}, nil +} + +// Descriptor returns the protector descriptor. +func (protector *Protector) Descriptor() string { + return protector.data.ProtectorDescriptor +} + +// Destroy removes a protector from the filesystem. The internal key should +// still be wiped with Lock(). +func (protector *Protector) Destroy() error { + return protector.Context.Mount.RemoveProtector(protector.Descriptor()) +} + +// Revert destroys a protector if it was created, but does nothing if it was +// just queried from the filesystem. +func (protector *Protector) Revert() error { + if !protector.created { + return nil + } + return protector.Destroy() +} + +func (protector *Protector) String() string { + return fmt.Sprintf("Protector: %s\nMountpoint: %s\nSource: %s\nName: %s\nCosts: %v\nUID: %d", + protector.Descriptor(), protector.Context.Mount, protector.data.Source, + protector.data.Name, protector.data.Costs, protector.data.Uid) +} + +// Unlock unwraps the Protector's internal key. The keyFn provided to unwrap the +// Protector key will be retried as necessary to get the correct key. Lock() +// should be called after use. Does nothing if protector is already unlocked. +func (protector *Protector) Unlock(keyFn KeyFunc) (err error) { + if protector.key != nil { + return + } + protector.key, err = unwrapProtectorKey(ProtectorInfo{protector.data}, keyFn) + return +} + +// Lock wipes a Protector's internal Key. It should always be called after using +// an unlocked Protector. This is often done with a defer statement. There is +// no effect if called multiple times. 
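+//
+// A minimal usage sketch (caller-side illustration, assuming ctx, descriptor,
+// and keyFn are already in scope; not code from this file): fetch a protector,
+// unlock it, and always wipe its key again with a deferred Lock.
+//
+//    protector, err := GetProtector(ctx, descriptor)
+//    if err != nil {
+//        return err
+//    }
+//    if err := protector.Unlock(keyFn); err != nil {
+//        return err
+//    }
+//    defer protector.Lock()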
+func (protector *Protector) Lock() error { + err := protector.key.Wipe() + protector.key = nil + return err +} + +// Rewrap updates the data that is wrapping the Protector Key. This is useful if +// a user's password has changed, for example. The keyFn provided to rewrap +// the Protector key will only be called once. Requires unlocked Protector. +func (protector *Protector) Rewrap(keyFn KeyFunc) error { + if protector.key == nil { + return ErrLocked + } + wrappingKey, err := getWrappingKey(ProtectorInfo{protector.data}, keyFn, false) + if err != nil { + return err + } + + // Revert change to wrapped key on failure + oldWrappedKey := protector.data.WrappedKey + defer func() { + wrappingKey.Wipe() + if err != nil { + protector.data.WrappedKey = oldWrappedKey + } + }() + + if protector.data.WrappedKey, err = crypto.Wrap(wrappingKey, protector.key); err != nil { + return err + } + + return protector.Context.Mount.AddProtector(protector.data, protector.ownerIfCreating) +} diff --git a/vendor/github.com/google/fscrypt/actions/recovery.go b/vendor/github.com/google/fscrypt/actions/recovery.go new file mode 100644 index 000000000..8a769cc7e --- /dev/null +++ b/vendor/github.com/google/fscrypt/actions/recovery.go @@ -0,0 +1,131 @@ +/* + * recovery.go - support for generating recovery passphrases + * + * Copyright 2019 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not + * use this file except in compliance with the License. You may obtain a copy of + * the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. + */ + +package actions + +import ( + "fmt" + "os" + "strconv" + + "github.com/google/fscrypt/crypto" + "github.com/google/fscrypt/metadata" + "github.com/google/fscrypt/util" +) + +// modifiedContextWithSource returns a copy of ctx with the protector source +// replaced by source. +func modifiedContextWithSource(ctx *Context, source metadata.SourceType) *Context { + modifiedConfig := *ctx.Config + modifiedConfig.Source = source + modifiedCtx := *ctx + modifiedCtx.Config = &modifiedConfig + return &modifiedCtx +} + +// AddRecoveryPassphrase randomly generates a recovery passphrase and adds it as +// a custom_passphrase protector for the given Policy. +func AddRecoveryPassphrase(policy *Policy, dirname string) (*crypto.Key, *Protector, error) { + // 20 random characters in a-z is 94 bits of entropy, which is way more + // than enough for a passphrase which still goes through the usual + // passphrase hashing which makes it extremely costly to brute force. + passphrase, err := crypto.NewRandomPassphrase(20) + if err != nil { + return nil, nil, err + } + defer func() { + if err != nil { + passphrase.Wipe() + } + }() + getPassphraseFn := func(info ProtectorInfo, retry bool) (*crypto.Key, error) { + // CreateProtector() wipes the passphrase, but in this case we + // still need it for later, so make a copy. + return passphrase.Clone() + } + var recoveryProtector *Protector + customCtx := modifiedContextWithSource(policy.Context, metadata.SourceType_custom_passphrase) + seq := 1 + for { + // Automatically generate a name for the recovery protector. 
+ name := "Recovery passphrase for " + dirname + if seq != 1 { + name += " (" + strconv.Itoa(seq) + ")" + } + recoveryProtector, err = CreateProtector(customCtx, name, getPassphraseFn, policy.ownerIfCreating) + if err == nil { + break + } + if _, ok := err.(*ErrProtectorNameExists); !ok { + return nil, nil, err + } + seq++ + } + if err := policy.AddProtector(recoveryProtector); err != nil { + recoveryProtector.Revert() + return nil, nil, err + } + return passphrase, recoveryProtector, nil +} + +// WriteRecoveryInstructions writes a recovery passphrase and instructions to a +// file. This file should initially be located in the encrypted directory +// protected by the passphrase itself. It's up to the user to store the +// passphrase in a different location if they actually need it. +func WriteRecoveryInstructions(recoveryPassphrase *crypto.Key, recoveryProtector *Protector, + policy *Policy, path string) error { + file, err := os.OpenFile(path, os.O_WRONLY|os.O_CREATE, 0600) + if err != nil { + return err + } + defer file.Close() + str := fmt.Sprintf( + `fscrypt automatically generated a recovery passphrase for this directory: + + %s + +It did this because you chose to protect this directory with your login +passphrase, but this directory is not on the root filesystem. + +Copy this passphrase to a safe place if you want to still be able to unlock this +directory if you re-install the operating system or connect this storage media +to a different system (which would result in your login protector being lost). + +To unlock this directory using this recovery passphrase, run 'fscrypt unlock' +and select the protector named %q. + +If you want to disable recovery passphrase generation (not recommended), +re-create this directory and pass the --no-recovery option to 'fscrypt encrypt'. +Alternatively, you can remove this recovery passphrase protector using: + + fscrypt metadata remove-protector-from-policy --force --protector=%s:%s --policy=%s:%s + +It is safe to keep it around though, as the recovery passphrase is high-entropy. +`, recoveryPassphrase.Data(), recoveryProtector.data.Name, + recoveryProtector.Context.Mount.Path, recoveryProtector.data.ProtectorDescriptor, + policy.Context.Mount.Path, policy.data.KeyDescriptor) + if _, err = file.WriteString(str); err != nil { + return err + } + if recoveryProtector.ownerIfCreating != nil { + if err = util.Chown(file, recoveryProtector.ownerIfCreating); err != nil { + return err + } + } + return file.Sync() +} diff --git a/vendor/github.com/google/fscrypt/crypto/crypto.go b/vendor/github.com/google/fscrypt/crypto/crypto.go new file mode 100644 index 000000000..1f64b38bb --- /dev/null +++ b/vendor/github.com/google/fscrypt/crypto/crypto.go @@ -0,0 +1,228 @@ +/* + * crypto.go - Cryptographic algorithms used by the rest of fscrypt. + * + * Copyright 2017 Google Inc. + * Author: Joe Richey (joerichey@google.com) + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not + * use this file except in compliance with the License. You may obtain a copy of + * the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. + */ + +// Package crypto manages all the cryptography for fscrypt. 
This includes: +// - Key management (key.go) +// - Securely holding keys in memory +// - Making recovery keys +// - Randomness (rand.go) +// - Cryptographic algorithms (crypto.go) +// - encryption (AES256-CTR) +// - authentication (SHA256-based HMAC) +// - key stretching (SHA256-based HKDF) +// - key wrapping/unwrapping (Encrypt then MAC) +// - passphrase-based key derivation (Argon2id) +// - key descriptor computation (double SHA512, or HKDF-SHA512) +package crypto + +import ( + "crypto/aes" + "crypto/cipher" + "crypto/hmac" + "crypto/sha256" + "crypto/sha512" + "encoding/hex" + "io" + + "github.com/pkg/errors" + "golang.org/x/crypto/argon2" + "golang.org/x/crypto/hkdf" + + "github.com/google/fscrypt/metadata" + "github.com/google/fscrypt/util" +) + +// Crypto error values +var ( + ErrBadAuth = errors.New("key authentication check failed") + ErrRecoveryCode = errors.New("invalid recovery code") + ErrMlockUlimit = errors.New("could not lock key in memory") +) + +// panicInputLength panics if "name" has invalid length (expected != actual) +func panicInputLength(name string, expected, actual int) { + if err := util.CheckValidLength(expected, actual); err != nil { + panic(errors.Wrap(err, name)) + } +} + +// checkWrappingKey returns an error if the wrapping key has the wrong length +func checkWrappingKey(wrappingKey *Key) error { + err := util.CheckValidLength(metadata.InternalKeyLen, wrappingKey.Len()) + return errors.Wrap(err, "wrapping key") +} + +// stretchKey stretches a key of length InternalKeyLen using unsalted HKDF to +// make two keys of length InternalKeyLen. +func stretchKey(key *Key) (encKey, authKey *Key) { + panicInputLength("hkdf key", metadata.InternalKeyLen, key.Len()) + + // The new hkdf function uses the hash and key to create a reader that + // can be used to securely initialize multiple keys. This means that + // reads on the hkdf give independent cryptographic keys. The hkdf will + // also always have enough entropy to read two keys. + hkdf := hkdf.New(sha256.New, key.data, nil, nil) + + encKey, err := NewFixedLengthKeyFromReader(hkdf, metadata.InternalKeyLen) + util.NeverError(err) + authKey, err = NewFixedLengthKeyFromReader(hkdf, metadata.InternalKeyLen) + util.NeverError(err) + + return +} + +// aesCTR runs AES256-CTR on the input using the provided key and iv. This +// function can be used to either encrypt or decrypt input of any size. Note +// that input and output must be the same size. +func aesCTR(key *Key, iv, input, output []byte) { + panicInputLength("aesCTR key", metadata.InternalKeyLen, key.Len()) + panicInputLength("aesCTR iv", metadata.IVLen, len(iv)) + panicInputLength("aesCTR output", len(input), len(output)) + + blockCipher, err := aes.NewCipher(key.data) + util.NeverError(err) // Key is checked to have correct length + + stream := cipher.NewCTR(blockCipher, iv) + stream.XORKeyStream(output, input) +} + +// getHMAC returns the SHA256-based HMAC of some data using the provided key. +func getHMAC(key *Key, data ...[]byte) []byte { + panicInputLength("hmac key", metadata.InternalKeyLen, key.Len()) + + mac := hmac.New(sha256.New, key.data) + for _, buffer := range data { + // SHA256 HMAC should never be unable to write the data + _, err := mac.Write(buffer) + util.NeverError(err) + } + + return mac.Sum(nil) +} + +// Wrap takes a wrapping Key of length InternalKeyLen, and uses it to wrap a +// secret Key of any length. This wrapping uses a random IV, the encrypted data, +// and an HMAC to verify the wrapping key was correct. 
All of this is included +// in the returned WrappedKeyData structure. +func Wrap(wrappingKey, secretKey *Key) (*metadata.WrappedKeyData, error) { + if err := checkWrappingKey(wrappingKey); err != nil { + return nil, err + } + + data := &metadata.WrappedKeyData{EncryptedKey: make([]byte, secretKey.Len())} + + // Get random IV + var err error + if data.IV, err = NewRandomBuffer(metadata.IVLen); err != nil { + return nil, err + } + + // Stretch key for encryption and authentication (unsalted). + encKey, authKey := stretchKey(wrappingKey) + defer encKey.Wipe() + defer authKey.Wipe() + + // Encrypt the secret and include the HMAC of the output ("Encrypt-then-MAC"). + aesCTR(encKey, data.IV, secretKey.data, data.EncryptedKey) + + data.Hmac = getHMAC(authKey, data.IV, data.EncryptedKey) + return data, nil +} + +// Unwrap takes a wrapping Key of length InternalKeyLen, and uses it to unwrap +// the WrappedKeyData to get the unwrapped secret Key. The Wrapped Key data +// includes an authentication check, so an error will be returned if that check +// fails. +func Unwrap(wrappingKey *Key, data *metadata.WrappedKeyData) (*Key, error) { + if err := checkWrappingKey(wrappingKey); err != nil { + return nil, err + } + + // Stretch key for encryption and authentication (unsalted). + encKey, authKey := stretchKey(wrappingKey) + defer encKey.Wipe() + defer authKey.Wipe() + + // Check validity of the HMAC + if !hmac.Equal(getHMAC(authKey, data.IV, data.EncryptedKey), data.Hmac) { + return nil, ErrBadAuth + } + + secretKey, err := NewBlankKey(len(data.EncryptedKey)) + if err != nil { + return nil, err + } + aesCTR(encKey, data.IV, data.EncryptedKey, secretKey.data) + + return secretKey, nil +} + +func computeKeyDescriptorV1(key *Key) string { + h1 := sha512.Sum512(key.data) + h2 := sha512.Sum512(h1[:]) + length := hex.DecodedLen(metadata.PolicyDescriptorLenV1) + return hex.EncodeToString(h2[:length]) +} + +func computeKeyDescriptorV2(key *Key) (string, error) { + // This algorithm is specified by the kernel. It uses unsalted + // HKDF-SHA512, where the application-information string is the prefix + // "fscrypt\0" followed by the HKDF_CONTEXT_KEY_IDENTIFIER byte. + hkdf := hkdf.New(sha512.New, key.data, nil, []byte("fscrypt\x00\x01")) + h := make([]byte, hex.DecodedLen(metadata.PolicyDescriptorLenV2)) + if _, err := io.ReadFull(hkdf, h); err != nil { + return "", err + } + return hex.EncodeToString(h), nil +} + +// ComputeKeyDescriptor computes the descriptor for a given cryptographic key. +// If policyVersion=1, it uses the first 8 bytes of the double application of +// SHA512 on the key. Use this for protectors and v1 policy keys. +// If policyVersion=2, it uses HKDF-SHA512 to compute a key identifier that's +// compatible with the kernel's key identifiers for v2 policy keys. +// In both cases, the resulting bytes are formatted as hex. +func ComputeKeyDescriptor(key *Key, policyVersion int64) (string, error) { + switch policyVersion { + case 1: + return computeKeyDescriptorV1(key), nil + case 2: + return computeKeyDescriptorV2(key) + default: + return "", errors.Errorf("policy version of %d is invalid", policyVersion) + } +} + +// PassphraseHash uses Argon2id to produce a Key given the passphrase, salt, and +// hashing costs. This method is designed to take a long time and consume +// considerable memory. For more information, see the documentation at +// https://godoc.org/golang.org/x/crypto/argon2. 
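+//
+// Putting the primitives above together, a hedged caller-side sketch
+// (passphrase, salt, costs, and secretKey are assumed to be in scope; not code
+// from this file): derive a wrapping key from a passphrase, then wrap and
+// unwrap a secret with it.
+//
+//    wrappingKey, err := PassphraseHash(passphrase, salt, costs)
+//    if err != nil {
+//        return err
+//    }
+//    defer wrappingKey.Wipe()
+//    wrapped, err := Wrap(wrappingKey, secretKey) // encrypt-then-MAC
+//    if err != nil {
+//        return err
+//    }
+//    recovered, err := Unwrap(wrappingKey, wrapped) // returns ErrBadAuth for a wrong key
+//    if err != nil {
+//        return err
+//    }
+//    defer recovered.Wipe()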
+func PassphraseHash(passphrase *Key, salt []byte, costs *metadata.HashingCosts) (*Key, error) { + t := uint32(costs.Time) + m := uint32(costs.Memory) + p := uint8(costs.Parallelism) + key := argon2.IDKey(passphrase.data, salt, t, m, p, metadata.InternalKeyLen) + + hash, err := NewBlankKey(metadata.InternalKeyLen) + if err != nil { + return nil, err + } + copy(hash.data, key) + return hash, nil +} diff --git a/vendor/github.com/google/fscrypt/crypto/key.go b/vendor/github.com/google/fscrypt/crypto/key.go new file mode 100644 index 000000000..2e5744336 --- /dev/null +++ b/vendor/github.com/google/fscrypt/crypto/key.go @@ -0,0 +1,354 @@ +/* + * key.go - Cryptographic key management for fscrypt. Ensures that sensitive + * material is properly handled throughout the program. + * + * Copyright 2017 Google Inc. + * Author: Joe Richey (joerichey@google.com) + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not + * use this file except in compliance with the License. You may obtain a copy of + * the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. + */ + +package crypto + +/* +#include +#include +*/ +import "C" + +import ( + "bytes" + "crypto/subtle" + "encoding/base32" + "io" + "log" + "os" + "runtime" + "unsafe" + + "github.com/pkg/errors" + "golang.org/x/sys/unix" + + "github.com/google/fscrypt/metadata" + "github.com/google/fscrypt/util" +) + +const ( + // Keys need to readable and writable, but hidden from other processes. + keyProtection = unix.PROT_READ | unix.PROT_WRITE + keyMmapFlags = unix.MAP_PRIVATE | unix.MAP_ANONYMOUS +) + +/* +UseMlock determines whether we should use the mlock/munlock syscalls to +prevent sensitive data like keys and passphrases from being paged to disk. +UseMlock defaults to true, but can be set to false if the application calling +into this library has insufficient privileges to lock memory. Code using this +package could also bind this setting to a flag by using: + + flag.BoolVar(&crypto.UseMlock, "lock-memory", true, "lock keys in memory") +*/ +var UseMlock = true + +/* +Key protects some arbitrary buffer of cryptographic material. Its methods +ensure that the Key's data is locked in memory before being used (if +UseMlock is set to true), and is wiped and unlocked after use (via the Wipe() +method). This data is never accessed outside of the fscrypt/crypto package +(except for the UnsafeData method). If a key is successfully created, the +Wipe() method should be called after it's use. For example: + + func UseKeyFromStdin() error { + key, err := NewKeyFromReader(os.Stdin) + if err != nil { + return err + } + defer key.Wipe() + + // Do stuff with key + + return nil + } + +The Wipe() method will also be called when a key is garbage collected; however, +it is best practice to clear the key as soon as possible, so it spends a minimal +amount of time in memory. + +Note that Key is not thread safe, as a key could be wiped while another thread +is using it. Also, calling Wipe() from two threads could cause an error as +memory could be freed twice. 
+*/ +type Key struct { + data []byte +} + +// NewBlankKey constructs a blank key of a specified length and returns an error +// if we are unable to allocate or lock the necessary memory. +func NewBlankKey(length int) (*Key, error) { + if length == 0 { + return &Key{data: nil}, nil + } else if length < 0 { + return nil, errors.Errorf("requested key length %d is negative", length) + } + + flags := keyMmapFlags + if UseMlock { + flags |= unix.MAP_LOCKED + } + + // See MAP_ANONYMOUS in http://man7.org/linux/man-pages/man2/mmap.2.html + data, err := unix.Mmap(-1, 0, length, keyProtection, flags) + if err == unix.EAGAIN { + return nil, ErrMlockUlimit + } + if err != nil { + return nil, errors.Wrapf(err, + "failed to allocate (mmap) key buffer of length %d", length) + } + + key := &Key{data: data} + + // Backup finalizer in case user forgets to "defer key.Wipe()" + runtime.SetFinalizer(key, (*Key).Wipe) + return key, nil +} + +// Wipe destroys a Key by zeroing and freeing the memory. The data is zeroed +// even if Wipe returns an error, which occurs if we are unable to unlock or +// free the key memory. Wipe does nothing if the key is already wiped or is nil. +func (key *Key) Wipe() error { + // We do nothing if key or key.data is nil so that Wipe() is idempotent + // and so Wipe() can be called on keys which have already been cleared. + if key != nil && key.data != nil { + data := key.data + key.data = nil + + for i := range data { + data[i] = 0 + } + + if err := unix.Munmap(data); err != nil { + log.Printf("unix.Munmap() failed: %v", err) + return errors.Wrapf(err, "failed to free (munmap) key buffer") + } + } + return nil +} + +// Len is the underlying data buffer's length. +func (key *Key) Len() int { + return len(key.data) +} + +// Equals compares the contents of two keys, returning true if they have the same +// key data. This function runs in constant time. +func (key *Key) Equals(key2 *Key) bool { + return subtle.ConstantTimeCompare(key.data, key2.data) == 1 +} + +// resize returns a new key with size requestedSize and the appropriate data +// copied over. The original data is wiped. This method does nothing and returns +// itself if the key's length equals requestedSize. +func (key *Key) resize(requestedSize int) (*Key, error) { + if key.Len() == requestedSize { + return key, nil + } + defer key.Wipe() + + resizedKey, err := NewBlankKey(requestedSize) + if err != nil { + return nil, err + } + copy(resizedKey.data, key.data) + return resizedKey, nil +} + +// Data returns a slice of the key's underlying data. Note that this may become +// outdated if the key is resized. +func (key *Key) Data() []byte { + return key.data +} + +// UnsafePtr returns an unsafe pointer to the key's underlying data. Note that +// this will only be valid as long as the key is not resized. +func (key *Key) UnsafePtr() unsafe.Pointer { + return util.Ptr(key.data) +} + +// UnsafeToCString makes a copy of the string's data into a null-terminated C +// string allocated by C. Note that this method is unsafe as this C copy has no +// locking or wiping functionality. The key shouldn't contain any `\0` bytes. +func (key *Key) UnsafeToCString() unsafe.Pointer { + size := C.size_t(key.Len()) + data := C.calloc(size+1, 1) + C.memcpy(data, util.Ptr(key.data), size) + return data +} + +// Clone creates a key as a copy of another one. 
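+//
+// For example (illustration only), a caller that must keep using a key after
+// handing a copy to code that will wipe it can do:
+//
+//    copyOfKey, err := key.Clone()
+//    if err != nil {
+//        return err
+//    }
+//    defer copyOfKey.Wipe()
+//    // key.Equals(copyOfKey) holds until either key is wiped.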
+func (key *Key) Clone() (*Key, error) { + newKey, err := NewBlankKey(key.Len()) + if err != nil { + return nil, err + } + copy(newKey.data, key.data) + return newKey, nil +} + +// NewKeyFromCString creates of a copy of some C string's data in a key. Note +// that the original C string is not modified at all, so steps must be taken to +// ensure that this original copy is secured. +func NewKeyFromCString(str unsafe.Pointer) (*Key, error) { + size := C.strlen((*C.char)(str)) + key, err := NewBlankKey(int(size)) + if err != nil { + return nil, err + } + C.memcpy(util.Ptr(key.data), str, size) + return key, nil +} + +// NewKeyFromReader constructs a key of arbitrary length by reading from reader +// until hitting EOF. +func NewKeyFromReader(reader io.Reader) (*Key, error) { + // Use an initial key size of a page. As Mmap allocates a page anyway, + // there isn't much additional overhead from starting with a whole page. + key, err := NewBlankKey(os.Getpagesize()) + if err != nil { + return nil, err + } + + totalBytesRead := 0 + for { + bytesRead, err := reader.Read(key.data[totalBytesRead:]) + totalBytesRead += bytesRead + + switch err { + case nil: + // Need to continue reading. Grow key if necessary + if key.Len() == totalBytesRead { + if key, err = key.resize(2 * key.Len()); err != nil { + return nil, err + } + } + case io.EOF: + // Getting the EOF error means we are done + return key.resize(totalBytesRead) + default: + // Fail if Read() has a failure + key.Wipe() + return nil, err + } + } +} + +// NewFixedLengthKeyFromReader constructs a key with a specified length by +// reading exactly length bytes from reader. +func NewFixedLengthKeyFromReader(reader io.Reader, length int) (*Key, error) { + key, err := NewBlankKey(length) + if err != nil { + return nil, err + } + if _, err := io.ReadFull(reader, key.data); err != nil { + key.Wipe() + return nil, err + } + return key, nil +} + +var ( + // The recovery code is base32 with a dash between each block of 8 characters. + encoding = base32.StdEncoding + blockSize = 8 + separator = []byte("-") + encodedLength = encoding.EncodedLen(metadata.PolicyKeyLen) + decodedLength = encoding.DecodedLen(encodedLength) + // RecoveryCodeLength is the number of bytes in every recovery code + RecoveryCodeLength = (encodedLength/blockSize)*(blockSize+len(separator)) - len(separator) +) + +// WriteRecoveryCode outputs key's recovery code to the provided writer. +// WARNING: This recovery key is enough to derive the original key, so it must +// be given the same level of protection as a raw cryptographic key. +func WriteRecoveryCode(key *Key, writer io.Writer) error { + if err := util.CheckValidLength(metadata.PolicyKeyLen, key.Len()); err != nil { + return errors.Wrap(err, "recovery key") + } + + // We store the base32 encoded data (without separators) in a temp key + encodedKey, err := NewBlankKey(encodedLength) + if err != nil { + return err + } + defer encodedKey.Wipe() + encoding.Encode(encodedKey.data, key.data) + + w := util.NewErrWriter(writer) + + // Write the blocks with separators between them + w.Write(encodedKey.data[:blockSize]) + for blockStart := blockSize; blockStart < encodedLength; blockStart += blockSize { + w.Write(separator) + + blockEnd := util.MinInt(blockStart+blockSize, encodedLength) + w.Write(encodedKey.data[blockStart:blockEnd]) + } + + // If any writes have failed, return the error + return w.Err() +} + +// ReadRecoveryCode gets the recovery code from the provided reader and returns +// the corresponding cryptographic key. 
+// WARNING: This recovery key is enough to derive the original key, so it must +// be given the same level of protection as a raw cryptographic key. +func ReadRecoveryCode(reader io.Reader) (*Key, error) { + // We store the base32 encoded data (without separators) in a temp key + encodedKey, err := NewBlankKey(encodedLength) + if err != nil { + return nil, err + } + defer encodedKey.Wipe() + + r := util.NewErrReader(reader) + + // Read the other blocks, checking the separators between them + r.Read(encodedKey.data[:blockSize]) + inputSeparator := make([]byte, len(separator)) + + for blockStart := blockSize; blockStart < encodedLength; blockStart += blockSize { + r.Read(inputSeparator) + if r.Err() == nil && !bytes.Equal(separator, inputSeparator) { + err = errors.Wrapf(ErrRecoveryCode, "invalid separator %q", inputSeparator) + return nil, err + } + + blockEnd := util.MinInt(blockStart+blockSize, encodedLength) + r.Read(encodedKey.data[blockStart:blockEnd]) + } + + // If any reads have failed, return the error + if r.Err() != nil { + return nil, errors.Wrapf(ErrRecoveryCode, "read error %v", r.Err()) + } + + // Now we decode the key, resizing if necessary + decodedKey, err := NewBlankKey(decodedLength) + if err != nil { + return nil, err + } + if _, err = encoding.Decode(decodedKey.data, encodedKey.data); err != nil { + return nil, errors.Wrap(ErrRecoveryCode, err.Error()) + } + return decodedKey.resize(metadata.PolicyKeyLen) +} diff --git a/vendor/github.com/google/fscrypt/crypto/rand.go b/vendor/github.com/google/fscrypt/crypto/rand.go new file mode 100644 index 000000000..7d1e55bf0 --- /dev/null +++ b/vendor/github.com/google/fscrypt/crypto/rand.go @@ -0,0 +1,98 @@ +/* + * rand.go - Reader used to generate secure random data for fscrypt. + * + * Copyright 2017 Google Inc. + * Author: Joe Richey (joerichey@google.com) + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not + * use this file except in compliance with the License. You may obtain a copy of + * the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. + */ + +package crypto + +import ( + "io" + + "github.com/pkg/errors" + "golang.org/x/sys/unix" +) + +// NewRandomBuffer uses the Linux Getrandom() syscall to create random bytes. If +// the operating system has insufficient randomness, the buffer creation will +// fail. This is an improvement over Go's built-in crypto/rand which will still +// return bytes if the system has insufficiency entropy. +// See: https://github.com/golang/go/issues/19274 +// +// While this syscall was only introduced in Kernel v3.17, it predates the +// introduction of filesystem encryption, so it introduces no additional +// compatibility issues. +func NewRandomBuffer(length int) ([]byte, error) { + buffer := make([]byte, length) + if _, err := io.ReadFull(randReader{}, buffer); err != nil { + return nil, err + } + return buffer, nil +} + +// NewRandomKey creates a random key of the specified length. This function uses +// the same random number generation process as NewRandomBuffer. 
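+//
+// Round-trip sketch (caller-side illustration using a bytes.Buffer as the
+// storage medium; not code from this file): generate a policy key, emit its
+// recovery code, and read the code back into an equivalent key.
+//
+//    key, err := NewRandomKey(metadata.PolicyKeyLen)
+//    if err != nil {
+//        return err
+//    }
+//    defer key.Wipe()
+//    var buf bytes.Buffer
+//    if err := WriteRecoveryCode(key, &buf); err != nil {
+//        return err
+//    }
+//    restored, err := ReadRecoveryCode(&buf)
+//    if err != nil {
+//        return err
+//    }
+//    defer restored.Wipe()
+//    // restored.Equals(key) is expected to hold.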
+func NewRandomKey(length int) (*Key, error) { + return NewFixedLengthKeyFromReader(randReader{}, length) +} + +// NewRandomPassphrase creates a random passphrase of the specified length +// containing random alphabetic characters. +func NewRandomPassphrase(length int) (*Key, error) { + chars := []byte("abcdefghijklmnopqrstuvwxyz") + passphrase, err := NewBlankKey(length) + if err != nil { + return nil, err + } + for i := 0; i < length; { + // Get some random bytes. + raw, err := NewRandomKey((length - i) * 2) + if err != nil { + return nil, err + } + // Translate the random bytes into random characters. + for _, b := range raw.data { + if int(b) >= 256-(256%len(chars)) { + // Avoid bias towards the first characters in the list. + continue + } + c := chars[int(b)%len(chars)] + passphrase.data[i] = c + i++ + if i == length { + break + } + } + raw.Wipe() + } + return passphrase, nil +} + +// randReader just calls into Getrandom, so no internal data is needed. +type randReader struct{} + +func (r randReader) Read(buffer []byte) (int, error) { + n, err := unix.Getrandom(buffer, unix.GRND_NONBLOCK) + switch err { + case nil: + return n, nil + case unix.EAGAIN: + err = errors.New("insufficient entropy in pool") + case unix.ENOSYS: + err = errors.New("kernel must be v3.17 or later") + } + return 0, errors.Wrap(err, "getrandom() failed") +} diff --git a/vendor/github.com/google/fscrypt/filesystem/filesystem.go b/vendor/github.com/google/fscrypt/filesystem/filesystem.go new file mode 100644 index 000000000..27bfa2415 --- /dev/null +++ b/vendor/github.com/google/fscrypt/filesystem/filesystem.go @@ -0,0 +1,1088 @@ +/* + * filesystem.go - Contains the functionality for a specific filesystem. This + * includes the commands to setup the filesystem, apply policies, and locate + * metadata. + * + * Copyright 2017 Google Inc. + * Author: Joe Richey (joerichey@google.com) + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not + * use this file except in compliance with the License. You may obtain a copy of + * the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. + */ + +// Package filesystem deals with the structure of the files on disk used to +// store the metadata for fscrypt. Specifically, this package includes: +// - mountpoint management (mountpoint.go) +// - querying existing mounted filesystems +// - getting filesystems from a UUID +// - finding the filesystem for a specific path +// - metadata organization (filesystem.go) +// - setting up a mounted filesystem for use with fscrypt +// - adding/querying/deleting metadata +// - making links to other filesystems' metadata +// - following links to get data from other filesystems +package filesystem + +import ( + "fmt" + "io" + "io/ioutil" + "log" + "os" + "os/user" + "path/filepath" + "sort" + "strings" + "syscall" + "time" + + "github.com/golang/protobuf/proto" + "github.com/pkg/errors" + "golang.org/x/sys/unix" + + "github.com/google/fscrypt/metadata" + "github.com/google/fscrypt/util" +) + +// ErrAlreadySetup indicates that a filesystem is already setup for fscrypt. 
+type ErrAlreadySetup struct { + Mount *Mount +} + +func (err *ErrAlreadySetup) Error() string { + return fmt.Sprintf("filesystem %s is already setup for use with fscrypt", + err.Mount.Path) +} + +// ErrCorruptMetadata indicates that an fscrypt metadata file is corrupt. +type ErrCorruptMetadata struct { + Path string + UnderlyingError error +} + +func (err *ErrCorruptMetadata) Error() string { + return fmt.Sprintf("fscrypt metadata file at %q is corrupt: %s", + err.Path, err.UnderlyingError) +} + +// ErrFollowLink indicates that a protector link can't be followed. +type ErrFollowLink struct { + Link string + UnderlyingError error +} + +func (err *ErrFollowLink) Error() string { + return fmt.Sprintf("cannot follow filesystem link %q: %s", + err.Link, err.UnderlyingError) +} + +// ErrInsecurePermissions indicates that a filesystem is not considered to be +// setup for fscrypt because a metadata directory has insecure permissions. +type ErrInsecurePermissions struct { + Path string +} + +func (err *ErrInsecurePermissions) Error() string { + return fmt.Sprintf("%q has insecure permissions (world-writable without sticky bit)", + err.Path) +} + +// ErrMakeLink indicates that a protector link can't be created. +type ErrMakeLink struct { + Target *Mount + UnderlyingError error +} + +func (err *ErrMakeLink) Error() string { + return fmt.Sprintf("cannot create filesystem link to %q: %s", + err.Target.Path, err.UnderlyingError) +} + +// ErrMountOwnedByAnotherUser indicates that the mountpoint root directory is +// owned by a user that isn't trusted in the current context, so we don't +// consider fscrypt to be properly setup on the filesystem. +type ErrMountOwnedByAnotherUser struct { + Mount *Mount +} + +func (err *ErrMountOwnedByAnotherUser) Error() string { + return fmt.Sprintf("another non-root user owns the root directory of %s", err.Mount.Path) +} + +// ErrNoCreatePermission indicates that the current user lacks permission to +// create fscrypt metadata on the given filesystem. +type ErrNoCreatePermission struct { + Mount *Mount +} + +func (err *ErrNoCreatePermission) Error() string { + return fmt.Sprintf("user lacks permission to create fscrypt metadata on %s", err.Mount.Path) +} + +// ErrNotAMountpoint indicates that a path is not a mountpoint. +type ErrNotAMountpoint struct { + Path string +} + +func (err *ErrNotAMountpoint) Error() string { + return fmt.Sprintf("%q is not a mountpoint", err.Path) +} + +// ErrNotSetup indicates that a filesystem is not setup for fscrypt. +type ErrNotSetup struct { + Mount *Mount +} + +func (err *ErrNotSetup) Error() string { + return fmt.Sprintf("filesystem %s is not setup for use with fscrypt", err.Mount.Path) +} + +// ErrSetupByAnotherUser indicates that one or more of the fscrypt metadata +// directories is owned by a user that isn't trusted in the current context, so +// we don't consider fscrypt to be properly setup on the filesystem. +type ErrSetupByAnotherUser struct { + Mount *Mount +} + +func (err *ErrSetupByAnotherUser) Error() string { + return fmt.Sprintf("another non-root user owns fscrypt metadata directories on %s", err.Mount.Path) +} + +// ErrSetupNotSupported indicates that the given filesystem type is not +// supported for fscrypt setup. +type ErrSetupNotSupported struct { + Mount *Mount +} + +func (err *ErrSetupNotSupported) Error() string { + return fmt.Sprintf("filesystem type %s is not supported for fscrypt setup", + err.Mount.FilesystemType) +} + +// ErrPolicyNotFound indicates that the policy metadata was not found. 
+type ErrPolicyNotFound struct { + Descriptor string + Mount *Mount +} + +func (err *ErrPolicyNotFound) Error() string { + return fmt.Sprintf("policy metadata for %s not found on filesystem %s", + err.Descriptor, err.Mount.Path) +} + +// ErrProtectorNotFound indicates that the protector metadata was not found. +type ErrProtectorNotFound struct { + Descriptor string + Mount *Mount +} + +func (err *ErrProtectorNotFound) Error() string { + return fmt.Sprintf("protector metadata for %s not found on filesystem %s", + err.Descriptor, err.Mount.Path) +} + +// SortDescriptorsByLastMtime indicates whether descriptors are sorted by last +// modification time when being listed. This can be set to true to get +// consistent output for testing. +var SortDescriptorsByLastMtime = false + +// Mount contains information for a specific mounted filesystem. +// Path - Absolute path where the directory is mounted +// FilesystemType - Type of the mounted filesystem, e.g. "ext4" +// Device - Device for filesystem (empty string if we cannot find one) +// DeviceNumber - Device number of the filesystem. This is set even if +// Device isn't, since all filesystems have a device +// number assigned by the kernel, even pseudo-filesystems. +// Subtree - The mounted subtree of the filesystem. This is usually +// "/", meaning that the entire filesystem is mounted, but +// it can differ for bind mounts. +// ReadOnly - True if this is a read-only mount +// +// In order to use a Mount to store fscrypt metadata, some directories must be +// setup first. Specifically, the directories created look like: +// +// └── .fscrypt +// ├── policies +// └── protectors +// +// These "policies" and "protectors" directories will contain files that are +// the corresponding metadata structures for policies and protectors. The public +// interface includes functions for setting up these directories and Adding, +// Getting, and Removing these files. +// +// There is also the ability to reference another filesystem's metadata. This is +// used when a Policy on filesystem A is protected with Protector on filesystem +// B. In this scenario, we store a "link file" in the protectors directory. +// +// We also allow ".fscrypt" to be a symlink which was previously created. This +// allows login protectors to be created when the root filesystem is read-only, +// provided that "/.fscrypt" is a symlink pointing to a writable location. +type Mount struct { + Path string + FilesystemType string + Device string + DeviceNumber DeviceNumber + Subtree string + ReadOnly bool +} + +// PathSorter allows mounts to be sorted by Path. +type PathSorter []*Mount + +func (p PathSorter) Len() int { return len(p) } +func (p PathSorter) Swap(i, j int) { p[i], p[j] = p[j], p[i] } +func (p PathSorter) Less(i, j int) bool { return p[i].Path < p[j].Path } + +const ( + // Names of the various directories used in fscrypt + baseDirName = ".fscrypt" + policyDirName = "policies" + protectorDirName = "protectors" + tempPrefix = ".tmp" + linkFileExtension = ".link" + + // The base directory should be read-only (except for the creator) + basePermissions = 0755 + + // The metadata files shouldn't be readable or writable by other users. + // Having them be world-readable wouldn't necessarily be a huge issue, + // but given that some of these files contain (strong) password hashes, + // we error on the side of caution -- similar to /etc/shadow. + // Note: existing files on-disk might have mode 0644, as that was the + // mode used by fscrypt v0.3.2 and earlier. 
+ filePermissions = os.FileMode(0600) + + // Maximum size of a metadata file. This value is arbitrary, and it can + // be changed. We just set a reasonable limit that shouldn't be reached + // in practice, except by users trying to cause havoc by creating + // extremely large files in the metadata directories. + maxMetadataFileSize = 16384 +) + +// SetupMode is a mode for creating the fscrypt metadata directories. +type SetupMode int + +const ( + // SingleUserWritable specifies to make the fscrypt metadata directories + // writable by a single user (usually root) only. + SingleUserWritable SetupMode = iota + // WorldWritable specifies to make the fscrypt metadata directories + // world-writable (with the sticky bit set). + WorldWritable +) + +func (m *Mount) String() string { + return fmt.Sprintf(`%s + FilesystemType: %s + Device: %s`, m.Path, m.FilesystemType, m.Device) +} + +// BaseDir returns the path to the base fscrypt directory for this filesystem. +func (m *Mount) BaseDir() string { + rawBaseDir := filepath.Join(m.Path, baseDirName) + // We allow the base directory to be a symlink, but some callers need + // the real path, so dereference the symlink here if needed. Since the + // directory the symlink points to may not exist yet, we have to read + // the symlink manually rather than use filepath.EvalSymlinks. + target, err := os.Readlink(rawBaseDir) + if err != nil { + return rawBaseDir // not a symlink + } + if filepath.IsAbs(target) { + return target + } + return filepath.Join(m.Path, target) +} + +// ProtectorDir returns the directory containing the protector metadata. +func (m *Mount) ProtectorDir() string { + return filepath.Join(m.BaseDir(), protectorDirName) +} + +// protectorPath returns the full path to a regular protector file with the +// specified descriptor. +func (m *Mount) protectorPath(descriptor string) string { + return filepath.Join(m.ProtectorDir(), descriptor) +} + +// linkedProtectorPath returns the full path to a linked protector file with the +// specified descriptor. +func (m *Mount) linkedProtectorPath(descriptor string) string { + return m.protectorPath(descriptor) + linkFileExtension +} + +// PolicyDir returns the directory containing the policy metadata. +func (m *Mount) PolicyDir() string { + return filepath.Join(m.BaseDir(), policyDirName) +} + +// PolicyPath returns the full path to a regular policy file with the +// specified descriptor. +func (m *Mount) PolicyPath(descriptor string) string { + return filepath.Join(m.PolicyDir(), descriptor) +} + +// tempMount creates a temporary directory alongside this Mount's base fscrypt +// directory and returns a temporary Mount which represents this temporary +// directory. The caller is responsible for removing this temporary directory. +func (m *Mount) tempMount() (*Mount, error) { + tempDir, err := ioutil.TempDir(filepath.Dir(m.BaseDir()), tempPrefix) + return &Mount{Path: tempDir}, err +} + +// ErrEncryptionNotEnabled indicates that encryption is not enabled on the given +// filesystem. +type ErrEncryptionNotEnabled struct { + Mount *Mount +} + +func (err *ErrEncryptionNotEnabled) Error() string { + return fmt.Sprintf("encryption not enabled on filesystem %s (%s).", + err.Mount.Path, err.Mount.Device) +} + +// ErrEncryptionNotSupported indicates that encryption is not supported on the +// given filesystem. 
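+//
+// To make the directory layout above concrete (illustration only; "/home" and
+// the descriptor "abcd" are just examples), a Mount{Path: "/home"} resolves its
+// metadata paths as:
+//
+//    m.BaseDir()                   // "/home/.fscrypt" (or the symlink target)
+//    m.PolicyDir()                 // "/home/.fscrypt/policies"
+//    m.ProtectorDir()              // "/home/.fscrypt/protectors"
+//    m.PolicyPath("abcd")          // "/home/.fscrypt/policies/abcd"
+//    m.linkedProtectorPath("abcd") // "/home/.fscrypt/protectors/abcd.link"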
+type ErrEncryptionNotSupported struct { + Mount *Mount +} + +func (err *ErrEncryptionNotSupported) Error() string { + return fmt.Sprintf("This kernel doesn't support encryption on %s filesystems.", + err.Mount.FilesystemType) +} + +// EncryptionSupportError adds filesystem-specific context to the +// ErrEncryptionNotEnabled and ErrEncryptionNotSupported errors from the +// metadata package. +func (m *Mount) EncryptionSupportError(err error) error { + switch err { + case metadata.ErrEncryptionNotEnabled: + return &ErrEncryptionNotEnabled{m} + case metadata.ErrEncryptionNotSupported: + return &ErrEncryptionNotSupported{m} + } + return err +} + +// isFscryptSetupAllowed decides whether the given filesystem is allowed to be +// set up for fscrypt, without actually accessing it. This basically checks +// whether the filesystem type is one of the types that supports encryption, or +// at least is in some stage of planning for encrption support in the future. +// +// We need this list so that we can skip filesystems that are irrelevant for +// fscrypt without having to look for the fscrypt metadata directories on them, +// which can trigger errors, long delays, or side effects on some filesystems. +// +// Unfortunately, this means that if a completely new filesystem adds encryption +// support, then it will need to be manually added to this list. But it seems +// to be a worthwhile tradeoff to avoid the above issues. +func (m *Mount) isFscryptSetupAllowed() bool { + if m.Path == "/" { + // The root filesystem is always allowed, since it's where login + // protectors are stored. + return true + } + switch m.FilesystemType { + case "ext4", "f2fs", "ubifs", "btrfs", "ceph", "xfs": + return true + default: + return false + } +} + +// CheckSupport returns an error if this filesystem does not support encryption. +func (m *Mount) CheckSupport() error { + if !m.isFscryptSetupAllowed() { + return &ErrEncryptionNotSupported{m} + } + return m.EncryptionSupportError(metadata.CheckSupport(m.Path)) +} + +func checkOwnership(path string, info os.FileInfo, trustedUser *user.User) bool { + if trustedUser == nil { + return true + } + trustedUID := uint32(util.AtoiOrPanic(trustedUser.Uid)) + actualUID := info.Sys().(*syscall.Stat_t).Uid + if actualUID != 0 && actualUID != trustedUID { + log.Printf("WARNING: %q is owned by uid %d, but expected %d or 0", + path, actualUID, trustedUID) + return false + } + return true +} + +// CheckSetup returns an error if any of the fscrypt metadata directories do not +// exist. Will log any unexpected errors or incorrect permissions. +func (m *Mount) CheckSetup(trustedUser *user.User) error { + if !m.isFscryptSetupAllowed() { + return &ErrNotSetup{m} + } + // Check that the mountpoint directory itself is not a symlink and has + // proper ownership, as otherwise we can't trust anything beneath it. + info, err := loggedLstat(m.Path) + if err != nil { + return &ErrNotSetup{m} + } + if (info.Mode() & os.ModeSymlink) != 0 { + log.Printf("mountpoint directory %q cannot be a symlink", m.Path) + return &ErrNotSetup{m} + } + if !info.IsDir() { + log.Printf("mountpoint %q is not a directory", m.Path) + return &ErrNotSetup{m} + } + if !checkOwnership(m.Path, info, trustedUser) { + return &ErrMountOwnedByAnotherUser{m} + } + + // Check BaseDir similarly. However, unlike the other directories, we + // allow BaseDir to be a symlink, to support the use case of metadata + // for a read-only filesystem being redirected to a writable location. 
+ info, err = loggedStat(m.BaseDir()) + if err != nil { + return &ErrNotSetup{m} + } + if !info.IsDir() { + log.Printf("%q is not a directory", m.BaseDir()) + return &ErrNotSetup{m} + } + if !checkOwnership(m.Path, info, trustedUser) { + return &ErrMountOwnedByAnotherUser{m} + } + + // Check that the policies and protectors directories aren't symlinks and + // have proper ownership. + subdirs := []string{m.PolicyDir(), m.ProtectorDir()} + for _, path := range subdirs { + info, err := loggedLstat(path) + if err != nil { + return &ErrNotSetup{m} + } + if (info.Mode() & os.ModeSymlink) != 0 { + log.Printf("directory %q cannot be a symlink", path) + return &ErrNotSetup{m} + } + if !info.IsDir() { + log.Printf("%q is not a directory", path) + return &ErrNotSetup{m} + } + // We are no longer too picky about the mode, given that + // 'fscrypt setup' now offers a choice of two different modes, + // and system administrators could customize it further. + // However, we can at least verify that if the directory is + // world-writable, then the sticky bit is also set. + if info.Mode()&(os.ModeSticky|0002) == 0002 { + log.Printf("%q is world-writable but doesn't have sticky bit set", path) + return &ErrInsecurePermissions{path} + } + if !checkOwnership(path, info, trustedUser) { + return &ErrSetupByAnotherUser{m} + } + } + return nil +} + +// makeDirectories creates the three metadata directories with the correct +// permissions. Note that this function overrides the umask. +func (m *Mount) makeDirectories(setupMode SetupMode) error { + // Zero the umask so we get the permissions we want + oldMask := unix.Umask(0) + defer func() { + unix.Umask(oldMask) + }() + + if err := os.Mkdir(m.BaseDir(), basePermissions); err != nil { + return err + } + + var dirMode os.FileMode + switch setupMode { + case SingleUserWritable: + dirMode = 0755 + case WorldWritable: + dirMode = os.ModeSticky | 0777 + } + if err := os.Mkdir(m.PolicyDir(), dirMode); err != nil { + return err + } + return os.Mkdir(m.ProtectorDir(), dirMode) +} + +// GetSetupMode returns the current mode for fscrypt metadata creation on this +// filesystem. +func (m *Mount) GetSetupMode() (SetupMode, *user.User, error) { + info1, err1 := os.Stat(m.PolicyDir()) + info2, err2 := os.Stat(m.ProtectorDir()) + + if err1 == nil && err2 == nil { + mask := os.ModeSticky | 0777 + mode1 := info1.Mode() & mask + mode2 := info2.Mode() & mask + uid1 := info1.Sys().(*syscall.Stat_t).Uid + uid2 := info2.Sys().(*syscall.Stat_t).Uid + user, err := util.UserFromUID(int64(uid1)) + if err == nil && mode1 == mode2 && uid1 == uid2 { + switch mode1 { + case mask: + return WorldWritable, nil, nil + case 0755: + return SingleUserWritable, user, nil + } + } + log.Printf("filesystem %s uses custom permissions on metadata directories", m.Path) + } + return -1, nil, errors.New("unable to determine setup mode") +} + +// Setup sets up the filesystem for use with fscrypt. Note that this merely +// creates the appropriate files on the filesystem. It does not actually modify +// the filesystem's feature flags. This operation is atomic; it either succeeds +// or no files in the baseDir are created. +func (m *Mount) Setup(mode SetupMode) error { + if m.CheckSetup(nil) == nil { + return &ErrAlreadySetup{m} + } + if !m.isFscryptSetupAllowed() { + return &ErrSetupNotSupported{m} + } + // We build the directories under a temp Mount and then move into place. 
+ temp, err := m.tempMount() + if err != nil { + return err + } + defer os.RemoveAll(temp.Path) + + if err = temp.makeDirectories(mode); err != nil { + return err + } + + // Atomically move directory into place. + return os.Rename(temp.BaseDir(), m.BaseDir()) +} + +// RemoveAllMetadata removes all the policy and protector metadata from the +// filesystem. This operation is atomic; it either succeeds or no files in the +// baseDir are removed. +// WARNING: Will cause data loss if the metadata is used to encrypt +// directories (this could include directories on other filesystems). +func (m *Mount) RemoveAllMetadata() error { + if err := m.CheckSetup(nil); err != nil { + return err + } + // temp will hold the old metadata temporarily + temp, err := m.tempMount() + if err != nil { + return err + } + defer os.RemoveAll(temp.Path) + + // Move directory into temp (to be destroyed on defer) + return os.Rename(m.BaseDir(), temp.BaseDir()) +} + +func syncDirectory(dirPath string) error { + dirFile, err := os.Open(dirPath) + if err != nil { + return err + } + if err = dirFile.Sync(); err != nil { + dirFile.Close() + return err + } + return dirFile.Close() +} + +func (m *Mount) overwriteDataNonAtomic(path string, data []byte) error { + file, err := os.OpenFile(path, os.O_WRONLY|os.O_TRUNC|unix.O_NOFOLLOW, 0) + if err != nil { + return err + } + if _, err = file.Write(data); err != nil { + log.Printf("WARNING: overwrite of %q failed; file will be corrupted!", path) + file.Close() + return err + } + if err = file.Sync(); err != nil { + file.Close() + return err + } + if err = file.Close(); err != nil { + return err + } + log.Printf("successfully overwrote %q non-atomically", path) + return nil +} + +// writeData writes the given data to the given path such that, if possible, the +// data is either written to stable storage or an error is returned. If a file +// already exists at the path, it will be replaced. +// +// However, if the process doesn't have write permission to the directory but +// does have write permission to the file itself, then as a fallback the file is +// overwritten in-place rather than replaced. Note that this may be non-atomic. +func (m *Mount) writeData(path string, data []byte, owner *user.User, mode os.FileMode) error { + // Write the data to a temporary file, sync it, then rename into place + // so that the operation will be atomic. + dirPath := filepath.Dir(path) + tempFile, err := ioutil.TempFile(dirPath, tempPrefix) + if err != nil { + log.Print(err) + if os.IsPermission(err) { + if _, err = os.Lstat(path); err == nil { + log.Printf("trying non-atomic overwrite of %q", path) + return m.overwriteDataNonAtomic(path, data) + } + return &ErrNoCreatePermission{m} + } + return err + } + defer os.Remove(tempFile.Name()) + + // Ensure the new file has the right permissions mask. + if err = tempFile.Chmod(mode); err != nil { + tempFile.Close() + return err + } + // Override the file owner if one was specified. This happens when root + // needs to create files owned by a particular user. 
+ if owner != nil { + if err = util.Chown(tempFile, owner); err != nil { + log.Printf("could not set owner of %q to %v: %v", + path, owner.Username, err) + tempFile.Close() + return err + } + } + if _, err = tempFile.Write(data); err != nil { + tempFile.Close() + return err + } + if err = tempFile.Sync(); err != nil { + tempFile.Close() + return err + } + if err = tempFile.Close(); err != nil { + return err + } + + if err = os.Rename(tempFile.Name(), path); err != nil { + return err + } + // Ensure the rename has been persisted before returning success. + return syncDirectory(dirPath) +} + +// addMetadata writes the metadata structure to the file with the specified +// path. This will overwrite any existing data. The operation is atomic. +func (m *Mount) addMetadata(path string, md metadata.Metadata, owner *user.User) error { + if err := md.CheckValidity(); err != nil { + return errors.Wrap(err, "provided metadata is invalid") + } + + data, err := proto.Marshal(md) + if err != nil { + return err + } + + mode := filePermissions + // If the file already exists, then preserve its owner and mode if + // possible. This is necessary because by default, for atomicity + // reasons we'll replace the file rather than overwrite it. + info, err := os.Lstat(path) + if err == nil { + if owner == nil && util.IsUserRoot() { + uid := info.Sys().(*syscall.Stat_t).Uid + if owner, err = util.UserFromUID(int64(uid)); err != nil { + log.Print(err) + } + } + mode = info.Mode() & 0777 + } else if !os.IsNotExist(err) { + log.Print(err) + } + + if owner != nil { + log.Printf("writing metadata to %q and setting owner to %s", path, owner.Username) + } else { + log.Printf("writing metadata to %q", path) + } + return m.writeData(path, data, owner, mode) +} + +// readMetadataFileSafe gets the contents of a metadata file extra-carefully, +// considering that it could be a malicious file created to cause a +// denial-of-service. Specifically, the following checks are done: +// +// - It must be a regular file, not another type of file like a symlink or FIFO. +// (Symlinks aren't bad by themselves, but given that a malicious user could +// point one to absolutely anywhere, and there is no known use case for the +// metadata files themselves being symlinks, it seems best to disallow them.) +// - It must have a reasonable size (<= maxMetadataFileSize). +// - If trustedUser is non-nil, then the file must be owned by the given user +// or by root. +// +// Take care to avoid TOCTOU (time-of-check-time-of-use) bugs when doing these +// tests. Notably, we must open the file before checking the file type, as the +// file type could change between any previous checks and the open. When doing +// this, O_NOFOLLOW is needed to avoid following a symlink (this applies to the +// last path component only), and O_NONBLOCK is needed to avoid blocking if the +// file is a FIFO. +// +// This function returns the data read as well as the UID of the user who owns +// the file. The returned UID is needed for login protectors, where the UID +// needs to be cross-checked with the UID stored in the file itself. 
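The writeData helper above (together with syncDirectory) implements the usual crash-safe replace: write to a temp file in the same directory, fsync it, rename it over the target, then fsync the directory. A minimal standalone sketch of that pattern follows; it is illustrative only, not part of the vendored fscrypt code, and the path used in main is a made-up example.

package main

import (
	"io/ioutil"
	"log"
	"os"
	"path/filepath"
)

// atomicWriteFile mirrors the pattern used by writeData above: the data only
// becomes visible at 'path' once it has been fully written and synced.
func atomicWriteFile(path string, data []byte, mode os.FileMode) error {
	dirPath := filepath.Dir(path)
	tmp, err := ioutil.TempFile(dirPath, ".tmp-")
	if err != nil {
		return err
	}
	// Clean up the temp file on failure; after a successful rename this
	// remove simply fails and is ignored.
	defer os.Remove(tmp.Name())

	if err := tmp.Chmod(mode); err != nil {
		tmp.Close()
		return err
	}
	if _, err := tmp.Write(data); err != nil {
		tmp.Close()
		return err
	}
	if err := tmp.Sync(); err != nil {
		tmp.Close()
		return err
	}
	if err := tmp.Close(); err != nil {
		return err
	}
	if err := os.Rename(tmp.Name(), path); err != nil {
		return err
	}
	// Persist the rename itself by syncing the containing directory,
	// as syncDirectory above does.
	dir, err := os.Open(dirPath)
	if err != nil {
		return err
	}
	defer dir.Close()
	return dir.Sync()
}

func main() {
	if err := atomicWriteFile("/tmp/fscrypt-example.bin", []byte("example"), 0600); err != nil {
		log.Fatal(err)
	}
}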
+func readMetadataFileSafe(path string, trustedUser *user.User) ([]byte, int64, error) { + file, err := os.OpenFile(path, os.O_RDONLY|unix.O_NOFOLLOW|unix.O_NONBLOCK, 0) + if err != nil { + return nil, -1, err + } + defer file.Close() + + info, err := file.Stat() + if err != nil { + return nil, -1, err + } + if !info.Mode().IsRegular() { + return nil, -1, &ErrCorruptMetadata{path, errors.New("not a regular file")} + } + if !checkOwnership(path, info, trustedUser) { + return nil, -1, &ErrCorruptMetadata{path, errors.New("metadata file belongs to another user")} + } + // Clear O_NONBLOCK, since it has served its purpose when opening the + // file, and the behavior of reading from a regular file with O_NONBLOCK + // is technically unspecified. + if _, err = unix.FcntlInt(file.Fd(), unix.F_SETFL, 0); err != nil { + return nil, -1, &os.PathError{Op: "clearing O_NONBLOCK", Path: path, Err: err} + } + // Read the file contents, allowing at most maxMetadataFileSize bytes. + reader := &io.LimitedReader{R: file, N: maxMetadataFileSize + 1} + data, err := ioutil.ReadAll(reader) + if err != nil { + return nil, -1, err + } + if reader.N == 0 { + return nil, -1, &ErrCorruptMetadata{path, errors.New("metadata file size limit exceeded")} + } + return data, int64(info.Sys().(*syscall.Stat_t).Uid), nil +} + +// getMetadata reads the metadata structure from the file with the specified +// path. Only reads normal metadata files, not linked metadata. +func (m *Mount) getMetadata(path string, trustedUser *user.User, md metadata.Metadata) (int64, error) { + data, owner, err := readMetadataFileSafe(path, trustedUser) + if err != nil { + log.Printf("could not read metadata from %q: %v", path, err) + return -1, err + } + + if err := proto.Unmarshal(data, md); err != nil { + return -1, &ErrCorruptMetadata{path, err} + } + + if err := md.CheckValidity(); err != nil { + return -1, &ErrCorruptMetadata{path, err} + } + + log.Printf("successfully read metadata from %q", path) + return owner, nil +} + +// removeMetadata deletes the metadata struct from the file with the specified +// path. Works with regular or linked metadata. +func (m *Mount) removeMetadata(path string) error { + if err := os.Remove(path); err != nil { + log.Printf("could not remove metadata file at %q: %v", path, err) + return err + } + + log.Printf("successfully removed metadata file at %q", path) + return nil +} + +// AddProtector adds the protector metadata to this filesystem's storage. This +// will overwrite the value of an existing protector with this descriptor. This +// will fail with ErrLinkedProtector if a linked protector with this descriptor +// already exists on the filesystem. +func (m *Mount) AddProtector(data *metadata.ProtectorData, owner *user.User) error { + var err error + if err = m.CheckSetup(nil); err != nil { + return err + } + if isRegularFile(m.linkedProtectorPath(data.ProtectorDescriptor)) { + return errors.Errorf("cannot modify linked protector %s on filesystem %s", + data.ProtectorDescriptor, m.Path) + } + path := m.protectorPath(data.ProtectorDescriptor) + return m.addMetadata(path, data, owner) +} + +// AddLinkedProtector adds a link in this filesystem to the protector metadata +// in the dest filesystem, if one doesn't already exist. On success, the return +// value is a nil error and a bool that is true iff the link is newly created. 
+func (m *Mount) AddLinkedProtector(descriptor string, dest *Mount, trustedUser *user.User, + ownerIfCreating *user.User) (bool, error) { + if err := m.CheckSetup(trustedUser); err != nil { + return false, err + } + // Check that the link is good (descriptor exists, filesystem has UUID). + if _, err := dest.GetRegularProtector(descriptor, trustedUser); err != nil { + return false, err + } + + linkPath := m.linkedProtectorPath(descriptor) + + // Check whether the link already exists. + existingLink, _, err := readMetadataFileSafe(linkPath, trustedUser) + if err == nil { + existingLinkedMnt, err := getMountFromLink(string(existingLink)) + if err != nil { + return false, errors.Wrap(err, linkPath) + } + if existingLinkedMnt != dest { + return false, errors.Errorf("link %q points to %q, but expected %q", + linkPath, existingLinkedMnt.Path, dest.Path) + } + return false, nil + } + if !os.IsNotExist(err) { + return false, err + } + + var newLink string + newLink, err = makeLink(dest) + if err != nil { + return false, err + } + return true, m.writeData(linkPath, []byte(newLink), ownerIfCreating, filePermissions) +} + +// GetRegularProtector looks up the protector metadata by descriptor. This will +// fail with ErrProtectorNotFound if the descriptor is a linked protector. +func (m *Mount) GetRegularProtector(descriptor string, trustedUser *user.User) (*metadata.ProtectorData, error) { + if err := m.CheckSetup(trustedUser); err != nil { + return nil, err + } + data := new(metadata.ProtectorData) + path := m.protectorPath(descriptor) + owner, err := m.getMetadata(path, trustedUser, data) + if os.IsNotExist(err) { + err = &ErrProtectorNotFound{descriptor, m} + } + if err != nil { + return nil, err + } + // Login protectors have their UID stored in the file. Since normally + // any user can create files in the fscrypt metadata directories, for a + // login protector to be considered valid it *must* be owned by the + // claimed user or by root. Note: fscrypt v0.3.2 and later always makes + // login protectors owned by the user, but previous versions could + // create them owned by root -- that is the main reason we allow root. + if data.Source == metadata.SourceType_pam_passphrase && owner != 0 && owner != data.Uid { + log.Printf("WARNING: %q claims to be the login protector for uid %d, but it is owned by uid %d. Needs to be %d or 0.", + path, data.Uid, owner, data.Uid) + return nil, &ErrCorruptMetadata{path, errors.New("login protector belongs to wrong user")} + } + return data, nil +} + +// GetProtector returns the Mount of the filesystem containing the information +// and that protector's data. If the descriptor is a regular (not linked) +// protector, the mount will return itself. +func (m *Mount) GetProtector(descriptor string, trustedUser *user.User) (*Mount, *metadata.ProtectorData, error) { + if err := m.CheckSetup(trustedUser); err != nil { + return nil, nil, err + } + // Get the link data from the link file + path := m.linkedProtectorPath(descriptor) + link, _, err := readMetadataFileSafe(path, trustedUser) + if err != nil { + // If the link doesn't exist, try for a regular protector. 
+ if os.IsNotExist(err) { + data, err := m.GetRegularProtector(descriptor, trustedUser) + return m, data, err + } + return nil, nil, err + } + log.Printf("following protector link %s", path) + linkedMnt, err := getMountFromLink(string(link)) + if err != nil { + return nil, nil, errors.Wrap(err, path) + } + data, err := linkedMnt.GetRegularProtector(descriptor, trustedUser) + if err != nil { + return nil, nil, &ErrFollowLink{string(link), err} + } + return linkedMnt, data, nil +} + +// RemoveProtector deletes the protector metadata (or a link to another +// filesystem's metadata) from the filesystem storage. +func (m *Mount) RemoveProtector(descriptor string) error { + if err := m.CheckSetup(nil); err != nil { + return err + } + // We first try to remove the linkedProtector. If that metadata does not + // exist, we try to remove the normal protector. + err := m.removeMetadata(m.linkedProtectorPath(descriptor)) + if os.IsNotExist(err) { + err = m.removeMetadata(m.protectorPath(descriptor)) + if os.IsNotExist(err) { + err = &ErrProtectorNotFound{descriptor, m} + } + } + return err +} + +// ListProtectors lists the descriptors of all protectors on this filesystem. +// This does not include linked protectors. If trustedUser is non-nil, then +// the protectors are restricted to those owned by the given user or by root. +func (m *Mount) ListProtectors(trustedUser *user.User) ([]string, error) { + return m.listMetadata(m.ProtectorDir(), "protectors", trustedUser) +} + +// AddPolicy adds the policy metadata to the filesystem storage. +func (m *Mount) AddPolicy(data *metadata.PolicyData, owner *user.User) error { + if err := m.CheckSetup(nil); err != nil { + return err + } + + return m.addMetadata(m.PolicyPath(data.KeyDescriptor), data, owner) +} + +// GetPolicy looks up the policy metadata by descriptor. +func (m *Mount) GetPolicy(descriptor string, trustedUser *user.User) (*metadata.PolicyData, error) { + if err := m.CheckSetup(trustedUser); err != nil { + return nil, err + } + data := new(metadata.PolicyData) + _, err := m.getMetadata(m.PolicyPath(descriptor), trustedUser, data) + if os.IsNotExist(err) { + err = &ErrPolicyNotFound{descriptor, m} + } + return data, err +} + +// RemovePolicy deletes the policy metadata from the filesystem storage. +func (m *Mount) RemovePolicy(descriptor string) error { + if err := m.CheckSetup(nil); err != nil { + return err + } + err := m.removeMetadata(m.PolicyPath(descriptor)) + if os.IsNotExist(err) { + err = &ErrPolicyNotFound{descriptor, m} + } + return err +} + +// ListPolicies lists the descriptors of all policies on this filesystem. If +// trustedUser is non-nil, then the policies are restricted to those owned by +// the given user or by root. 
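The exported helpers above and just below (ListProtectors, ListPolicies, GetPolicy) form the read-only surface of this metadata store. A hedged usage sketch, assuming a filesystem already set up for fscrypt; the mountpoint is a placeholder, and GetMount is defined in filesystem/mountpoint.go later in this diff.

package main

import (
	"fmt"
	"log"

	"github.com/google/fscrypt/filesystem"
)

func main() {
	// Placeholder mountpoint for the example.
	mnt, err := filesystem.GetMount("/mnt/data")
	if err != nil {
		log.Fatal(err)
	}
	// A nil *user.User disables the ownership filtering described above.
	protectors, err := mnt.ListProtectors(nil)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("protectors:", protectors)

	policies, err := mnt.ListPolicies(nil)
	if err != nil {
		log.Fatal(err)
	}
	for _, desc := range policies {
		policy, err := mnt.GetPolicy(desc, nil)
		if err != nil {
			log.Fatal(err)
		}
		fmt.Println("policy", desc, "->", policy.KeyDescriptor)
	}
}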
+func (m *Mount) ListPolicies(trustedUser *user.User) ([]string, error) { + return m.listMetadata(m.PolicyDir(), "policies", trustedUser) +} + +type namesAndTimes struct { + names []string + times []time.Time +} + +func (c namesAndTimes) Len() int { + return len(c.names) +} + +func (c namesAndTimes) Less(i, j int) bool { + return c.times[i].Before(c.times[j]) +} + +func (c namesAndTimes) Swap(i, j int) { + c.names[i], c.names[j] = c.names[j], c.names[i] + c.times[i], c.times[j] = c.times[j], c.times[i] +} + +func sortFileListByLastMtime(directoryPath string, names []string) error { + c := namesAndTimes{names: names, times: make([]time.Time, len(names))} + for i, name := range names { + fi, err := os.Lstat(filepath.Join(directoryPath, name)) + if err != nil { + return err + } + c.times[i] = fi.ModTime() + } + sort.Sort(c) + return nil +} + +// listDirectory returns a list of descriptors for a metadata directory, +// including files which are links to other filesystem's metadata. +func (m *Mount) listDirectory(directoryPath string) ([]string, error) { + dir, err := os.Open(directoryPath) + if err != nil { + return nil, err + } + defer dir.Close() + + names, err := dir.Readdirnames(-1) + if err != nil { + return nil, err + } + + if SortDescriptorsByLastMtime { + if err := sortFileListByLastMtime(directoryPath, names); err != nil { + return nil, err + } + } + + descriptors := make([]string, 0, len(names)) + for _, name := range names { + // Be sure to include links as well + descriptors = append(descriptors, strings.TrimSuffix(name, linkFileExtension)) + } + return descriptors, nil +} + +func (m *Mount) listMetadata(dirPath string, metadataType string, owner *user.User) ([]string, error) { + log.Printf("listing %s in %q", metadataType, dirPath) + if err := m.CheckSetup(owner); err != nil { + return nil, err + } + names, err := m.listDirectory(dirPath) + if err != nil { + return nil, err + } + filesIgnoredDescription := "" + if owner != nil { + filteredNames := make([]string, 0, len(names)) + uid := uint32(util.AtoiOrPanic(owner.Uid)) + for _, name := range names { + info, err := os.Lstat(filepath.Join(dirPath, name)) + if err != nil { + continue + } + fileUID := info.Sys().(*syscall.Stat_t).Uid + if fileUID != uid && fileUID != 0 { + continue + } + filteredNames = append(filteredNames, name) + } + numIgnored := len(names) - len(filteredNames) + if numIgnored != 0 { + filesIgnoredDescription = + fmt.Sprintf(" (ignored %d %s not owned by %s or root)", + numIgnored, metadataType, owner.Username) + } + names = filteredNames + } + log.Printf("found %d %s%s", len(names), metadataType, filesIgnoredDescription) + return names, nil +} diff --git a/vendor/github.com/google/fscrypt/filesystem/mountpoint.go b/vendor/github.com/google/fscrypt/filesystem/mountpoint.go new file mode 100644 index 000000000..0b0693b2b --- /dev/null +++ b/vendor/github.com/google/fscrypt/filesystem/mountpoint.go @@ -0,0 +1,578 @@ +/* + * mountpoint.go - Contains all the functionality for finding mountpoints and + * using UUIDs to refer to them. Specifically, we can find the mountpoint of a + * path, get info about a mountpoint, and find mountpoints with a specific UUID. + * + * Copyright 2017 Google Inc. + * Author: Joe Richey (joerichey@google.com) + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not + * use this file except in compliance with the License. 
You may obtain a copy of + * the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. + */ + +package filesystem + +import ( + "bufio" + "fmt" + "io" + "io/ioutil" + "log" + "os" + "path/filepath" + "sort" + "strconv" + "strings" + "sync" + + "github.com/pkg/errors" +) + +var ( + // These maps hold data about the state of the system's filesystems. + // + // They only contain one Mount per filesystem, even if there are + // additional bind mounts, since we want to store fscrypt metadata in + // only one place per filesystem. When it is ambiguous which Mount + // should be used for a filesystem, mountsByDevice will contain an + // explicit nil entry, and mountsByPath won't contain an entry. + mountsByDevice map[DeviceNumber]*Mount + mountsByPath map[string]*Mount + // Used to make the mount functions thread safe + mountMutex sync.Mutex + // True if the maps have been successfully initialized. + mountsInitialized bool + // Supported tokens for filesystem links + uuidToken = "UUID" + pathToken = "PATH" + // Location to perform UUID lookup + uuidDirectory = "/dev/disk/by-uuid" +) + +// Unescape octal-encoded escape sequences in a string from the mountinfo file. +// The kernel encodes the ' ', '\t', '\n', and '\\' bytes this way. This +// function exactly inverts what the kernel does, including by preserving +// invalid UTF-8. +func unescapeString(str string) string { + var sb strings.Builder + for i := 0; i < len(str); i++ { + b := str[i] + if b == '\\' && i+3 < len(str) { + if parsed, err := strconv.ParseInt(str[i+1:i+4], 8, 8); err == nil { + b = uint8(parsed) + i += 3 + } + } + sb.WriteByte(b) + } + return sb.String() +} + +// EscapeString is the reverse of unescapeString. Use this to avoid injecting +// spaces or newlines into output that uses these characters as separators. +func EscapeString(str string) string { + var sb strings.Builder + for _, b := range []byte(str) { + switch b { + case ' ', '\t', '\n', '\\': + sb.WriteString(fmt.Sprintf("\\%03o", b)) + default: + sb.WriteByte(b) + } + } + return sb.String() +} + +// We get the device name via the device number rather than use the mount source +// field directly. This is necessary to handle a rootfs that was mounted via +// the kernel command line, since mountinfo always shows /dev/root for that. +// This assumes that the device nodes are in the standard location. +func getDeviceName(num DeviceNumber) string { + linkPath := fmt.Sprintf("/sys/dev/block/%v", num) + if target, err := os.Readlink(linkPath); err == nil { + return fmt.Sprintf("/dev/%s", filepath.Base(target)) + } + return "" +} + +// Parse one line of /proc/self/mountinfo. +// +// The line contains the following space-separated fields: +// [0] mount ID +// [1] parent ID +// [2] major:minor +// [3] root +// [4] mount point +// [5] mount options +// [6...n-1] optional field(s) +// [n] separator +// [n+1] filesystem type +// [n+2] mount source +// [n+3] super options +// +// For more details, see https://www.kernel.org/doc/Documentation/filesystems/proc.txt +func parseMountInfoLine(line string) *Mount { + fields := strings.Split(line, " ") + if len(fields) < 10 { + return nil + } + + // Count the optional fields. 
In case new fields are appended later, + // don't simply assume that n == len(fields) - 4. + n := 6 + for fields[n] != "-" { + n++ + if n >= len(fields) { + return nil + } + } + if n+3 >= len(fields) { + return nil + } + + var mnt *Mount = &Mount{} + var err error + mnt.DeviceNumber, err = newDeviceNumberFromString(fields[2]) + if err != nil { + return nil + } + mnt.Subtree = unescapeString(fields[3]) + mnt.Path = unescapeString(fields[4]) + for _, opt := range strings.Split(fields[5], ",") { + if opt == "ro" { + mnt.ReadOnly = true + } + } + mnt.FilesystemType = unescapeString(fields[n+1]) + mnt.Device = getDeviceName(mnt.DeviceNumber) + return mnt +} + +type mountpointTreeNode struct { + mount *Mount + parent *mountpointTreeNode + children []*mountpointTreeNode +} + +func addUncontainedSubtreesRecursive(dst map[string]bool, + node *mountpointTreeNode, allUncontainedSubtrees map[string]bool) { + if allUncontainedSubtrees[node.mount.Subtree] { + dst[node.mount.Subtree] = true + } + for _, child := range node.children { + addUncontainedSubtreesRecursive(dst, child, allUncontainedSubtrees) + } +} + +// findMainMount finds the "main" Mount of a filesystem. The "main" Mount is +// where the filesystem's fscrypt metadata is stored. +// +// Normally, there is just one Mount and it's of the entire filesystem +// (mnt.Subtree == "/"). But in general, the filesystem might be mounted in +// multiple places, including "bind mounts" where mnt.Subtree != "/". Also, the +// filesystem might have a combination of read-write and read-only mounts. +// +// To handle most cases, we could just choose a mount with mnt.Subtree == "/", +// preferably a read-write mount. However, that doesn't work in containers +// where the "/" subtree might not be mounted. Here's a real-world example: +// +// mnt.Subtree mnt.Path +// ----------- -------- +// /var/lib/lxc/base/rootfs / +// /var/cache/pacman/pkg /var/cache/pacman/pkg +// /srv/repo/x86_64 /srv/http/x86_64 +// +// In this case, all mnt.Subtree are independent. To handle this case, we must +// choose the Mount whose mnt.Path contains the others, i.e. the first one. +// Note: the fscrypt metadata won't be usable from outside the container since +// it won't be at the real root of the filesystem, but that may be acceptable. +// +// However, we can't look *only* at mnt.Path, since in some cases mnt.Subtree is +// needed to correctly handle bind mounts. For example, in the following case, +// the first Mount should be chosen: +// +// mnt.Subtree mnt.Path +// ----------- -------- +// /foo /foo +// /foo/dir /dir +// +// To solve this, we divide the mounts into non-overlapping trees of mnt.Path. +// Then, we choose one of these trees which contains (exactly or via path +// prefix) *all* mnt.Subtree. We then return the root of this tree. In both +// the above examples, this algorithm returns the first Mount. +func findMainMount(filesystemMounts []*Mount) *Mount { + // Index this filesystem's mounts by path. Note: paths are unique here, + // since non-last mounts were already excluded earlier. + // + // Also build the set of all mounted subtrees. + filesystemMountsByPath := make(map[string]*mountpointTreeNode) + allSubtrees := make(map[string]bool) + for _, mnt := range filesystemMounts { + filesystemMountsByPath[mnt.Path] = &mountpointTreeNode{mount: mnt} + allSubtrees[mnt.Subtree] = true + } + + // Divide the mounts into non-overlapping trees of mountpoints. 
+ for path, mntNode := range filesystemMountsByPath { + for path != "/" && mntNode.parent == nil { + path = filepath.Dir(path) + if parent := filesystemMountsByPath[path]; parent != nil { + mntNode.parent = parent + parent.children = append(parent.children, mntNode) + } + } + } + + // Build the set of mounted subtrees that aren't contained in any other + // mounted subtree. + allUncontainedSubtrees := make(map[string]bool) + for subtree := range allSubtrees { + contained := false + for t := subtree; t != "/" && !contained; { + t = filepath.Dir(t) + contained = allSubtrees[t] + } + if !contained { + allUncontainedSubtrees[subtree] = true + } + } + + // Select the root of a mountpoint tree whose mounted subtrees contain + // *all* mounted subtrees. Equivalently, select a mountpoint tree in + // which every uncontained subtree is mounted. + var mainMount *Mount + for _, mntNode := range filesystemMountsByPath { + mnt := mntNode.mount + if mntNode.parent != nil { + continue + } + uncontainedSubtrees := make(map[string]bool) + addUncontainedSubtreesRecursive(uncontainedSubtrees, mntNode, allUncontainedSubtrees) + if len(uncontainedSubtrees) != len(allUncontainedSubtrees) { + continue + } + // If there's more than one eligible mount, they should have the + // same Subtree. Otherwise it's ambiguous which one to use. + if mainMount != nil && mainMount.Subtree != mnt.Subtree { + log.Printf("Unsupported case: %q (%v) has multiple non-overlapping mounts. This filesystem will be ignored!", + mnt.Device, mnt.DeviceNumber) + return nil + } + // Prefer a read-write mount to a read-only one. + if mainMount == nil || mainMount.ReadOnly { + mainMount = mnt + } + } + return mainMount +} + +// This is separate from loadMountInfo() only for unit testing. +func readMountInfo(r io.Reader) error { + mountsByDevice = make(map[DeviceNumber]*Mount) + mountsByPath = make(map[string]*Mount) + allMountsByDevice := make(map[DeviceNumber][]*Mount) + allMountsByPath := make(map[string]*Mount) + + scanner := bufio.NewScanner(r) + for scanner.Scan() { + line := scanner.Text() + mnt := parseMountInfoLine(line) + if mnt == nil { + log.Printf("ignoring invalid mountinfo line %q", line) + continue + } + + // We can only use mountpoints that are directories for fscrypt. + if !isDir(mnt.Path) { + log.Printf("ignoring mountpoint %q because it is not a directory", mnt.Path) + continue + } + + // Note this overrides the info if we have seen the mountpoint + // earlier in the file. This is correct behavior because the + // mountpoints are listed in mount order. + allMountsByPath[mnt.Path] = mnt + } + // For each filesystem, choose a "main" Mount and discard any additional + // bind mounts. fscrypt only cares about the main Mount, since it's + // where the fscrypt metadata is stored. Store all the main Mounts in + // mountsByDevice and mountsByPath so that they can be found later. + for _, mnt := range allMountsByPath { + allMountsByDevice[mnt.DeviceNumber] = + append(allMountsByDevice[mnt.DeviceNumber], mnt) + } + for deviceNumber, filesystemMounts := range allMountsByDevice { + mnt := findMainMount(filesystemMounts) + mountsByDevice[deviceNumber] = mnt // may store an explicit nil entry + if mnt != nil { + mountsByPath[mnt.Path] = mnt + } + } + return nil +} + +// loadMountInfo populates the Mount mappings by parsing /proc/self/mountinfo. +// It returns an error if the Mount mappings cannot be populated. 
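parseMountInfoLine above skips the variable-length optional fields by scanning for the "-" separator rather than assuming a fixed field count. A small standalone sketch of that layout, using an invented sample line (not part of the vendored code):

package main

import (
	"fmt"
	"strings"
)

func main() {
	// Field layout: mount ID, parent ID, major:minor, root, mount point,
	// mount options, zero or more optional fields, "-", fstype, source,
	// super options. The sample line below is invented.
	line := "36 25 8:1 / /mnt rw,relatime shared:1 - ext4 /dev/sda1 rw"
	fields := strings.Split(line, " ")

	// Skip the variable-length optional fields by scanning for the "-"
	// separator, as parseMountInfoLine does (bounds checks omitted here).
	n := 6
	for fields[n] != "-" {
		n++
	}
	fmt.Println("device number:", fields[2]) // "8:1"
	fmt.Println("subtree:", fields[3])       // "/"
	fmt.Println("mount point:", fields[4])   // "/mnt"
	fmt.Println("fstype:", fields[n+1])      // "ext4"
}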
+func loadMountInfo() error { + if !mountsInitialized { + file, err := os.Open("/proc/self/mountinfo") + if err != nil { + return err + } + defer file.Close() + if err := readMountInfo(file); err != nil { + return err + } + mountsInitialized = true + } + return nil +} + +func filesystemLacksMainMountError(deviceNumber DeviceNumber) error { + return errors.Errorf("Device %q (%v) lacks a \"main\" mountpoint in the current mount namespace, so it's ambiguous where to store the fscrypt metadata.", + getDeviceName(deviceNumber), deviceNumber) +} + +// AllFilesystems lists all mounted filesystems ordered by path to their "main" +// Mount. Use CheckSetup() to see if they are set up for use with fscrypt. +func AllFilesystems() ([]*Mount, error) { + mountMutex.Lock() + defer mountMutex.Unlock() + if err := loadMountInfo(); err != nil { + return nil, err + } + + mounts := make([]*Mount, 0, len(mountsByPath)) + for _, mount := range mountsByPath { + mounts = append(mounts, mount) + } + + sort.Sort(PathSorter(mounts)) + return mounts, nil +} + +// UpdateMountInfo updates the filesystem mountpoint maps with the current state +// of the filesystem mountpoints. Returns error if the initialization fails. +func UpdateMountInfo() error { + mountMutex.Lock() + defer mountMutex.Unlock() + mountsInitialized = false + return loadMountInfo() +} + +// FindMount returns the main Mount object for the filesystem which contains the +// file at the specified path. An error is returned if the path is invalid or if +// we cannot load the required mount data. If a mount has been updated since the +// last call to one of the mount functions, run UpdateMountInfo to see changes. +func FindMount(path string) (*Mount, error) { + mountMutex.Lock() + defer mountMutex.Unlock() + if err := loadMountInfo(); err != nil { + return nil, err + } + // First try to find the mount by the number of the containing device. + deviceNumber, err := getNumberOfContainingDevice(path) + if err != nil { + return nil, err + } + mnt, ok := mountsByDevice[deviceNumber] + if ok { + if mnt == nil { + return nil, filesystemLacksMainMountError(deviceNumber) + } + return mnt, nil + } + // The mount couldn't be found by the number of the containing device. + // Fall back to walking up the directory hierarchy and checking for a + // mount at each directory path. This is necessary for btrfs, where + // files report a different st_dev from the /proc/self/mountinfo entry. + curPath, err := canonicalizePath(path) + if err != nil { + return nil, err + } + for { + mnt := mountsByPath[curPath] + if mnt != nil { + return mnt, nil + } + // Move to the parent directory unless we have reached the root. + parent := filepath.Dir(curPath) + if parent == curPath { + return nil, errors.Errorf("couldn't find mountpoint containing %q", path) + } + curPath = parent + } +} + +// GetMount is like FindMount, except GetMount also returns an error if the path +// doesn't name the same file as the filesystem's "main" Mount. For example, if +// a filesystem is fully mounted at "/mnt" and if "/mnt/a" exists, then +// FindMount("/mnt/a") will succeed whereas GetMount("/mnt/a") will fail. This +// is true even if "/mnt/a" is a bind mount of part of the same filesystem. +func GetMount(mountpoint string) (*Mount, error) { + mnt, err := FindMount(mountpoint) + if err != nil { + return nil, &ErrNotAMountpoint{mountpoint} + } + // Check whether 'mountpoint' names the same directory as 'mnt.Path'. 
+ // Use os.SameFile() (i.e., compare inode numbers) rather than compare + // canonical paths, since filesystems may be mounted in multiple places. + fi1, err := os.Stat(mountpoint) + if err != nil { + return nil, err + } + fi2, err := os.Stat(mnt.Path) + if err != nil { + return nil, err + } + if !os.SameFile(fi1, fi2) { + return nil, &ErrNotAMountpoint{mountpoint} + } + return mnt, nil +} + +func uuidToDeviceNumber(uuid string) (DeviceNumber, error) { + uuidSymlinkPath := filepath.Join(uuidDirectory, uuid) + return getDeviceNumber(uuidSymlinkPath) +} + +func deviceNumberToMount(deviceNumber DeviceNumber) (*Mount, bool) { + mountMutex.Lock() + defer mountMutex.Unlock() + if err := loadMountInfo(); err != nil { + log.Print(err) + return nil, false + } + mnt, ok := mountsByDevice[deviceNumber] + return mnt, ok +} + +// getMountFromLink returns the main Mount, if any, for the filesystem which the +// given link points to. The link should contain a series of token-value pairs +// (=), one per line. The supported tokens are "UUID" and "PATH". +// If the UUID is present and it works, then it is used; otherwise, PATH is used +// if it is present. (The fallback from UUID to PATH will keep the link working +// if the UUID of the target filesystem changes but its mountpoint doesn't.) +// +// If a mount has been updated since the last call to one of the mount +// functions, make sure to run UpdateMountInfo first. +func getMountFromLink(link string) (*Mount, error) { + // Parse the link. + uuid := "" + path := "" + lines := strings.Split(link, "\n") + for _, line := range lines { + line := strings.TrimSpace(line) + if line == "" { + continue + } + pair := strings.Split(line, "=") + if len(pair) != 2 { + log.Printf("ignoring invalid line in filesystem link file: %q", line) + continue + } + token := pair[0] + value := pair[1] + switch token { + case uuidToken: + uuid = value + case pathToken: + path = value + default: + log.Printf("ignoring unknown link token %q", token) + } + } + // At least one of UUID and PATH must be present. + if uuid == "" && path == "" { + return nil, &ErrFollowLink{link, errors.Errorf("invalid filesystem link file")} + } + + // Try following the UUID. + errMsg := "" + if uuid != "" { + deviceNumber, err := uuidToDeviceNumber(uuid) + if err == nil { + mnt, ok := deviceNumberToMount(deviceNumber) + if mnt != nil { + log.Printf("resolved filesystem link using UUID %q", uuid) + return mnt, nil + } + if ok { + return nil, &ErrFollowLink{link, filesystemLacksMainMountError(deviceNumber)} + } + log.Printf("cannot find filesystem with UUID %q", uuid) + } else { + log.Printf("cannot find filesystem with UUID %q: %v", uuid, err) + } + errMsg += fmt.Sprintf("cannot find filesystem with UUID %q", uuid) + if path != "" { + log.Printf("falling back to using mountpoint path instead of UUID") + } + } + // UUID didn't work. As a fallback, try the mountpoint path. + if path != "" { + mnt, err := GetMount(path) + if mnt != nil { + log.Printf("resolved filesystem link using mountpoint path %q", path) + return mnt, nil + } + log.Print(err) + if errMsg == "" { + errMsg = fmt.Sprintf("cannot find filesystem with main mountpoint %q", path) + } else { + errMsg += fmt.Sprintf(" or main mountpoint %q", path) + } + } + // No method worked; return an error. 
+ return nil, &ErrFollowLink{link, errors.New(errMsg)} +} + +func (mnt *Mount) getFilesystemUUID() (string, error) { + dirContents, err := ioutil.ReadDir(uuidDirectory) + if err != nil { + return "", err + } + for _, fileInfo := range dirContents { + if fileInfo.Mode()&os.ModeSymlink == 0 { + continue // Only interested in UUID symlinks + } + uuid := fileInfo.Name() + deviceNumber, err := uuidToDeviceNumber(uuid) + if err != nil { + log.Print(err) + continue + } + if mnt.DeviceNumber == deviceNumber { + return uuid, nil + } + } + return "", errors.Errorf("cannot determine UUID of device %q (%v)", + mnt.Device, mnt.DeviceNumber) +} + +// makeLink creates the contents of a link file which will point to the given +// filesystem. This will normally be a string of the form +// "UUID=\nPATH=\n". If the UUID cannot be determined, the UUID +// portion will be omitted. +func makeLink(mnt *Mount) (string, error) { + uuid, err := mnt.getFilesystemUUID() + if err != nil { + // The UUID could not be determined. This happens for btrfs + // filesystems, as the device number found via + // /dev/disk/by-uuid/* for btrfs filesystems differs from the + // actual device number of the mounted filesystem. Just rely + // entirely on the fallback to mountpoint path. + log.Print(err) + return fmt.Sprintf("%s=%s\n", pathToken, mnt.Path), nil + } + return fmt.Sprintf("%s=%s\n%s=%s\n", uuidToken, uuid, pathToken, mnt.Path), nil +} diff --git a/vendor/github.com/google/fscrypt/filesystem/path.go b/vendor/github.com/google/fscrypt/filesystem/path.go new file mode 100644 index 000000000..8cfb23574 --- /dev/null +++ b/vendor/github.com/google/fscrypt/filesystem/path.go @@ -0,0 +1,128 @@ +/* + * path.go - Utility functions for dealing with filesystem paths + * + * Copyright 2017 Google Inc. + * Author: Joe Richey (joerichey@google.com) + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not + * use this file except in compliance with the License. You may obtain a copy of + * the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. + */ + +package filesystem + +import ( + "fmt" + "log" + "os" + "path/filepath" + + "golang.org/x/sys/unix" + + "github.com/pkg/errors" +) + +// OpenFileOverridingUmask calls os.OpenFile but with the umask overridden so +// that no permission bits are masked out if the file is created. +func OpenFileOverridingUmask(name string, flag int, perm os.FileMode) (*os.File, error) { + oldMask := unix.Umask(0) + defer unix.Umask(oldMask) + return os.OpenFile(name, flag, perm) +} + +// canonicalizePath turns path into an absolute path without symlinks. +func canonicalizePath(path string) (string, error) { + path, err := filepath.Abs(path) + if err != nil { + return "", err + } + path, err = filepath.EvalSymlinks(path) + + // Get a better error if we have an invalid path + if pathErr, ok := err.(*os.PathError); ok { + err = errors.Wrap(pathErr.Err, pathErr.Path) + } + + return path, err +} + +// loggedStat runs os.Stat, but it logs the error if stat returns any error +// other than nil or IsNotExist. 
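The link files produced by makeLink above normally contain a UUID=<uuid> line followed by a PATH=<path> line, and getMountFromLink prefers the UUID and falls back to the mountpoint path. A standalone sketch of parsing that format; the UUID and mountpoint below are invented examples, not taken from a real system.

package main

import (
	"fmt"
	"strings"
)

func main() {
	// Invented example of a link file body; real links are written by
	// makeLink from a /dev/disk/by-uuid lookup plus the mountpoint path.
	link := "UUID=2e6b88a3-0c5a-4f42-9e9a-3f6a1d2b4c77\nPATH=/mnt/data\n"

	uuid, path := "", ""
	for _, line := range strings.Split(link, "\n") {
		line = strings.TrimSpace(line)
		if line == "" {
			continue
		}
		pair := strings.Split(line, "=")
		if len(pair) != 2 {
			continue // malformed lines are ignored, as in getMountFromLink
		}
		switch pair[0] {
		case "UUID":
			uuid = pair[1]
		case "PATH":
			path = pair[1]
		}
	}
	fmt.Printf("uuid=%q path=%q\n", uuid, path)
}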
+func loggedStat(name string) (os.FileInfo, error) { + info, err := os.Stat(name) + if err != nil && !os.IsNotExist(err) { + log.Print(err) + } + return info, err +} + +// loggedLstat runs os.Lstat (doesn't dereference trailing symlink), but it logs +// the error if lstat returns any error other than nil or IsNotExist. +func loggedLstat(name string) (os.FileInfo, error) { + info, err := os.Lstat(name) + if err != nil && !os.IsNotExist(err) { + log.Print(err) + } + return info, err +} + +// isDir returns true if the path exists and is that of a directory. +func isDir(path string) bool { + info, err := loggedStat(path) + return err == nil && info.IsDir() +} + +// isRegularFile returns true if the path exists and is that of a regular file. +func isRegularFile(path string) bool { + info, err := loggedStat(path) + return err == nil && info.Mode().IsRegular() +} + +// HaveReadAccessTo returns true if the process has read access to a file or +// directory, without actually opening it. +func HaveReadAccessTo(path string) bool { + return unix.Access(path, unix.R_OK) == nil +} + +// DeviceNumber represents a combined major:minor device number. +type DeviceNumber uint64 + +func (num DeviceNumber) String() string { + return fmt.Sprintf("%d:%d", unix.Major(uint64(num)), unix.Minor(uint64(num))) +} + +func newDeviceNumberFromString(str string) (DeviceNumber, error) { + var major, minor uint32 + if count, _ := fmt.Sscanf(str, "%d:%d", &major, &minor); count != 2 { + return 0, errors.Errorf("invalid device number string %q", str) + } + return DeviceNumber(unix.Mkdev(major, minor)), nil +} + +// getDeviceNumber returns the device number of the device node at the given +// path. If there is a symlink at the path, it is dereferenced. +func getDeviceNumber(path string) (DeviceNumber, error) { + var stat unix.Stat_t + if err := unix.Stat(path, &stat); err != nil { + return 0, err + } + return DeviceNumber(stat.Rdev), nil +} + +// getNumberOfContainingDevice returns the device number of the filesystem which +// contains the given file. If the file is a symlink, it is not dereferenced. +func getNumberOfContainingDevice(path string) (DeviceNumber, error) { + var stat unix.Stat_t + if err := unix.Lstat(path, &stat); err != nil { + return 0, err + } + return DeviceNumber(stat.Dev), nil +} diff --git a/vendor/github.com/google/fscrypt/keyring/fs_keyring.go b/vendor/github.com/google/fscrypt/keyring/fs_keyring.go new file mode 100644 index 000000000..9b949b9ea --- /dev/null +++ b/vendor/github.com/google/fscrypt/keyring/fs_keyring.go @@ -0,0 +1,326 @@ +/* + * fs_keyring.go - Add/remove encryption policy keys to/from filesystem + * + * Copyright 2019 Google LLC + * Author: Eric Biggers (ebiggers@google.com) + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not + * use this file except in compliance with the License. You may obtain a copy of + * the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. 
+ */
+
+package keyring
+
+/*
+#include <string.h>
+*/
+import "C"
+
+import (
+	"encoding/hex"
+	"log"
+	"os"
+	"os/user"
+	"sync"
+	"unsafe"
+
+	"github.com/pkg/errors"
+	"golang.org/x/sys/unix"
+
+	"github.com/google/fscrypt/crypto"
+	"github.com/google/fscrypt/filesystem"
+	"github.com/google/fscrypt/security"
+	"github.com/google/fscrypt/util"
+)
+
+var (
+	fsKeyringSupported      bool
+	fsKeyringSupportedKnown bool
+	fsKeyringSupportedLock  sync.Mutex
+)
+
+func checkForFsKeyringSupport(mount *filesystem.Mount) bool {
+	dir, err := os.Open(mount.Path)
+	if err != nil {
+		log.Printf("Unexpected error opening %q. Assuming filesystem keyring is unsupported.",
+			mount.Path)
+		return false
+	}
+	defer dir.Close()
+
+	// FS_IOC_ADD_ENCRYPTION_KEY with a NULL argument will fail with ENOTTY
+	// if the ioctl isn't supported. Otherwise it should fail with EFAULT.
+	//
+	// Note that there's no need to check for FS_IOC_REMOVE_ENCRYPTION_KEY
+	// support separately, since it's guaranteed to be available if
+	// FS_IOC_ADD_ENCRYPTION_KEY is. There's also no need to check for
+	// support on every filesystem separately, since either the kernel
+	// supports the ioctls on all fscrypt-capable filesystems or it doesn't.
+	_, _, errno := unix.Syscall(unix.SYS_IOCTL, dir.Fd(), unix.FS_IOC_ADD_ENCRYPTION_KEY, 0)
+	if errno == unix.ENOTTY {
+		log.Printf("Kernel doesn't support filesystem keyring. Falling back to user keyring.")
+		return false
+	}
+	if errno == unix.EFAULT {
+		log.Printf("Detected support for filesystem keyring")
+	} else {
+		// EFAULT is expected, but as long as we didn't get ENOTTY the
+		// ioctl should be available.
+		log.Printf("Unexpected error from FS_IOC_ADD_ENCRYPTION_KEY(%q, NULL): %v", mount.Path, errno)
+	}
+	return true
+}
+
+// IsFsKeyringSupported returns true if the kernel supports the ioctls to
+// add/remove fscrypt keys directly to/from the filesystem. For support to be
+// detected, the given Mount must be for a filesystem that supports fscrypt.
+func IsFsKeyringSupported(mount *filesystem.Mount) bool {
+	fsKeyringSupportedLock.Lock()
+	defer fsKeyringSupportedLock.Unlock()
+	if !fsKeyringSupportedKnown {
+		fsKeyringSupported = checkForFsKeyringSupport(mount)
+		fsKeyringSupportedKnown = true
+	}
+	return fsKeyringSupported
+}
+
+// buildKeySpecifier converts the key descriptor string to an FscryptKeySpecifier.
+func buildKeySpecifier(spec *unix.FscryptKeySpecifier, descriptor string) error {
+	descriptorBytes, err := hex.DecodeString(descriptor)
+	if err != nil {
+		return errors.Errorf("key descriptor %q is invalid", descriptor)
+	}
+	switch len(descriptorBytes) {
+	case unix.FSCRYPT_KEY_DESCRIPTOR_SIZE:
+		spec.Type = unix.FSCRYPT_KEY_SPEC_TYPE_DESCRIPTOR
+	case unix.FSCRYPT_KEY_IDENTIFIER_SIZE:
+		spec.Type = unix.FSCRYPT_KEY_SPEC_TYPE_IDENTIFIER
+	default:
+		return errors.Errorf("key descriptor %q has unknown length", descriptor)
+	}
+	copy(spec.U[:], descriptorBytes)
+	return nil
+}
+
+type savedPrivs struct {
+	ruid, euid, suid int
+}
+
+// dropPrivsIfNeeded drops privileges (UIDs only) to the given user if we're
+// working with a v2 policy key, and if the user is different from the user the
+// process is currently running as.
+//
+// This is needed to change the effective UID so that FS_IOC_ADD_ENCRYPTION_KEY
+// and FS_IOC_REMOVE_ENCRYPTION_KEY will add/remove a claim to the key for the
+// intended user, and so that FS_IOC_GET_ENCRYPTION_KEY_STATUS will return the
+// correct status flags for the user.
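buildKeySpecifier above tells v1 policy keys from v2 policy keys purely by the decoded length of the hex descriptor: 8 bytes for a v1 descriptor, 16 bytes for a v2 identifier. A small sketch of that check using the same golang.org/x/sys/unix constants; the descriptors below are invented examples.

package main

import (
	"encoding/hex"
	"fmt"

	"golang.org/x/sys/unix"
)

// policyVersion reports whether a hex descriptor names a v1 or a v2 policy
// key, using the same length check as buildKeySpecifier above.
func policyVersion(descriptor string) (int, error) {
	raw, err := hex.DecodeString(descriptor)
	if err != nil {
		return 0, fmt.Errorf("descriptor %q is not hex: %v", descriptor, err)
	}
	switch len(raw) {
	case unix.FSCRYPT_KEY_DESCRIPTOR_SIZE: // 8 bytes (16 hex chars) -> v1
		return 1, nil
	case unix.FSCRYPT_KEY_IDENTIFIER_SIZE: // 16 bytes (32 hex chars) -> v2
		return 2, nil
	default:
		return 0, fmt.Errorf("descriptor %q has unexpected length %d", descriptor, len(raw))
	}
}

func main() {
	// Both descriptors below are invented examples.
	for _, d := range []string{
		"0123456789abcdef",
		"00112233445566778899aabbccddeeff",
	} {
		v, err := policyVersion(d)
		fmt.Println(d, "->", v, err)
	}
}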
+func dropPrivsIfNeeded(user *user.User, spec *unix.FscryptKeySpecifier) (*savedPrivs, error) { + if spec.Type == unix.FSCRYPT_KEY_SPEC_TYPE_DESCRIPTOR { + // v1 policy keys don't have any concept of user claims. + return nil, nil + } + targetUID := util.AtoiOrPanic(user.Uid) + ruid, euid, suid := security.GetUids() + if euid == targetUID { + return nil, nil + } + if err := security.SetUids(targetUID, targetUID, euid); err != nil { + return nil, err + } + return &savedPrivs{ruid, euid, suid}, nil +} + +// restorePrivs restores root privileges if needed. +func restorePrivs(privs *savedPrivs) error { + if privs != nil { + return security.SetUids(privs.ruid, privs.euid, privs.suid) + } + return nil +} + +// validateKeyDescriptor validates that the correct key descriptor was provided. +// This isn't really necessary; this is just an extra sanity check. +func validateKeyDescriptor(spec *unix.FscryptKeySpecifier, descriptor string) (string, error) { + if spec.Type != unix.FSCRYPT_KEY_SPEC_TYPE_IDENTIFIER { + // v1 policy key: the descriptor is chosen arbitrarily by + // userspace, so there's nothing to validate. + return descriptor, nil + } + // v2 policy key. The descriptor ("identifier" in the kernel UAPI) is + // calculated as a cryptographic hash of the key itself. The kernel + // ignores the provided value, and calculates and returns it itself. So + // verify that the returned value is as expected. If it's not, the key + // doesn't actually match the encryption policy we thought it was for. + actual := hex.EncodeToString(spec.U[:unix.FSCRYPT_KEY_IDENTIFIER_SIZE]) + if descriptor == actual { + return descriptor, nil + } + return actual, + errors.Errorf("provided and actual key descriptors differ (%q != %q)", + descriptor, actual) +} + +// fsAddEncryptionKey adds the specified encryption key to the specified filesystem. +func fsAddEncryptionKey(key *crypto.Key, descriptor string, + mount *filesystem.Mount, user *user.User) error { + + dir, err := os.Open(mount.Path) + if err != nil { + return err + } + defer dir.Close() + + argKey, err := crypto.NewBlankKey(int(unsafe.Sizeof(unix.FscryptAddKeyArg{})) + key.Len()) + if err != nil { + return err + } + defer argKey.Wipe() + arg := (*unix.FscryptAddKeyArg)(argKey.UnsafePtr()) + + if err = buildKeySpecifier(&arg.Key_spec, descriptor); err != nil { + return err + } + + raw := unsafe.Pointer(uintptr(argKey.UnsafePtr()) + unsafe.Sizeof(*arg)) + arg.Raw_size = uint32(key.Len()) + C.memcpy(raw, key.UnsafePtr(), C.size_t(key.Len())) + + savedPrivs, err := dropPrivsIfNeeded(user, &arg.Key_spec) + if err != nil { + return err + } + _, _, errno := unix.Syscall(unix.SYS_IOCTL, dir.Fd(), + unix.FS_IOC_ADD_ENCRYPTION_KEY, uintptr(argKey.UnsafePtr())) + restorePrivs(savedPrivs) + + log.Printf("FS_IOC_ADD_ENCRYPTION_KEY(%q, %s, ) = %v", mount.Path, descriptor, errno) + if errno != 0 { + return errors.Wrapf(errno, + "error adding key with descriptor %s to filesystem %s", + descriptor, mount.Path) + } + if descriptor, err = validateKeyDescriptor(&arg.Key_spec, descriptor); err != nil { + fsRemoveEncryptionKey(descriptor, mount, user) + return err + } + return nil +} + +// fsRemoveEncryptionKey removes the specified encryption key from the specified +// filesystem. 
+func fsRemoveEncryptionKey(descriptor string, mount *filesystem.Mount, + user *user.User) error { + + dir, err := os.Open(mount.Path) + if err != nil { + return err + } + defer dir.Close() + + var arg unix.FscryptRemoveKeyArg + if err = buildKeySpecifier(&arg.Key_spec, descriptor); err != nil { + return err + } + + ioc := uintptr(unix.FS_IOC_REMOVE_ENCRYPTION_KEY) + iocName := "FS_IOC_REMOVE_ENCRYPTION_KEY" + var savedPrivs *savedPrivs + if user == nil { + ioc = unix.FS_IOC_REMOVE_ENCRYPTION_KEY_ALL_USERS + iocName = "FS_IOC_REMOVE_ENCRYPTION_KEY_ALL_USERS" + } else { + savedPrivs, err = dropPrivsIfNeeded(user, &arg.Key_spec) + if err != nil { + return err + } + } + _, _, errno := unix.Syscall(unix.SYS_IOCTL, dir.Fd(), ioc, uintptr(unsafe.Pointer(&arg))) + restorePrivs(savedPrivs) + + log.Printf("%s(%q, %s) = %v, removal_status_flags=0x%x", + iocName, mount.Path, descriptor, errno, arg.Removal_status_flags) + switch errno { + case 0: + switch { + case arg.Removal_status_flags&unix.FSCRYPT_KEY_REMOVAL_STATUS_FLAG_OTHER_USERS != 0: + return ErrKeyAddedByOtherUsers + case arg.Removal_status_flags&unix.FSCRYPT_KEY_REMOVAL_STATUS_FLAG_FILES_BUSY != 0: + return ErrKeyFilesOpen + } + return nil + case unix.ENOKEY: + // ENOKEY means either the key is completely missing or that the + // current user doesn't have a claim to it. Distinguish between + // these two cases by getting the key status. + if user != nil { + status, _ := fsGetEncryptionKeyStatus(descriptor, mount, user) + if status == KeyPresentButOnlyOtherUsers { + return ErrKeyAddedByOtherUsers + } + } + return ErrKeyNotPresent + default: + return errors.Wrapf(errno, + "error removing key with descriptor %s from filesystem %s", + descriptor, mount.Path) + } +} + +// fsGetEncryptionKeyStatus gets the status of the specified encryption key on +// the specified filesystem. 
+func fsGetEncryptionKeyStatus(descriptor string, mount *filesystem.Mount, + user *user.User) (KeyStatus, error) { + + dir, err := os.Open(mount.Path) + if err != nil { + return KeyStatusUnknown, err + } + defer dir.Close() + + var arg unix.FscryptGetKeyStatusArg + err = buildKeySpecifier(&arg.Key_spec, descriptor) + if err != nil { + return KeyStatusUnknown, err + } + + savedPrivs, err := dropPrivsIfNeeded(user, &arg.Key_spec) + if err != nil { + return KeyStatusUnknown, err + } + _, _, errno := unix.Syscall(unix.SYS_IOCTL, dir.Fd(), + unix.FS_IOC_GET_ENCRYPTION_KEY_STATUS, uintptr(unsafe.Pointer(&arg))) + restorePrivs(savedPrivs) + + log.Printf("FS_IOC_GET_ENCRYPTION_KEY_STATUS(%q, %s) = %v, status=%d, status_flags=0x%x", + mount.Path, descriptor, errno, arg.Status, arg.Status_flags) + if errno != 0 { + return KeyStatusUnknown, + errors.Wrapf(errno, + "error getting status of key with descriptor %s on filesystem %s", + descriptor, mount.Path) + } + switch arg.Status { + case unix.FSCRYPT_KEY_STATUS_ABSENT: + return KeyAbsent, nil + case unix.FSCRYPT_KEY_STATUS_PRESENT: + if arg.Key_spec.Type != unix.FSCRYPT_KEY_SPEC_TYPE_DESCRIPTOR && + (arg.Status_flags&unix.FSCRYPT_KEY_STATUS_FLAG_ADDED_BY_SELF) == 0 { + return KeyPresentButOnlyOtherUsers, nil + } + return KeyPresent, nil + case unix.FSCRYPT_KEY_STATUS_INCOMPLETELY_REMOVED: + return KeyAbsentButFilesBusy, nil + default: + return KeyStatusUnknown, + errors.Errorf("unknown key status (%d) for key with descriptor %s on filesystem %s", + arg.Status, descriptor, mount.Path) + } +} diff --git a/vendor/github.com/google/fscrypt/keyring/keyring.go b/vendor/github.com/google/fscrypt/keyring/keyring.go new file mode 100644 index 000000000..5ddceaf8b --- /dev/null +++ b/vendor/github.com/google/fscrypt/keyring/keyring.go @@ -0,0 +1,175 @@ +/* + * keyring.go - Add/remove encryption policy keys to/from kernel + * + * Copyright 2019 Google LLC + * Author: Eric Biggers (ebiggers@google.com) + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not + * use this file except in compliance with the License. You may obtain a copy of + * the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. + */ + +// Package keyring manages adding, removing, and getting the status of +// encryption policy keys to/from the kernel. Most public functions are in +// keyring.go, and they delegate to either user_keyring.go or fs_keyring.go, +// depending on whether a user keyring or a filesystem keyring is being used. +// +// v2 encryption policies always use the filesystem keyring. +// v1 policies use the user keyring by default, but can be configured to use the +// filesystem keyring instead (requires root and kernel v5.4+). 
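Given the delegation described in the package comment above, a caller typically just fills in Options (defined below) and lets the package pick the keyring. A hedged usage sketch that only calls exported functions appearing elsewhere in this diff; the mountpoint and descriptor are placeholders, and querying a real key requires an fscrypt-enabled filesystem.

package main

import (
	"fmt"
	"log"
	"os/user"

	"github.com/google/fscrypt/filesystem"
	"github.com/google/fscrypt/keyring"
)

func main() {
	// Placeholder mountpoint; FindMount is defined in
	// filesystem/mountpoint.go earlier in this diff.
	mnt, err := filesystem.FindMount("/mnt/data")
	if err != nil {
		log.Fatal(err)
	}
	u, err := user.Current()
	if err != nil {
		log.Fatal(err)
	}
	opts := &keyring.Options{Mount: mnt, User: u}

	// Invented v2 key identifier (32 hex characters).
	descriptor := "00112233445566778899aabbccddeeff"
	status, err := keyring.GetEncryptionKeyStatus(descriptor, opts)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("key status:", status)
}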
+package keyring + +import ( + "encoding/hex" + "os/user" + "strconv" + + "github.com/pkg/errors" + "golang.org/x/sys/unix" + + "github.com/google/fscrypt/crypto" + "github.com/google/fscrypt/filesystem" + "github.com/google/fscrypt/metadata" + "github.com/google/fscrypt/util" +) + +// Keyring error values +var ( + ErrKeyAddedByOtherUsers = errors.New("other users have added the key too") + ErrKeyFilesOpen = errors.New("some files using the key are still open") + ErrKeyNotPresent = errors.New("key not present or already removed") + ErrV2PoliciesUnsupported = errors.New("kernel is too old to support v2 encryption policies") +) + +// Options are the options which specify *which* keyring the key should be +// added/removed/gotten to, and how. +type Options struct { + // Mount is the filesystem to which the key should be + // added/removed/gotten. + Mount *filesystem.Mount + // User is the user for whom the key should be added/removed/gotten. + User *user.User + // UseFsKeyringForV1Policies is true if keys for v1 encryption policies + // should be put in the filesystem's keyring (if supported) rather than + // in the user's keyring. Note that this makes AddEncryptionKey and + // RemoveEncryptionKey require root privileges. + UseFsKeyringForV1Policies bool +} + +func shouldUseFsKeyring(descriptor string, options *Options) (bool, error) { + // For v1 encryption policy keys, use the filesystem keyring if + // use_fs_keyring_for_v1_policies is set in /etc/fscrypt.conf and the + // kernel supports it. + if len(descriptor) == hex.EncodedLen(unix.FSCRYPT_KEY_DESCRIPTOR_SIZE) { + return options.UseFsKeyringForV1Policies && IsFsKeyringSupported(options.Mount), nil + } + // For v2 encryption policy keys, always use the filesystem keyring; the + // kernel doesn't support any other way. + if !IsFsKeyringSupported(options.Mount) { + return true, ErrV2PoliciesUnsupported + } + return true, nil +} + +// buildKeyDescription builds the description for an fscrypt key of type +// "logon". For ext4 and f2fs, it uses the legacy filesystem-specific prefixes +// for compatibility with kernels before v4.8 and v4.6 respectively. For other +// filesystems it uses the generic prefix "fscrypt". +func buildKeyDescription(options *Options, descriptor string) string { + switch options.Mount.FilesystemType { + case "ext4", "f2fs": + return options.Mount.FilesystemType + ":" + descriptor + default: + return unix.FSCRYPT_KEY_DESC_PREFIX + descriptor + } +} + +// AddEncryptionKey adds an encryption policy key to a kernel keyring. It uses +// either the filesystem keyring for the target Mount or the user keyring for +// the target User. +func AddEncryptionKey(key *crypto.Key, descriptor string, options *Options) error { + if err := util.CheckValidLength(metadata.PolicyKeyLen, key.Len()); err != nil { + return errors.Wrap(err, "policy key") + } + useFsKeyring, err := shouldUseFsKeyring(descriptor, options) + if err != nil { + return err + } + if useFsKeyring { + return fsAddEncryptionKey(key, descriptor, options.Mount, options.User) + } + return userAddKey(key, buildKeyDescription(options, descriptor), options.User) +} + +// RemoveEncryptionKey removes an encryption policy key from a kernel keyring. +// It uses either the filesystem keyring for the target Mount or the user +// keyring for the target User. 
+func RemoveEncryptionKey(descriptor string, options *Options, allUsers bool) error { + useFsKeyring, err := shouldUseFsKeyring(descriptor, options) + if err != nil { + return err + } + if useFsKeyring { + user := options.User + if allUsers { + user = nil + } + return fsRemoveEncryptionKey(descriptor, options.Mount, user) + } + return userRemoveKey(buildKeyDescription(options, descriptor), options.User) +} + +// KeyStatus is an enum that represents the status of a key in a kernel keyring. +type KeyStatus int + +// The possible values of KeyStatus. +const ( + KeyStatusUnknown = 0 + iota + KeyAbsent + KeyAbsentButFilesBusy + KeyPresent + KeyPresentButOnlyOtherUsers +) + +func (status KeyStatus) String() string { + switch status { + case KeyStatusUnknown: + return "Unknown" + case KeyAbsent: + return "Absent" + case KeyAbsentButFilesBusy: + return "AbsentButFilesBusy" + case KeyPresent: + return "Present" + case KeyPresentButOnlyOtherUsers: + return "PresentButOnlyOtherUsers" + default: + return strconv.Itoa(int(status)) + } +} + +// GetEncryptionKeyStatus gets the status of an encryption policy key in a +// kernel keyring. It uses either the filesystem keyring for the target Mount +// or the user keyring for the target User. +func GetEncryptionKeyStatus(descriptor string, options *Options) (KeyStatus, error) { + useFsKeyring, err := shouldUseFsKeyring(descriptor, options) + if err != nil { + return KeyStatusUnknown, err + } + if useFsKeyring { + return fsGetEncryptionKeyStatus(descriptor, options.Mount, options.User) + } + _, _, err = userFindKey(buildKeyDescription(options, descriptor), options.User) + if err != nil { + return KeyAbsent, nil + } + return KeyPresent, nil +} diff --git a/vendor/github.com/google/fscrypt/keyring/user_keyring.go b/vendor/github.com/google/fscrypt/keyring/user_keyring.go new file mode 100644 index 000000000..0ea468957 --- /dev/null +++ b/vendor/github.com/google/fscrypt/keyring/user_keyring.go @@ -0,0 +1,251 @@ +/* + * user_keyring.go - Add/remove encryption policy keys to/from user keyrings. + * This is the deprecated mechanism; see fs_keyring.go for the new mechanism. + * + * Copyright 2017 Google Inc. + * Author: Joe Richey (joerichey@google.com) + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not + * use this file except in compliance with the License. You may obtain a copy of + * the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. + */ + +package keyring + +import ( + "os/user" + "runtime" + "unsafe" + + "github.com/pkg/errors" + "golang.org/x/sys/unix" + + "fmt" + "log" + + "github.com/google/fscrypt/crypto" + "github.com/google/fscrypt/security" + "github.com/google/fscrypt/util" +) + +// ErrAccessUserKeyring indicates that a user's keyring cannot be +// accessed. +type ErrAccessUserKeyring struct { + TargetUser *user.User + UnderlyingError error +} + +func (err *ErrAccessUserKeyring) Error() string { + return fmt.Sprintf("could not access user keyring for %q: %s", + err.TargetUser.Username, err.UnderlyingError) +} + +// ErrSessionUserKeyring indicates that a user's keyring is not linked +// into the session keyring. 
+type ErrSessionUserKeyring struct { + TargetUser *user.User +} + +func (err *ErrSessionUserKeyring) Error() string { + return fmt.Sprintf("user keyring for %q is not linked into the session keyring", + err.TargetUser.Username) +} + +// KeyType is always logon as required by filesystem encryption. +const KeyType = "logon" + +// userAddKey puts the provided policy key into the user keyring for the +// specified user with the provided description, and type logon. +func userAddKey(key *crypto.Key, description string, targetUser *user.User) error { + runtime.LockOSThread() // ensure target user keyring remains possessed in thread keyring + defer runtime.UnlockOSThread() + + // Create our payload (containing an FscryptKey) + payload, err := crypto.NewBlankKey(int(unsafe.Sizeof(unix.FscryptKey{}))) + if err != nil { + return err + } + defer payload.Wipe() + + // Cast the payload to an FscryptKey so we can initialize the fields. + fscryptKey := (*unix.FscryptKey)(payload.UnsafePtr()) + // Mode is ignored by the kernel + fscryptKey.Mode = 0 + fscryptKey.Size = uint32(key.Len()) + copy(fscryptKey.Raw[:], key.Data()) + + keyringID, err := UserKeyringID(targetUser, true) + if err != nil { + return err + } + keyID, err := unix.AddKey(KeyType, description, payload.Data(), keyringID) + log.Printf("KeyctlAddKey(%s, %s, , %d) = %d, %v", + KeyType, description, keyringID, keyID, err) + if err != nil { + return errors.Wrapf(err, + "error adding key with description %s to user keyring for %q", + description, targetUser.Username) + } + return nil +} + +// userRemoveKey tries to remove a policy key from the user keyring with the +// provided description. An error is returned if the key does not exist. +func userRemoveKey(description string, targetUser *user.User) error { + runtime.LockOSThread() // ensure target user keyring remains possessed in thread keyring + defer runtime.UnlockOSThread() + + keyID, keyringID, err := userFindKey(description, targetUser) + if err != nil { + return ErrKeyNotPresent + } + + _, err = unix.KeyctlInt(unix.KEYCTL_UNLINK, keyID, keyringID, 0, 0) + log.Printf("KeyctlUnlink(%d, %d) = %v", keyID, keyringID, err) + if err != nil { + return errors.Wrapf(err, + "error removing key with description %s from user keyring for %q", + description, targetUser.Username) + } + return nil +} + +// userFindKey tries to locate a key with the provided description in the user +// keyring for the target user. The key ID and keyring ID are returned if we can +// find the key. An error is returned if the key does not exist. +func userFindKey(description string, targetUser *user.User) (int, int, error) { + runtime.LockOSThread() // ensure target user keyring remains possessed in thread keyring + defer runtime.UnlockOSThread() + + keyringID, err := UserKeyringID(targetUser, false) + if err != nil { + return 0, 0, err + } + + keyID, err := unix.KeyctlSearch(keyringID, KeyType, description, 0) + log.Printf("KeyctlSearch(%d, %s, %s) = %d, %v", keyringID, KeyType, description, keyID, err) + if err != nil { + return 0, 0, errors.Wrapf(err, + "error searching for key %s in user keyring for %q", + description, targetUser.Username) + } + return keyID, keyringID, err +} + +// UserKeyringID returns the key id of the target user's user keyring. We also +// ensure that the keyring will be accessible by linking it into the thread +// keyring and linking it into the root user keyring (permissions allowing). 
If +// checkSession is true, an error is returned if a normal user requests their +// user keyring, but it is not in the current session keyring. +func UserKeyringID(targetUser *user.User, checkSession bool) (int, error) { + runtime.LockOSThread() // ensure target user keyring remains possessed in thread keyring + defer runtime.UnlockOSThread() + + uid := util.AtoiOrPanic(targetUser.Uid) + targetKeyring, err := userKeyringIDLookup(uid) + if err != nil { + return 0, &ErrAccessUserKeyring{targetUser, err} + } + + if !util.IsUserRoot() { + // Make sure the returned keyring will be accessible by checking + // that it is in the session keyring. + if checkSession && !isUserKeyringInSession(uid) { + return 0, &ErrSessionUserKeyring{targetUser} + } + return targetKeyring, nil + } + + // Make sure the returned keyring will be accessible by linking it into + // the root user's user keyring (which will not be garbage collected). + rootKeyring, err := userKeyringIDLookup(0) + if err != nil { + return 0, errors.Wrapf(err, "error looking up root's user keyring") + } + + if rootKeyring != targetKeyring { + if err = keyringLink(targetKeyring, rootKeyring); err != nil { + return 0, errors.Wrapf(err, + "error linking user keyring for %q into root's user keyring", + targetUser.Username) + } + } + return targetKeyring, nil +} + +func userKeyringIDLookup(uid int) (keyringID int, err error) { + + // Our goals here are to: + // - Find the user keyring (for the provided uid) + // - Link it into the current thread keyring (so we can use it) + // - Make no permanent changes to the process privileges + // Complicating this are the facts that: + // - The value of KEY_SPEC_USER_KEYRING is determined by the ruid + // - Keyring linking permissions use the euid + // So we have to change both the ruid and euid to make this work, + // setting the suid to 0 so that we can later switch back. + ruid, euid, suid := security.GetUids() + if ruid != uid || euid != uid { + if err = security.SetUids(uid, uid, 0); err != nil { + return + } + defer func() { + resetErr := security.SetUids(ruid, euid, suid) + if resetErr != nil { + err = resetErr + } + }() + } + + // We get the value of KEY_SPEC_USER_KEYRING. Note that this will also + // trigger the creation of the uid keyring if it does not yet exist. + keyringID, err = unix.KeyctlGetKeyringID(unix.KEY_SPEC_USER_KEYRING, true) + log.Printf("keyringID(_uid.%d) = %d, %v", uid, keyringID, err) + if err != nil { + return 0, err + } + + // We still want to use this keyring after our privileges are reset. So + // we link it into the thread keyring, preventing a loss of access. + // + // We must be under LockOSThread() for this to work reliably. Note that + // we can't just use the process keyring, since it doesn't work reliably + // in Go programs, due to the Go runtime creating threads before the + // program starts and has a chance to create the process keyring. + if err = keyringLink(keyringID, unix.KEY_SPEC_THREAD_KEYRING); err != nil { + return 0, err + } + + return keyringID, nil +} + +// isUserKeyringInSession tells us if the user's uid keyring is in the current +// session keyring. +func isUserKeyringInSession(uid int) bool { + // We cannot use unix.KEY_SPEC_SESSION_KEYRING directly as that might + // create a session keyring if one does not exist. 
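+	// Passing create=false below asks the kernel not to create a session
+	// keyring as a side effect of the lookup.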
+ sessionKeyring, err := unix.KeyctlGetKeyringID(unix.KEY_SPEC_SESSION_KEYRING, false) + log.Printf("keyringID(session) = %d, %v", sessionKeyring, err) + if err != nil { + return false + } + + description := fmt.Sprintf("_uid.%d", uid) + id, err := unix.KeyctlSearch(sessionKeyring, "keyring", description, 0) + log.Printf("KeyctlSearch(%d, keyring, %s) = %d, %v", sessionKeyring, description, id, err) + return err == nil +} + +func keyringLink(keyID int, keyringID int) error { + _, err := unix.KeyctlInt(unix.KEYCTL_LINK, keyID, keyringID, 0, 0) + log.Printf("KeyctlLink(%d, %d) = %v", keyID, keyringID, err) + return err +} diff --git a/vendor/github.com/google/fscrypt/metadata/checks.go b/vendor/github.com/google/fscrypt/metadata/checks.go new file mode 100644 index 000000000..84fd208c8 --- /dev/null +++ b/vendor/github.com/google/fscrypt/metadata/checks.go @@ -0,0 +1,221 @@ +/* + * checks.go - Some sanity check methods for our metadata structures + * + * Copyright 2017 Google Inc. + * Author: Joe Richey (joerichey@google.com) + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not + * use this file except in compliance with the License. You may obtain a copy of + * the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. + */ + +package metadata + +import ( + "github.com/golang/protobuf/proto" + "github.com/pkg/errors" + + "github.com/google/fscrypt/util" +) + +var errNotInitialized = errors.New("not initialized") + +// Metadata is the interface to all of the protobuf structures that can be +// checked for validity. +type Metadata interface { + CheckValidity() error + proto.Message +} + +// CheckValidity ensures the mode has a name and isn't empty. +func (m EncryptionOptions_Mode) CheckValidity() error { + if m == EncryptionOptions_default { + return errNotInitialized + } + if m.String() == "" { + return errors.Errorf("unknown %d", m) + } + return nil +} + +// CheckValidity ensures the source has a name and isn't empty. +func (s SourceType) CheckValidity() error { + if s == SourceType_default { + return errNotInitialized + } + if s.String() == "" { + return errors.Errorf("unknown %d", s) + } + return nil +} + +// CheckValidity ensures the hash costs will be accepted by Argon2. +func (h *HashingCosts) CheckValidity() error { + if h == nil { + return errNotInitialized + } + if h.Time <= 0 { + return errors.Errorf("time=%d is not positive", h.Time) + } + if h.Parallelism <= 0 { + return errors.Errorf("parallelism=%d is not positive", h.Parallelism) + } + minMemory := 8 * h.Parallelism + if h.Memory < minMemory { + return errors.Errorf("memory=%d is less than minimum (%d)", h.Memory, minMemory) + } + return nil +} + +// CheckValidity ensures our buffers are the correct length. +func (w *WrappedKeyData) CheckValidity() error { + if w == nil { + return errNotInitialized + } + if len(w.EncryptedKey) == 0 { + return errors.Wrap(errNotInitialized, "encrypted key") + } + if err := util.CheckValidLength(IVLen, len(w.IV)); err != nil { + return errors.Wrap(err, "IV") + } + return errors.Wrap(util.CheckValidLength(HMACLen, len(w.Hmac)), "HMAC") +} + +// CheckValidity ensures our ProtectorData has the correct fields for its source. 
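+// For pam_passphrase protectors the UID is checked first and the case then
+// falls through to the custom_passphrase checks, so both passphrase sources
+// must have valid hashing costs and a salt of length SaltLen.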
+func (p *ProtectorData) CheckValidity() error { + if p == nil { + return errNotInitialized + } + + if err := p.Source.CheckValidity(); err != nil { + return errors.Wrap(err, "protector source") + } + + // Source specific checks + switch p.Source { + case SourceType_pam_passphrase: + if p.Uid < 0 { + return errors.Errorf("UID=%d is negative", p.Uid) + } + fallthrough + case SourceType_custom_passphrase: + if err := p.Costs.CheckValidity(); err != nil { + return errors.Wrap(err, "passphrase hashing costs") + } + if err := util.CheckValidLength(SaltLen, len(p.Salt)); err != nil { + return errors.Wrap(err, "passphrase hashing salt") + } + } + + // Generic checks + if err := p.WrappedKey.CheckValidity(); err != nil { + return errors.Wrap(err, "wrapped protector key") + } + if err := util.CheckValidLength(ProtectorDescriptorLen, len(p.ProtectorDescriptor)); err != nil { + return errors.Wrap(err, "protector descriptor") + + } + err := util.CheckValidLength(InternalKeyLen, len(p.WrappedKey.EncryptedKey)) + return errors.Wrap(err, "encrypted protector key") +} + +// CheckValidity ensures each of the options is valid. +func (e *EncryptionOptions) CheckValidity() error { + if e == nil { + return errNotInitialized + } + if _, ok := util.Index(e.Padding, paddingArray); !ok { + return errors.Errorf("padding of %d is invalid", e.Padding) + } + if err := e.Contents.CheckValidity(); err != nil { + return errors.Wrap(err, "contents encryption mode") + } + if err := e.Filenames.CheckValidity(); err != nil { + return errors.Wrap(err, "filenames encryption mode") + } + // If PolicyVersion is unset, treat it as 1. + if e.PolicyVersion == 0 { + e.PolicyVersion = 1 + } + if e.PolicyVersion != 1 && e.PolicyVersion != 2 { + return errors.Errorf("policy version of %d is invalid", e.PolicyVersion) + } + return nil +} + +// CheckValidity ensures the fields are valid and have the correct lengths. +func (w *WrappedPolicyKey) CheckValidity() error { + if w == nil { + return errNotInitialized + } + if err := w.WrappedKey.CheckValidity(); err != nil { + return errors.Wrap(err, "wrapped key") + } + if err := util.CheckValidLength(PolicyKeyLen, len(w.WrappedKey.EncryptedKey)); err != nil { + return errors.Wrap(err, "encrypted key") + } + err := util.CheckValidLength(ProtectorDescriptorLen, len(w.ProtectorDescriptor)) + return errors.Wrap(err, "wrapping protector descriptor") +} + +// CheckValidity ensures the fields and each wrapped key are valid. +func (p *PolicyData) CheckValidity() error { + if p == nil { + return errNotInitialized + } + // Check each wrapped key + for i, w := range p.WrappedPolicyKeys { + if err := w.CheckValidity(); err != nil { + return errors.Wrapf(err, "policy key slot %d", i) + } + } + + if err := p.Options.CheckValidity(); err != nil { + return errors.Wrap(err, "policy options") + } + + var expectedLen int + switch p.Options.PolicyVersion { + case 1: + expectedLen = PolicyDescriptorLenV1 + case 2: + expectedLen = PolicyDescriptorLenV2 + default: + return errors.Errorf("policy version of %d is invalid", p.Options.PolicyVersion) + } + + if err := util.CheckValidLength(expectedLen, len(p.KeyDescriptor)); err != nil { + return errors.Wrap(err, "policy key descriptor") + } + + return nil +} + +// CheckValidity ensures the Config has all the necessary info for its Source. 
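+// Only the passphrase sources require HashCosts to be valid; the encryption
+// Options are checked for every source.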
+func (c *Config) CheckValidity() error { + // General checks + if c == nil { + return errNotInitialized + } + if err := c.Source.CheckValidity(); err != nil { + return errors.Wrap(err, "default config source") + } + + // Source specific checks + switch c.Source { + case SourceType_pam_passphrase, SourceType_custom_passphrase: + if err := c.HashCosts.CheckValidity(); err != nil { + return errors.Wrap(err, "config hashing costs") + } + } + + return errors.Wrap(c.Options.CheckValidity(), "config options") +} diff --git a/vendor/github.com/google/fscrypt/metadata/config.go b/vendor/github.com/google/fscrypt/metadata/config.go new file mode 100644 index 000000000..b3c872693 --- /dev/null +++ b/vendor/github.com/google/fscrypt/metadata/config.go @@ -0,0 +1,59 @@ +/* + * config.go - Parsing for our global config file. The file is simply the JSON + * output of the Config protocol buffer. + * + * Copyright 2017 Google Inc. + * Author: Joe Richey (joerichey@google.com) + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not + * use this file except in compliance with the License. You may obtain a copy of + * the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. + */ + +// Package metadata contains all of the on disk structures. +// These structures are defined in metadata.proto. The package also +// contains functions for manipulating these structures, specifically: +// * Reading and Writing the Config file to disk +// * Getting and Setting Policies for directories +// * Reasonable defaults for a Policy's EncryptionOptions +package metadata + +import ( + "io" + + "github.com/golang/protobuf/jsonpb" +) + +// WriteConfig outputs the Config data as nicely formatted JSON +func WriteConfig(config *Config, out io.Writer) error { + m := jsonpb.Marshaler{ + EmitDefaults: true, + EnumsAsInts: false, + Indent: "\t", + OrigName: true, + } + if err := m.Marshal(out, config); err != nil { + return err + } + + _, err := out.Write([]byte{'\n'}) + return err +} + +// ReadConfig writes the JSON data into the config structure +func ReadConfig(in io.Reader) (*Config, error) { + config := new(Config) + // Allow (and ignore) unknown fields for forwards compatibility. + u := jsonpb.Unmarshaler{ + AllowUnknownFields: true, + } + return config, u.Unmarshal(in, config) +} diff --git a/vendor/github.com/google/fscrypt/metadata/constants.go b/vendor/github.com/google/fscrypt/metadata/constants.go new file mode 100644 index 000000000..fa6b8a759 --- /dev/null +++ b/vendor/github.com/google/fscrypt/metadata/constants.go @@ -0,0 +1,57 @@ +/* + * constants.go - Some metadata constants used throughout fscrypt + * + * Copyright 2017 Google Inc. + * Author: Joe Richey (joerichey@google.com) + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not + * use this file except in compliance with the License. You may obtain a copy of + * the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the + * License for the specific language governing permissions and limitations under + * the License. + */ + +package metadata + +import ( + "crypto/sha256" + + "golang.org/x/sys/unix" +) + +// Lengths for our keys, buffers, and strings used in fscrypt. +const ( + // Length of policy descriptor (in hex chars) for v1 encryption policies + PolicyDescriptorLenV1 = 2 * unix.FSCRYPT_KEY_DESCRIPTOR_SIZE + // Length of protector descriptor (in hex chars) + ProtectorDescriptorLen = PolicyDescriptorLenV1 + // Length of policy descriptor (in hex chars) for v2 encryption policies + PolicyDescriptorLenV2 = 2 * unix.FSCRYPT_KEY_IDENTIFIER_SIZE + // We always use 256-bit keys internally (compared to 512-bit policy keys). + InternalKeyLen = 32 + IVLen = 16 + SaltLen = 16 + // We use SHA256 for the HMAC, and len(HMAC) == len(hash size). + HMACLen = sha256.Size + // PolicyKeyLen is the length of all keys passed directly to the Keyring + PolicyKeyLen = unix.FSCRYPT_MAX_KEY_SIZE +) + +var ( + // DefaultOptions use the supported encryption modes, max padding, and + // policy version 1. + DefaultOptions = &EncryptionOptions{ + Padding: 32, + Contents: EncryptionOptions_AES_256_XTS, + Filenames: EncryptionOptions_AES_256_CTS, + PolicyVersion: 1, + } + // DefaultSource is the source we use if none is specified. + DefaultSource = SourceType_custom_passphrase +) diff --git a/vendor/github.com/google/fscrypt/metadata/metadata.pb.go b/vendor/github.com/google/fscrypt/metadata/metadata.pb.go new file mode 100644 index 000000000..67098043c --- /dev/null +++ b/vendor/github.com/google/fscrypt/metadata/metadata.pb.go @@ -0,0 +1,589 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: metadata/metadata.proto + +package metadata + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. 
+const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +// Specifies the method in which an outside secret is obtained for a Protector +type SourceType int32 + +const ( + SourceType_default SourceType = 0 + SourceType_pam_passphrase SourceType = 1 + SourceType_custom_passphrase SourceType = 2 + SourceType_raw_key SourceType = 3 +) + +var SourceType_name = map[int32]string{ + 0: "default", + 1: "pam_passphrase", + 2: "custom_passphrase", + 3: "raw_key", +} +var SourceType_value = map[string]int32{ + "default": 0, + "pam_passphrase": 1, + "custom_passphrase": 2, + "raw_key": 3, +} + +func (x SourceType) String() string { + return proto.EnumName(SourceType_name, int32(x)) +} +func (SourceType) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_metadata_31965d2849cb292a, []int{0} +} + +// Type of encryption; should match declarations of unix.FSCRYPT_MODE +type EncryptionOptions_Mode int32 + +const ( + EncryptionOptions_default EncryptionOptions_Mode = 0 + EncryptionOptions_AES_256_XTS EncryptionOptions_Mode = 1 + EncryptionOptions_AES_256_GCM EncryptionOptions_Mode = 2 + EncryptionOptions_AES_256_CBC EncryptionOptions_Mode = 3 + EncryptionOptions_AES_256_CTS EncryptionOptions_Mode = 4 + EncryptionOptions_AES_128_CBC EncryptionOptions_Mode = 5 + EncryptionOptions_AES_128_CTS EncryptionOptions_Mode = 6 + EncryptionOptions_Adiantum EncryptionOptions_Mode = 9 +) + +var EncryptionOptions_Mode_name = map[int32]string{ + 0: "default", + 1: "AES_256_XTS", + 2: "AES_256_GCM", + 3: "AES_256_CBC", + 4: "AES_256_CTS", + 5: "AES_128_CBC", + 6: "AES_128_CTS", + 9: "Adiantum", +} +var EncryptionOptions_Mode_value = map[string]int32{ + "default": 0, + "AES_256_XTS": 1, + "AES_256_GCM": 2, + "AES_256_CBC": 3, + "AES_256_CTS": 4, + "AES_128_CBC": 5, + "AES_128_CTS": 6, + "Adiantum": 9, +} + +func (x EncryptionOptions_Mode) String() string { + return proto.EnumName(EncryptionOptions_Mode_name, int32(x)) +} +func (EncryptionOptions_Mode) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_metadata_31965d2849cb292a, []int{3, 0} +} + +// Cost parameters to be used in our hashing functions. 
+type HashingCosts struct { + Time int64 `protobuf:"varint,2,opt,name=time,proto3" json:"time,omitempty"` + Memory int64 `protobuf:"varint,3,opt,name=memory,proto3" json:"memory,omitempty"` + Parallelism int64 `protobuf:"varint,4,opt,name=parallelism,proto3" json:"parallelism,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *HashingCosts) Reset() { *m = HashingCosts{} } +func (m *HashingCosts) String() string { return proto.CompactTextString(m) } +func (*HashingCosts) ProtoMessage() {} +func (*HashingCosts) Descriptor() ([]byte, []int) { + return fileDescriptor_metadata_31965d2849cb292a, []int{0} +} +func (m *HashingCosts) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_HashingCosts.Unmarshal(m, b) +} +func (m *HashingCosts) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_HashingCosts.Marshal(b, m, deterministic) +} +func (dst *HashingCosts) XXX_Merge(src proto.Message) { + xxx_messageInfo_HashingCosts.Merge(dst, src) +} +func (m *HashingCosts) XXX_Size() int { + return xxx_messageInfo_HashingCosts.Size(m) +} +func (m *HashingCosts) XXX_DiscardUnknown() { + xxx_messageInfo_HashingCosts.DiscardUnknown(m) +} + +var xxx_messageInfo_HashingCosts proto.InternalMessageInfo + +func (m *HashingCosts) GetTime() int64 { + if m != nil { + return m.Time + } + return 0 +} + +func (m *HashingCosts) GetMemory() int64 { + if m != nil { + return m.Memory + } + return 0 +} + +func (m *HashingCosts) GetParallelism() int64 { + if m != nil { + return m.Parallelism + } + return 0 +} + +// This structure is used for our authenticated wrapping/unwrapping of keys. +type WrappedKeyData struct { + IV []byte `protobuf:"bytes,1,opt,name=IV,proto3" json:"IV,omitempty"` + EncryptedKey []byte `protobuf:"bytes,2,opt,name=encrypted_key,json=encryptedKey,proto3" json:"encrypted_key,omitempty"` + Hmac []byte `protobuf:"bytes,3,opt,name=hmac,proto3" json:"hmac,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *WrappedKeyData) Reset() { *m = WrappedKeyData{} } +func (m *WrappedKeyData) String() string { return proto.CompactTextString(m) } +func (*WrappedKeyData) ProtoMessage() {} +func (*WrappedKeyData) Descriptor() ([]byte, []int) { + return fileDescriptor_metadata_31965d2849cb292a, []int{1} +} +func (m *WrappedKeyData) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_WrappedKeyData.Unmarshal(m, b) +} +func (m *WrappedKeyData) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_WrappedKeyData.Marshal(b, m, deterministic) +} +func (dst *WrappedKeyData) XXX_Merge(src proto.Message) { + xxx_messageInfo_WrappedKeyData.Merge(dst, src) +} +func (m *WrappedKeyData) XXX_Size() int { + return xxx_messageInfo_WrappedKeyData.Size(m) +} +func (m *WrappedKeyData) XXX_DiscardUnknown() { + xxx_messageInfo_WrappedKeyData.DiscardUnknown(m) +} + +var xxx_messageInfo_WrappedKeyData proto.InternalMessageInfo + +func (m *WrappedKeyData) GetIV() []byte { + if m != nil { + return m.IV + } + return nil +} + +func (m *WrappedKeyData) GetEncryptedKey() []byte { + if m != nil { + return m.EncryptedKey + } + return nil +} + +func (m *WrappedKeyData) GetHmac() []byte { + if m != nil { + return m.Hmac + } + return nil +} + +// The associated data for each protector +type ProtectorData struct { + ProtectorDescriptor string 
`protobuf:"bytes,1,opt,name=protector_descriptor,json=protectorDescriptor,proto3" json:"protector_descriptor,omitempty"` + Source SourceType `protobuf:"varint,2,opt,name=source,proto3,enum=metadata.SourceType" json:"source,omitempty"` + // These are only used by some of the protector types + Name string `protobuf:"bytes,3,opt,name=name,proto3" json:"name,omitempty"` + Costs *HashingCosts `protobuf:"bytes,4,opt,name=costs,proto3" json:"costs,omitempty"` + Salt []byte `protobuf:"bytes,5,opt,name=salt,proto3" json:"salt,omitempty"` + Uid int64 `protobuf:"varint,6,opt,name=uid,proto3" json:"uid,omitempty"` + WrappedKey *WrappedKeyData `protobuf:"bytes,7,opt,name=wrapped_key,json=wrappedKey,proto3" json:"wrapped_key,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ProtectorData) Reset() { *m = ProtectorData{} } +func (m *ProtectorData) String() string { return proto.CompactTextString(m) } +func (*ProtectorData) ProtoMessage() {} +func (*ProtectorData) Descriptor() ([]byte, []int) { + return fileDescriptor_metadata_31965d2849cb292a, []int{2} +} +func (m *ProtectorData) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ProtectorData.Unmarshal(m, b) +} +func (m *ProtectorData) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ProtectorData.Marshal(b, m, deterministic) +} +func (dst *ProtectorData) XXX_Merge(src proto.Message) { + xxx_messageInfo_ProtectorData.Merge(dst, src) +} +func (m *ProtectorData) XXX_Size() int { + return xxx_messageInfo_ProtectorData.Size(m) +} +func (m *ProtectorData) XXX_DiscardUnknown() { + xxx_messageInfo_ProtectorData.DiscardUnknown(m) +} + +var xxx_messageInfo_ProtectorData proto.InternalMessageInfo + +func (m *ProtectorData) GetProtectorDescriptor() string { + if m != nil { + return m.ProtectorDescriptor + } + return "" +} + +func (m *ProtectorData) GetSource() SourceType { + if m != nil { + return m.Source + } + return SourceType_default +} + +func (m *ProtectorData) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *ProtectorData) GetCosts() *HashingCosts { + if m != nil { + return m.Costs + } + return nil +} + +func (m *ProtectorData) GetSalt() []byte { + if m != nil { + return m.Salt + } + return nil +} + +func (m *ProtectorData) GetUid() int64 { + if m != nil { + return m.Uid + } + return 0 +} + +func (m *ProtectorData) GetWrappedKey() *WrappedKeyData { + if m != nil { + return m.WrappedKey + } + return nil +} + +// Encryption policy specifics, corresponds to the fscrypt_policy struct +type EncryptionOptions struct { + Padding int64 `protobuf:"varint,1,opt,name=padding,proto3" json:"padding,omitempty"` + Contents EncryptionOptions_Mode `protobuf:"varint,2,opt,name=contents,proto3,enum=metadata.EncryptionOptions_Mode" json:"contents,omitempty"` + Filenames EncryptionOptions_Mode `protobuf:"varint,3,opt,name=filenames,proto3,enum=metadata.EncryptionOptions_Mode" json:"filenames,omitempty"` + PolicyVersion int64 `protobuf:"varint,4,opt,name=policy_version,json=policyVersion,proto3" json:"policy_version,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *EncryptionOptions) Reset() { *m = EncryptionOptions{} } +func (m *EncryptionOptions) String() string { return proto.CompactTextString(m) } +func (*EncryptionOptions) ProtoMessage() {} +func (*EncryptionOptions) Descriptor() ([]byte, []int) { + return 
fileDescriptor_metadata_31965d2849cb292a, []int{3} +} +func (m *EncryptionOptions) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_EncryptionOptions.Unmarshal(m, b) +} +func (m *EncryptionOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_EncryptionOptions.Marshal(b, m, deterministic) +} +func (dst *EncryptionOptions) XXX_Merge(src proto.Message) { + xxx_messageInfo_EncryptionOptions.Merge(dst, src) +} +func (m *EncryptionOptions) XXX_Size() int { + return xxx_messageInfo_EncryptionOptions.Size(m) +} +func (m *EncryptionOptions) XXX_DiscardUnknown() { + xxx_messageInfo_EncryptionOptions.DiscardUnknown(m) +} + +var xxx_messageInfo_EncryptionOptions proto.InternalMessageInfo + +func (m *EncryptionOptions) GetPadding() int64 { + if m != nil { + return m.Padding + } + return 0 +} + +func (m *EncryptionOptions) GetContents() EncryptionOptions_Mode { + if m != nil { + return m.Contents + } + return EncryptionOptions_default +} + +func (m *EncryptionOptions) GetFilenames() EncryptionOptions_Mode { + if m != nil { + return m.Filenames + } + return EncryptionOptions_default +} + +func (m *EncryptionOptions) GetPolicyVersion() int64 { + if m != nil { + return m.PolicyVersion + } + return 0 +} + +type WrappedPolicyKey struct { + ProtectorDescriptor string `protobuf:"bytes,1,opt,name=protector_descriptor,json=protectorDescriptor,proto3" json:"protector_descriptor,omitempty"` + WrappedKey *WrappedKeyData `protobuf:"bytes,2,opt,name=wrapped_key,json=wrappedKey,proto3" json:"wrapped_key,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *WrappedPolicyKey) Reset() { *m = WrappedPolicyKey{} } +func (m *WrappedPolicyKey) String() string { return proto.CompactTextString(m) } +func (*WrappedPolicyKey) ProtoMessage() {} +func (*WrappedPolicyKey) Descriptor() ([]byte, []int) { + return fileDescriptor_metadata_31965d2849cb292a, []int{4} +} +func (m *WrappedPolicyKey) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_WrappedPolicyKey.Unmarshal(m, b) +} +func (m *WrappedPolicyKey) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_WrappedPolicyKey.Marshal(b, m, deterministic) +} +func (dst *WrappedPolicyKey) XXX_Merge(src proto.Message) { + xxx_messageInfo_WrappedPolicyKey.Merge(dst, src) +} +func (m *WrappedPolicyKey) XXX_Size() int { + return xxx_messageInfo_WrappedPolicyKey.Size(m) +} +func (m *WrappedPolicyKey) XXX_DiscardUnknown() { + xxx_messageInfo_WrappedPolicyKey.DiscardUnknown(m) +} + +var xxx_messageInfo_WrappedPolicyKey proto.InternalMessageInfo + +func (m *WrappedPolicyKey) GetProtectorDescriptor() string { + if m != nil { + return m.ProtectorDescriptor + } + return "" +} + +func (m *WrappedPolicyKey) GetWrappedKey() *WrappedKeyData { + if m != nil { + return m.WrappedKey + } + return nil +} + +// The associated data for each policy +type PolicyData struct { + KeyDescriptor string `protobuf:"bytes,1,opt,name=key_descriptor,json=keyDescriptor,proto3" json:"key_descriptor,omitempty"` + Options *EncryptionOptions `protobuf:"bytes,2,opt,name=options,proto3" json:"options,omitempty"` + WrappedPolicyKeys []*WrappedPolicyKey `protobuf:"bytes,3,rep,name=wrapped_policy_keys,json=wrappedPolicyKeys,proto3" json:"wrapped_policy_keys,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *PolicyData) Reset() { *m = PolicyData{} } +func (m *PolicyData) 
String() string { return proto.CompactTextString(m) } +func (*PolicyData) ProtoMessage() {} +func (*PolicyData) Descriptor() ([]byte, []int) { + return fileDescriptor_metadata_31965d2849cb292a, []int{5} +} +func (m *PolicyData) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_PolicyData.Unmarshal(m, b) +} +func (m *PolicyData) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_PolicyData.Marshal(b, m, deterministic) +} +func (dst *PolicyData) XXX_Merge(src proto.Message) { + xxx_messageInfo_PolicyData.Merge(dst, src) +} +func (m *PolicyData) XXX_Size() int { + return xxx_messageInfo_PolicyData.Size(m) +} +func (m *PolicyData) XXX_DiscardUnknown() { + xxx_messageInfo_PolicyData.DiscardUnknown(m) +} + +var xxx_messageInfo_PolicyData proto.InternalMessageInfo + +func (m *PolicyData) GetKeyDescriptor() string { + if m != nil { + return m.KeyDescriptor + } + return "" +} + +func (m *PolicyData) GetOptions() *EncryptionOptions { + if m != nil { + return m.Options + } + return nil +} + +func (m *PolicyData) GetWrappedPolicyKeys() []*WrappedPolicyKey { + if m != nil { + return m.WrappedPolicyKeys + } + return nil +} + +// Data stored in the config file +type Config struct { + Source SourceType `protobuf:"varint,1,opt,name=source,proto3,enum=metadata.SourceType" json:"source,omitempty"` + HashCosts *HashingCosts `protobuf:"bytes,2,opt,name=hash_costs,json=hashCosts,proto3" json:"hash_costs,omitempty"` + Options *EncryptionOptions `protobuf:"bytes,4,opt,name=options,proto3" json:"options,omitempty"` + UseFsKeyringForV1Policies bool `protobuf:"varint,5,opt,name=use_fs_keyring_for_v1_policies,json=useFsKeyringForV1Policies,proto3" json:"use_fs_keyring_for_v1_policies,omitempty"` + AllowCrossUserMetadata bool `protobuf:"varint,6,opt,name=allow_cross_user_metadata,json=allowCrossUserMetadata,proto3" json:"allow_cross_user_metadata,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Config) Reset() { *m = Config{} } +func (m *Config) String() string { return proto.CompactTextString(m) } +func (*Config) ProtoMessage() {} +func (*Config) Descriptor() ([]byte, []int) { + return fileDescriptor_metadata_31965d2849cb292a, []int{6} +} +func (m *Config) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Config.Unmarshal(m, b) +} +func (m *Config) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Config.Marshal(b, m, deterministic) +} +func (dst *Config) XXX_Merge(src proto.Message) { + xxx_messageInfo_Config.Merge(dst, src) +} +func (m *Config) XXX_Size() int { + return xxx_messageInfo_Config.Size(m) +} +func (m *Config) XXX_DiscardUnknown() { + xxx_messageInfo_Config.DiscardUnknown(m) +} + +var xxx_messageInfo_Config proto.InternalMessageInfo + +func (m *Config) GetSource() SourceType { + if m != nil { + return m.Source + } + return SourceType_default +} + +func (m *Config) GetHashCosts() *HashingCosts { + if m != nil { + return m.HashCosts + } + return nil +} + +func (m *Config) GetOptions() *EncryptionOptions { + if m != nil { + return m.Options + } + return nil +} + +func (m *Config) GetUseFsKeyringForV1Policies() bool { + if m != nil { + return m.UseFsKeyringForV1Policies + } + return false +} + +func (m *Config) GetAllowCrossUserMetadata() bool { + if m != nil { + return m.AllowCrossUserMetadata + } + return false +} + +func init() { + proto.RegisterType((*HashingCosts)(nil), "metadata.HashingCosts") + 
proto.RegisterType((*WrappedKeyData)(nil), "metadata.WrappedKeyData") + proto.RegisterType((*ProtectorData)(nil), "metadata.ProtectorData") + proto.RegisterType((*EncryptionOptions)(nil), "metadata.EncryptionOptions") + proto.RegisterType((*WrappedPolicyKey)(nil), "metadata.WrappedPolicyKey") + proto.RegisterType((*PolicyData)(nil), "metadata.PolicyData") + proto.RegisterType((*Config)(nil), "metadata.Config") + proto.RegisterEnum("metadata.SourceType", SourceType_name, SourceType_value) + proto.RegisterEnum("metadata.EncryptionOptions_Mode", EncryptionOptions_Mode_name, EncryptionOptions_Mode_value) +} + +func init() { proto.RegisterFile("metadata/metadata.proto", fileDescriptor_metadata_31965d2849cb292a) } + +var fileDescriptor_metadata_31965d2849cb292a = []byte{ + // 748 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x9c, 0x55, 0xdb, 0x6a, 0xf3, 0x46, + 0x10, 0xae, 0x24, 0xc7, 0x87, 0xf1, 0xa1, 0xca, 0xfe, 0x69, 0xaa, 0xb4, 0x50, 0x8c, 0x4b, 0x20, + 0x94, 0x90, 0x62, 0x97, 0x94, 0x06, 0x4a, 0x21, 0x75, 0x92, 0x36, 0x09, 0xa1, 0xe9, 0xda, 0x75, + 0x5b, 0x28, 0x88, 0x8d, 0xb4, 0xb6, 0x17, 0x4b, 0x5a, 0xb1, 0xbb, 0x8a, 0xd1, 0x5d, 0xef, 0xfa, + 0x00, 0x7d, 0x97, 0xf6, 0x65, 0xfa, 0x30, 0x45, 0x2b, 0xc9, 0x87, 0x04, 0x42, 0xf2, 0xdf, 0x98, + 0xd9, 0x6f, 0x67, 0xe6, 0x9b, 0xf9, 0x66, 0xc7, 0x82, 0x8f, 0x43, 0xaa, 0x88, 0x4f, 0x14, 0xf9, + 0xb2, 0x34, 0x4e, 0x62, 0xc1, 0x15, 0x47, 0xf5, 0xf2, 0xdc, 0xfb, 0x03, 0x5a, 0x3f, 0x12, 0x39, + 0x67, 0xd1, 0x6c, 0xc8, 0xa5, 0x92, 0x08, 0x41, 0x45, 0xb1, 0x90, 0x3a, 0x66, 0xd7, 0x38, 0xb2, + 0xb0, 0xb6, 0xd1, 0x3e, 0x54, 0x43, 0x1a, 0x72, 0x91, 0x3a, 0x96, 0x46, 0x8b, 0x13, 0xea, 0x42, + 0x33, 0x26, 0x82, 0x04, 0x01, 0x0d, 0x98, 0x0c, 0x9d, 0x8a, 0xbe, 0xdc, 0x84, 0x7a, 0xbf, 0x43, + 0xe7, 0x57, 0x41, 0xe2, 0x98, 0xfa, 0xb7, 0x34, 0xbd, 0x20, 0x8a, 0xa0, 0x0e, 0x98, 0xd7, 0x13, + 0xc7, 0xe8, 0x1a, 0x47, 0x2d, 0x6c, 0x5e, 0x4f, 0xd0, 0xe7, 0xd0, 0xa6, 0x91, 0x27, 0xd2, 0x58, + 0x51, 0xdf, 0x5d, 0xd0, 0x54, 0x13, 0xb7, 0x70, 0x6b, 0x05, 0xde, 0xd2, 0x34, 0x2b, 0x6a, 0x1e, + 0x12, 0x4f, 0xd3, 0xb7, 0xb0, 0xb6, 0x7b, 0x7f, 0x9b, 0xd0, 0xbe, 0x17, 0x5c, 0x51, 0x4f, 0x71, + 0xa1, 0x53, 0xf7, 0x61, 0x2f, 0x2e, 0x01, 0xd7, 0xa7, 0xd2, 0x13, 0x2c, 0x56, 0x5c, 0x68, 0xb2, + 0x06, 0x7e, 0xb7, 0xba, 0xbb, 0x58, 0x5d, 0xa1, 0x63, 0xa8, 0x4a, 0x9e, 0x08, 0x2f, 0xef, 0xb7, + 0x33, 0xd8, 0x3b, 0x59, 0x09, 0x35, 0xd2, 0xf8, 0x38, 0x8d, 0x29, 0x2e, 0x7c, 0xb2, 0x32, 0x22, + 0x12, 0x52, 0x5d, 0x46, 0x03, 0x6b, 0x1b, 0x1d, 0xc3, 0x8e, 0x97, 0x09, 0xa7, 0xbb, 0x6f, 0x0e, + 0xf6, 0xd7, 0x09, 0x36, 0x65, 0xc5, 0xb9, 0x53, 0x96, 0x41, 0x92, 0x40, 0x39, 0x3b, 0x79, 0x23, + 0x99, 0x8d, 0x6c, 0xb0, 0x12, 0xe6, 0x3b, 0x55, 0xad, 0x5e, 0x66, 0xa2, 0x33, 0x68, 0x2e, 0x73, + 0xd5, 0xb4, 0x22, 0x35, 0x9d, 0xd9, 0x59, 0x67, 0xde, 0x96, 0x14, 0xc3, 0x72, 0x75, 0xee, 0xfd, + 0x67, 0xc2, 0xee, 0x65, 0x2e, 0x1d, 0xe3, 0xd1, 0x4f, 0xfa, 0x57, 0x22, 0x07, 0x6a, 0x31, 0xf1, + 0x7d, 0x16, 0xcd, 0xb4, 0x18, 0x16, 0x2e, 0x8f, 0xe8, 0x5b, 0xa8, 0x7b, 0x3c, 0x52, 0x34, 0x52, + 0xb2, 0x90, 0xa0, 0xbb, 0xe6, 0x79, 0x96, 0xe8, 0xe4, 0x8e, 0xfb, 0x14, 0xaf, 0x22, 0xd0, 0x77, + 0xd0, 0x98, 0xb2, 0x80, 0x66, 0x42, 0x48, 0xad, 0xca, 0x6b, 0xc2, 0xd7, 0x21, 0xe8, 0x10, 0x3a, + 0x31, 0x0f, 0x98, 0x97, 0xba, 0x8f, 0x54, 0x48, 0xc6, 0xa3, 0xe2, 0x0d, 0xb5, 0x73, 0x74, 0x92, + 0x83, 0xbd, 0xbf, 0x0c, 0xa8, 0x64, 0xa1, 0xa8, 0x09, 0x35, 0x9f, 0x4e, 0x49, 0x12, 0x28, 0xfb, + 0x03, 0xf4, 0x21, 0x34, 0xcf, 0x2f, 0x47, 0xee, 0xe0, 0xf4, 0x6b, 0xf7, 0xb7, 
0xf1, 0xc8, 0x36, + 0x36, 0x81, 0x1f, 0x86, 0x77, 0xb6, 0xb9, 0x09, 0x0c, 0xbf, 0x1f, 0xda, 0xd6, 0x16, 0x30, 0x1e, + 0xd9, 0x95, 0x12, 0xe8, 0x0f, 0xbe, 0xd1, 0x1e, 0x3b, 0x5b, 0xc0, 0x78, 0x64, 0x57, 0x51, 0x0b, + 0xea, 0xe7, 0x3e, 0x23, 0x91, 0x4a, 0x42, 0xbb, 0xd1, 0xfb, 0xd3, 0x00, 0xbb, 0x50, 0xff, 0x5e, + 0x97, 0x98, 0xbd, 0xce, 0xf7, 0x78, 0x77, 0x4f, 0x26, 0x6c, 0xbe, 0x61, 0xc2, 0xff, 0x18, 0x00, + 0x39, 0xb7, 0x7e, 0xf4, 0x87, 0xd0, 0x59, 0xd0, 0xf4, 0x39, 0x6d, 0x7b, 0x41, 0xd3, 0x0d, 0xc2, + 0x53, 0xa8, 0xf1, 0x7c, 0x08, 0x05, 0xd9, 0xa7, 0x2f, 0xcc, 0x09, 0x97, 0xbe, 0xe8, 0x06, 0xde, + 0x95, 0x75, 0x16, 0x83, 0x5a, 0xd0, 0x34, 0x1b, 0xb5, 0x75, 0xd4, 0x1c, 0x7c, 0xf2, 0xac, 0xde, + 0x95, 0x26, 0x78, 0x77, 0xf9, 0x04, 0x91, 0xbd, 0x7f, 0x4d, 0xa8, 0x0e, 0x79, 0x34, 0x65, 0xb3, + 0x8d, 0xb5, 0x33, 0x5e, 0xb1, 0x76, 0xa7, 0x00, 0x73, 0x22, 0xe7, 0x6e, 0xbe, 0x67, 0xe6, 0x8b, + 0x7b, 0xd6, 0xc8, 0x3c, 0xf3, 0x7f, 0xb2, 0x8d, 0x96, 0x2b, 0x6f, 0x68, 0xf9, 0x1c, 0x3e, 0x4b, + 0x24, 0x75, 0xa7, 0x32, 0x6b, 0x55, 0xb0, 0x68, 0xe6, 0x4e, 0xb9, 0x70, 0x1f, 0xfb, 0xb9, 0x00, + 0x8c, 0x4a, 0xbd, 0xbc, 0x75, 0x7c, 0x90, 0x48, 0x7a, 0x25, 0x6f, 0x73, 0x9f, 0x2b, 0x2e, 0x26, + 0xfd, 0xfb, 0xc2, 0x01, 0x9d, 0xc1, 0x01, 0x09, 0x02, 0xbe, 0x74, 0x3d, 0xc1, 0xa5, 0x74, 0x13, + 0x49, 0x85, 0x5b, 0x52, 0xeb, 0x3d, 0xaf, 0xe3, 0x7d, 0xed, 0x30, 0xcc, 0xee, 0x7f, 0x91, 0x54, + 0xdc, 0x15, 0xb7, 0x37, 0x95, 0xba, 0x65, 0x57, 0x70, 0xdb, 0xe3, 0x61, 0x4c, 0x14, 0x7b, 0x60, + 0x01, 0x53, 0xe9, 0x17, 0x3f, 0x03, 0xac, 0x65, 0xd9, 0x5e, 0x02, 0x04, 0x9d, 0x98, 0x84, 0x6e, + 0x4c, 0xa4, 0x8c, 0xe7, 0x82, 0x48, 0x6a, 0x1b, 0xe8, 0x23, 0xd8, 0xf5, 0x12, 0xa9, 0xf8, 0x16, + 0x6c, 0x66, 0x71, 0x82, 0x2c, 0xb3, 0xae, 0x6c, 0xeb, 0xa1, 0xaa, 0xbf, 0x03, 0x5f, 0xfd, 0x1f, + 0x00, 0x00, 0xff, 0xff, 0xe2, 0x78, 0x9e, 0x2e, 0x22, 0x06, 0x00, 0x00, +} diff --git a/vendor/github.com/google/fscrypt/metadata/metadata.proto b/vendor/github.com/google/fscrypt/metadata/metadata.proto new file mode 100644 index 000000000..84245e020 --- /dev/null +++ b/vendor/github.com/google/fscrypt/metadata/metadata.proto @@ -0,0 +1,107 @@ +/* + * metadata.proto - File which contains all of the metadata structures which we + * write to metadata files. Must be compiled with protoc to use the library. + * Compilation can be invoked with go generate. + * + * Copyright 2017 Google Inc. + * Author: Joe Richey (joerichey@google.com) + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not + * use this file except in compliance with the License. You may obtain a copy of + * the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. + */ + +// If you modify this file, be sure to run "go generate" on this package. +syntax = "proto3"; +package metadata; + +// Cost parameters to be used in our hashing functions. +message HashingCosts { + int64 time = 2; + int64 memory = 3; + int64 parallelism = 4; +} + +// This structure is used for our authenticated wrapping/unwrapping of keys. 
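+// The IV and hmac lengths are expected to match IVLen and HMACLen in
+// metadata/constants.go; WrappedKeyData.CheckValidity in metadata/checks.go
+// enforces this.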
+message WrappedKeyData { + bytes IV = 1; + bytes encrypted_key = 2; + bytes hmac = 3; +} + +// Specifies the method in which an outside secret is obtained for a Protector +enum SourceType { + default = 0; + pam_passphrase = 1; + custom_passphrase = 2; + raw_key = 3; +} + +// The associated data for each protector +message ProtectorData { + string protector_descriptor = 1; + SourceType source = 2; + + // These are only used by some of the protector types + string name = 3; + HashingCosts costs = 4; + bytes salt = 5; + int64 uid = 6; + + WrappedKeyData wrapped_key = 7; +} + +// Encryption policy specifics, corresponds to the fscrypt_policy struct +message EncryptionOptions { + int64 padding = 1; + + // Type of encryption; should match declarations of unix.FSCRYPT_MODE + enum Mode { + default = 0; + AES_256_XTS = 1; + AES_256_GCM = 2; + AES_256_CBC = 3; + AES_256_CTS = 4; + AES_128_CBC = 5; + AES_128_CTS = 6; + Adiantum = 9; + } + + Mode contents = 2; + Mode filenames = 3; + + int64 policy_version = 4; +} + +message WrappedPolicyKey { + string protector_descriptor = 1; + WrappedKeyData wrapped_key = 2; +} + +// The associated data for each policy +message PolicyData { + string key_descriptor = 1; + EncryptionOptions options = 2; + repeated WrappedPolicyKey wrapped_policy_keys = 3; +} + +// Data stored in the config file +message Config { + SourceType source = 1; + HashingCosts hash_costs = 2; + EncryptionOptions options = 4; + bool use_fs_keyring_for_v1_policies = 5; + bool allow_cross_user_metadata = 6; + + // reserve the removed field 'string compatibility = 3;' + reserved 3; + reserved "compatibility"; +} diff --git a/vendor/github.com/google/fscrypt/metadata/policy.go b/vendor/github.com/google/fscrypt/metadata/policy.go new file mode 100644 index 000000000..e218a0814 --- /dev/null +++ b/vendor/github.com/google/fscrypt/metadata/policy.go @@ -0,0 +1,348 @@ +/* + * policy.go - Functions for getting and setting policies on a specified + * directory or file. + * + * Copyright 2017 Google Inc. + * Author: Joe Richey (joerichey@google.com) + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not + * use this file except in compliance with the License. You may obtain a copy of + * the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. + */ + +package metadata + +import ( + "encoding/hex" + "fmt" + "log" + "math" + "os" + "os/user" + "strconv" + "unsafe" + + "github.com/pkg/errors" + "golang.org/x/sys/unix" + + "github.com/google/fscrypt/util" +) + +var ( + // ErrEncryptionNotSupported indicates that encryption is not supported + // on the given filesystem, and there is no way to enable it. + ErrEncryptionNotSupported = errors.New("encryption not supported") + + // ErrEncryptionNotEnabled indicates that encryption is not supported on + // the given filesystem, but there is a way to enable it. + ErrEncryptionNotEnabled = errors.New("encryption not enabled") +) + +// ErrAlreadyEncrypted indicates that the path is already encrypted. 
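+// SetPolicy returns it when the kernel reports EEXIST for the target
+// directory.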
+type ErrAlreadyEncrypted struct { + Path string +} + +func (err *ErrAlreadyEncrypted) Error() string { + return fmt.Sprintf("file or directory %q is already encrypted", err.Path) +} + +// ErrBadEncryptionOptions indicates that unsupported encryption options were given. +type ErrBadEncryptionOptions struct { + Path string + Options *EncryptionOptions +} + +func (err *ErrBadEncryptionOptions) Error() string { + return fmt.Sprintf(`cannot encrypt %q because the kernel doesn't support the requested encryption options. + + The options are %s`, err.Path, err.Options) +} + +// ErrDirectoryNotOwned indicates a directory can't be encrypted because it's +// owned by another user. +type ErrDirectoryNotOwned struct { + Path string + Owner uint32 +} + +func (err *ErrDirectoryNotOwned) Error() string { + owner := strconv.Itoa(int(err.Owner)) + if u, e := user.LookupId(owner); e == nil && u.Username != "" { + owner = u.Username + } + return fmt.Sprintf(`cannot encrypt %q because it's owned by another user (%s). + + Encryption can only be enabled on a directory you own, even if you have + write access to the directory.`, err.Path, owner) +} + +// ErrNotEncrypted indicates that the path is not encrypted. +type ErrNotEncrypted struct { + Path string +} + +func (err *ErrNotEncrypted) Error() string { + return fmt.Sprintf("file or directory %q is not encrypted", err.Path) +} + +func policyIoctl(file *os.File, request uintptr, arg unsafe.Pointer) error { + _, _, errno := unix.Syscall(unix.SYS_IOCTL, file.Fd(), request, uintptr(arg)) + if errno == 0 { + return nil + } + return errno +} + +// Maps EncryptionOptions.Padding <-> FSCRYPT_POLICY_FLAGS +var ( + paddingArray = []int64{4, 8, 16, 32} + flagsArray = []int64{unix.FSCRYPT_POLICY_FLAGS_PAD_4, unix.FSCRYPT_POLICY_FLAGS_PAD_8, + unix.FSCRYPT_POLICY_FLAGS_PAD_16, unix.FSCRYPT_POLICY_FLAGS_PAD_32} +) + +// flagsToPadding returns the amount of padding specified in the policy flags. +func flagsToPadding(flags uint8) int64 { + paddingFlag := int64(flags & unix.FS_POLICY_FLAGS_PAD_MASK) + + // This lookup should always succeed + padding, ok := util.Lookup(paddingFlag, flagsArray, paddingArray) + if !ok { + log.Panicf("padding flag of %x not found", paddingFlag) + } + return padding +} + +func buildV1PolicyData(policy *unix.FscryptPolicyV1) *PolicyData { + return &PolicyData{ + KeyDescriptor: hex.EncodeToString(policy.Master_key_descriptor[:]), + Options: &EncryptionOptions{ + Padding: flagsToPadding(policy.Flags), + Contents: EncryptionOptions_Mode(policy.Contents_encryption_mode), + Filenames: EncryptionOptions_Mode(policy.Filenames_encryption_mode), + PolicyVersion: 1, + }, + } +} + +func buildV2PolicyData(policy *unix.FscryptPolicyV2) *PolicyData { + return &PolicyData{ + KeyDescriptor: hex.EncodeToString(policy.Master_key_identifier[:]), + Options: &EncryptionOptions{ + Padding: flagsToPadding(policy.Flags), + Contents: EncryptionOptions_Mode(policy.Contents_encryption_mode), + Filenames: EncryptionOptions_Mode(policy.Filenames_encryption_mode), + PolicyVersion: 2, + }, + } +} + +// GetPolicy returns the Policy data for the given directory or file (includes +// the KeyDescriptor and the encryption options). Returns an error if the +// path is not encrypted or the policy couldn't be retrieved. +func GetPolicy(path string) (*PolicyData, error) { + file, err := os.Open(path) + if err != nil { + return nil, err + } + defer file.Close() + + // First try the new version of the ioctl. This works for both v1 and v2 policies. 
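+	// arg.Size is an in/out field: it is initialized to the size of the
+	// policy buffer, and the kernel overwrites it with the size of the
+	// policy it returned, which the v1/v2 size checks below rely on.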
+ var arg unix.FscryptGetPolicyExArg + arg.Size = uint64(unsafe.Sizeof(arg.Policy)) + policyPtr := util.Ptr(arg.Policy[:]) + err = policyIoctl(file, unix.FS_IOC_GET_ENCRYPTION_POLICY_EX, unsafe.Pointer(&arg)) + if err == unix.ENOTTY { + // Fall back to the old version of the ioctl. This works for v1 policies only. + err = policyIoctl(file, unix.FS_IOC_GET_ENCRYPTION_POLICY, policyPtr) + arg.Size = uint64(unsafe.Sizeof(unix.FscryptPolicyV1{})) + } + switch err { + case nil: + break + case unix.ENOTTY: + return nil, ErrEncryptionNotSupported + case unix.EOPNOTSUPP: + return nil, ErrEncryptionNotEnabled + case unix.ENODATA, unix.ENOENT: + // ENOENT was returned instead of ENODATA on some filesystems before v4.11. + return nil, &ErrNotEncrypted{path} + default: + return nil, errors.Wrapf(err, "failed to get encryption policy of %q", path) + } + switch arg.Policy[0] { // arg.policy.version + case unix.FSCRYPT_POLICY_V1: + if arg.Size != uint64(unsafe.Sizeof(unix.FscryptPolicyV1{})) { + // should never happen + return nil, errors.New("unexpected size for v1 policy") + } + return buildV1PolicyData((*unix.FscryptPolicyV1)(policyPtr)), nil + case unix.FSCRYPT_POLICY_V2: + if arg.Size != uint64(unsafe.Sizeof(unix.FscryptPolicyV2{})) { + // should never happen + return nil, errors.New("unexpected size for v2 policy") + } + return buildV2PolicyData((*unix.FscryptPolicyV2)(policyPtr)), nil + default: + return nil, errors.Errorf("unsupported encryption policy version [%d]", + arg.Policy[0]) + } +} + +// For improved performance, use the DIRECT_KEY flag when using ciphers that +// support it, e.g. Adiantum. It is safe because fscrypt won't reuse the key +// for any other policy. (Multiple directories with same policy are okay.) +func shouldUseDirectKeyFlag(options *EncryptionOptions) bool { + // Contents and filenames encryption modes must be the same + if options.Contents != options.Filenames { + return false + } + // Currently only Adiantum supports DIRECT_KEY. 
+ return options.Contents == EncryptionOptions_Adiantum +} + +func buildPolicyFlags(options *EncryptionOptions) uint8 { + // This lookup should always succeed (as policy is valid) + flags, ok := util.Lookup(options.Padding, paddingArray, flagsArray) + if !ok { + log.Panicf("padding of %d was not found", options.Padding) + } + if shouldUseDirectKeyFlag(options) { + flags |= unix.FSCRYPT_POLICY_FLAG_DIRECT_KEY + } + return uint8(flags) +} + +func setV1Policy(file *os.File, options *EncryptionOptions, descriptorBytes []byte) error { + policy := unix.FscryptPolicyV1{ + Version: unix.FSCRYPT_POLICY_V1, + Contents_encryption_mode: uint8(options.Contents), + Filenames_encryption_mode: uint8(options.Filenames), + Flags: uint8(buildPolicyFlags(options)), + } + + // The descriptor should always be the correct length (as policy is valid) + if len(descriptorBytes) != unix.FSCRYPT_KEY_DESCRIPTOR_SIZE { + log.Panic("wrong descriptor size for v1 policy") + } + copy(policy.Master_key_descriptor[:], descriptorBytes) + + return policyIoctl(file, unix.FS_IOC_SET_ENCRYPTION_POLICY, unsafe.Pointer(&policy)) +} + +func setV2Policy(file *os.File, options *EncryptionOptions, descriptorBytes []byte) error { + policy := unix.FscryptPolicyV2{ + Version: unix.FSCRYPT_POLICY_V2, + Contents_encryption_mode: uint8(options.Contents), + Filenames_encryption_mode: uint8(options.Filenames), + Flags: uint8(buildPolicyFlags(options)), + } + + // The descriptor should always be the correct length (as policy is valid) + if len(descriptorBytes) != unix.FSCRYPT_KEY_IDENTIFIER_SIZE { + log.Panic("wrong descriptor size for v2 policy") + } + copy(policy.Master_key_identifier[:], descriptorBytes) + + return policyIoctl(file, unix.FS_IOC_SET_ENCRYPTION_POLICY, unsafe.Pointer(&policy)) +} + +// SetPolicy sets up the specified directory to be encrypted with the specified +// policy. Returns an error if we cannot set the policy for any reason (not a +// directory, invalid options or KeyDescriptor, etc). +func SetPolicy(path string, data *PolicyData) error { + file, err := os.Open(path) + if err != nil { + return err + } + defer file.Close() + + if err = data.CheckValidity(); err != nil { + return errors.Wrap(err, "invalid policy") + } + + descriptorBytes, err := hex.DecodeString(data.KeyDescriptor) + if err != nil { + return errors.New("invalid key descriptor: " + data.KeyDescriptor) + } + + switch data.Options.PolicyVersion { + case 1: + err = setV1Policy(file, data.Options, descriptorBytes) + case 2: + err = setV2Policy(file, data.Options, descriptorBytes) + default: + err = errors.Errorf("policy version of %d is invalid", data.Options.PolicyVersion) + } + if err == unix.EINVAL { + // Before kernel v4.11, many different errors all caused unix.EINVAL to be returned. + // We try to disambiguate this error here. This disambiguation will not always give + // the correct error due to a potential race condition on path. 
+ if info, statErr := os.Stat(path); statErr != nil || !info.IsDir() { + // Checking if the path is not a directory + err = unix.ENOTDIR + } else if _, policyErr := GetPolicy(path); policyErr == nil { + // Checking if a policy is already set on this directory + err = unix.EEXIST + } + } + switch err { + case nil: + return nil + case unix.EACCES: + var stat unix.Stat_t + if statErr := unix.Stat(path, &stat); statErr == nil && stat.Uid != uint32(os.Geteuid()) { + return &ErrDirectoryNotOwned{path, stat.Uid} + } + case unix.EEXIST: + return &ErrAlreadyEncrypted{path} + case unix.EINVAL: + return &ErrBadEncryptionOptions{path, data.Options} + case unix.ENOTTY: + return ErrEncryptionNotSupported + case unix.EOPNOTSUPP: + return ErrEncryptionNotEnabled + } + return errors.Wrapf(err, "failed to set encryption policy on %q", path) +} + +// CheckSupport returns an error if the filesystem containing path does not +// support filesystem encryption. This can be for many reasons including an +// incompatible kernel or filesystem or not enabling the right feature flags. +func CheckSupport(path string) error { + file, err := os.Open(path) + if err != nil { + return err + } + defer file.Close() + + // On supported directories, giving a bad policy will return EINVAL + badPolicy := unix.FscryptPolicyV1{ + Version: math.MaxUint8, + Contents_encryption_mode: math.MaxUint8, + Filenames_encryption_mode: math.MaxUint8, + Flags: math.MaxUint8, + } + + err = policyIoctl(file, unix.FS_IOC_SET_ENCRYPTION_POLICY, unsafe.Pointer(&badPolicy)) + switch err { + case nil: + log.Panicf(`FS_IOC_SET_ENCRYPTION_POLICY succeeded when it should have failed. + Please open an issue, filesystem %q may be corrupted.`, path) + case unix.EINVAL, unix.EACCES: + return nil + case unix.ENOTTY: + return ErrEncryptionNotSupported + case unix.EOPNOTSUPP: + return ErrEncryptionNotEnabled + } + return errors.Wrapf(err, "unexpected error checking for encryption support on filesystem %q", path) +} diff --git a/vendor/github.com/google/fscrypt/security/cache.go b/vendor/github.com/google/fscrypt/security/cache.go new file mode 100644 index 000000000..f11248d2b --- /dev/null +++ b/vendor/github.com/google/fscrypt/security/cache.go @@ -0,0 +1,49 @@ +/* + * cache.go - Handles cache clearing and management. + * + * Copyright 2017 Google Inc. + * Author: Joe Richey (joerichey@google.com) + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not + * use this file except in compliance with the License. You may obtain a copy of + * the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. + */ + +package security + +import ( + "log" + "os" + + "golang.org/x/sys/unix" +) + +// DropFilesystemCache instructs the kernel to free the reclaimable inodes and +// dentries. This has the effect of making encrypted directories whose keys are +// not present no longer accessible. Requires root privileges. +func DropFilesystemCache() error { + // Dirty reclaimable inodes must be synced so that they will be freed. 
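+	// unix.Sync wraps the sync(2) system call, writing out dirty data and
+	// metadata so that the inodes become reclaimable.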
+ log.Print("syncing changes to filesystem") + unix.Sync() + + // See: https://www.kernel.org/doc/Documentation/sysctl/vm.txt + log.Print("freeing reclaimable inodes and dentries") + file, err := os.OpenFile("/proc/sys/vm/drop_caches", os.O_WRONLY|os.O_SYNC, 0) + if err != nil { + return err + } + defer file.Close() + // "2" just frees the reclaimable inodes and dentries. The associated + // pages to these inodes will be freed. We do not need to free the + // entire pagecache (as this will severely impact performance). + _, err = file.WriteString("2") + return err +} diff --git a/vendor/github.com/google/fscrypt/security/privileges.go b/vendor/github.com/google/fscrypt/security/privileges.go new file mode 100644 index 000000000..5bdd43c5d --- /dev/null +++ b/vendor/github.com/google/fscrypt/security/privileges.go @@ -0,0 +1,156 @@ +/* + * privileges.go - Functions for managing users and privileges. + * + * Copyright 2017 Google Inc. + * Author: Joe Richey (joerichey@google.com) + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not + * use this file except in compliance with the License. You may obtain a copy of + * the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. + */ + +// Package security manages: +// - Cache clearing (cache.go) +// - Privilege manipulation (privileges.go) +package security + +// Use the libc versions of setreuid, setregid, and setgroups instead of the +// "sys/unix" versions. The "sys/unix" versions use the raw syscalls which +// operate on the calling thread only, whereas the libc versions operate on the +// whole process. And we need to operate on the whole process, firstly for +// pam_fscrypt to prevent the privileges of Go worker threads from diverging +// from the PAM stack's "main" thread, violating libc's assumption and causing +// an abort() later in the PAM stack; and secondly because Go code may migrate +// between OS-level threads while it's running. +// +// See also: https://github.com/golang/go/issues/1435 + +/* +#define _GNU_SOURCE // for getresuid and setresuid +#include +#include // getting and setting uids and gids +#include // setgroups +*/ +import "C" + +import ( + "log" + "os/user" + "syscall" + + "github.com/pkg/errors" + + "github.com/google/fscrypt/util" +) + +// Privileges encapsulate the effective uid/gid and groups of a process. +type Privileges struct { + euid C.uid_t + egid C.gid_t + groups []C.gid_t +} + +// ProcessPrivileges returns the process's current effective privileges. +func ProcessPrivileges() (*Privileges, error) { + ruid := C.getuid() + euid := C.geteuid() + rgid := C.getgid() + egid := C.getegid() + + var groups []C.gid_t + n, err := C.getgroups(0, nil) + if n < 0 { + return nil, err + } + // If n == 0, the user isn't in any groups, so groups == nil is fine. + if n > 0 { + groups = make([]C.gid_t, n) + n, err = C.getgroups(n, &groups[0]) + if n < 0 { + return nil, err + } + groups = groups[:n] + } + log.Printf("Current privs (real, effective): uid=(%d,%d) gid=(%d,%d) groups=%v", + ruid, euid, rgid, egid, groups) + return &Privileges{euid, egid, groups}, nil +} + +// UserPrivileges returns the default privileges for the specified user. 
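+//
+// A typical drop-and-restore sequence (an illustrative sketch with error
+// handling elided; targetUser stands for any *user.User) looks like:
+//
+//	saved, _ := ProcessPrivileges()
+//	privs, _ := UserPrivileges(targetUser)
+//	_ = SetProcessPrivileges(privs) // act as targetUser
+//	// ... do work with the user's privileges ...
+//	_ = SetProcessPrivileges(saved) // restore the original privileges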
+func UserPrivileges(user *user.User) (*Privileges, error) { + privs := &Privileges{ + euid: C.uid_t(util.AtoiOrPanic(user.Uid)), + egid: C.gid_t(util.AtoiOrPanic(user.Gid)), + } + userGroups, err := user.GroupIds() + if err != nil { + return nil, util.SystemError(err.Error()) + } + privs.groups = make([]C.gid_t, len(userGroups)) + for i, group := range userGroups { + privs.groups[i] = C.gid_t(util.AtoiOrPanic(group)) + } + return privs, nil +} + +// SetProcessPrivileges sets the privileges of the current process to have those +// specified by privs. The original privileges can be obtained by first saving +// the output of ProcessPrivileges, calling SetProcessPrivileges with the +// desired privs, then calling SetProcessPrivileges with the saved privs. +func SetProcessPrivileges(privs *Privileges) error { + log.Printf("Setting euid=%d egid=%d groups=%v", privs.euid, privs.egid, privs.groups) + + // If setting privs as root, we need to set the euid to 0 first, so that + // we will have the necessary permissions to make the other changes to + // the groups/egid/euid, regardless of our original euid. + C.seteuid(0) + + // Separately handle the case where the user is in no groups. + numGroups := C.size_t(len(privs.groups)) + groupsPtr := (*C.gid_t)(nil) + if numGroups > 0 { + groupsPtr = &privs.groups[0] + } + + if res, err := C.setgroups(numGroups, groupsPtr); res < 0 { + return errors.Wrapf(err.(syscall.Errno), "setting groups") + } + if res, err := C.setegid(privs.egid); res < 0 { + return errors.Wrapf(err.(syscall.Errno), "setting egid") + } + if res, err := C.seteuid(privs.euid); res < 0 { + return errors.Wrapf(err.(syscall.Errno), "setting euid") + } + ProcessPrivileges() + return nil +} + +// SetUids sets the process's real, effective, and saved UIDs. +func SetUids(ruid, euid, suid int) error { + log.Printf("Setting ruid=%d euid=%d suid=%d", ruid, euid, suid) + // We elevate all the privs before setting them. This prevents issues + // with (ruid=1000,euid=1000,suid=0), where just a single call to + // setresuid might fail with permission denied. + if res, err := C.setresuid(0, 0, 0); res < 0 { + return errors.Wrapf(err.(syscall.Errno), "setting uids") + } + if res, err := C.setresuid(C.uid_t(ruid), C.uid_t(euid), C.uid_t(suid)); res < 0 { + return errors.Wrapf(err.(syscall.Errno), "setting uids") + } + return nil +} + +// GetUids gets the process's real, effective, and saved UIDs. +func GetUids() (int, int, int) { + var ruid, euid, suid C.uid_t + C.getresuid(&ruid, &euid, &suid) + return int(ruid), int(euid), int(suid) +} diff --git a/vendor/github.com/google/fscrypt/util/errors.go b/vendor/github.com/google/fscrypt/util/errors.go new file mode 100644 index 000000000..3c87a2c4c --- /dev/null +++ b/vendor/github.com/google/fscrypt/util/errors.go @@ -0,0 +1,135 @@ +/* + * errors.go - Custom errors and error functions used by fscrypt + * + * Copyright 2017 Google Inc. + * Author: Joe Richey (joerichey@google.com) + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not + * use this file except in compliance with the License. You may obtain a copy of + * the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. 
+ */ + +package util + +import ( + "fmt" + "io" + "log" + "os" + + "github.com/pkg/errors" +) + +// ErrReader wraps an io.Reader, passing along calls to Read() until a read +// fails. Then, the error is stored, and all subsequent calls to Read() do +// nothing. This allows you to write code which has many subsequent reads and +// do all of the error checking at the end. For example: +// +// r := NewErrReader(reader) +// r.Read(foo) +// r.Read(bar) +// r.Read(baz) +// if r.Err() != nil { +// // Handle error +// } +// +// Taken from https://blog.golang.org/errors-are-values by Rob Pike. +type ErrReader struct { + r io.Reader + err error +} + +// NewErrReader creates an ErrReader which wraps the provided reader. +func NewErrReader(reader io.Reader) *ErrReader { + return &ErrReader{r: reader, err: nil} +} + +// Read runs ReadFull on the wrapped reader if no errors have occurred. +// Otherwise, the previous error is just returned and no reads are attempted. +func (e *ErrReader) Read(p []byte) (n int, err error) { + if e.err == nil { + n, e.err = io.ReadFull(e.r, p) + } + return n, e.err +} + +// Err returns the first encountered err (or nil if no errors occurred). +func (e *ErrReader) Err() error { + return e.err +} + +// ErrWriter works exactly like ErrReader, except with io.Writer. +type ErrWriter struct { + w io.Writer + err error +} + +// NewErrWriter creates an ErrWriter which wraps the provided writer. +func NewErrWriter(writer io.Writer) *ErrWriter { + return &ErrWriter{w: writer, err: nil} +} + +// Write runs the wrapped writer's Write if no errors have occurred. Otherwise, +// the previous error is just returned and no writes are attempted. +func (e *ErrWriter) Write(p []byte) (n int, err error) { + if e.err == nil { + n, e.err = e.w.Write(p) + } + return n, e.err +} + +// Err returns the first encountered err (or nil if no errors occurred). +func (e *ErrWriter) Err() error { + return e.err +} + +// CheckValidLength returns an invalid length error if expected != actual +func CheckValidLength(expected, actual int) error { + if expected == actual { + return nil + } + return fmt.Errorf("expected length of %d, got %d", expected, actual) +} + +// SystemError is an error that should indicate something has gone wrong in the +// underlying system (syscall failure, bad ioctl, etc...). +type SystemError string + +func (s SystemError) Error() string { + return "system error: " + string(s) +} + +// NeverError panics if a non-nil error is passed in. It should be used to check +// for logic errors, not to handle recoverable errors. +func NeverError(err error) { + if err != nil { + log.Panicf("NeverError() check failed: %v", err) + } +} + +var ( + // testEnvVarName is the name of an environment variable that should be + // set to an empty mountpoint. This is only used for integration tests. + // If not set, integration tests are skipped. + testEnvVarName = "TEST_FILESYSTEM_ROOT" + // ErrSkipIntegration indicates integration tests shouldn't be run. + ErrSkipIntegration = errors.New("skipping integration test") +) + +// TestRoot returns a the root of a filesystem specified by testEnvVarName. This +// function is only used for integration tests. 
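+//
+// In an integration test this is typically consumed as (a sketch, assuming a
+// *testing.T named t):
+//
+//	root, err := util.TestRoot()
+//	if err != nil {
+//		t.Skip(err)
+//	}
+//	// ... create test fixtures under root ...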
+func TestRoot() (string, error) { + path := os.Getenv(testEnvVarName) + if path == "" { + return "", ErrSkipIntegration + } + return path, nil +} diff --git a/vendor/github.com/google/fscrypt/util/util.go b/vendor/github.com/google/fscrypt/util/util.go new file mode 100644 index 000000000..1dab335b8 --- /dev/null +++ b/vendor/github.com/google/fscrypt/util/util.go @@ -0,0 +1,163 @@ +/* + * util.go - Various helpers used throughout fscrypt + * + * Copyright 2017 Google Inc. + * Author: Joe Richey (joerichey@google.com) + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not + * use this file except in compliance with the License. You may obtain a copy of + * the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. + */ + +// Package util contains useful components for simplifying Go code. +// +// The package contains common error types (errors.go) and functions for +// converting arrays to pointers. +package util + +import ( + "bufio" + "fmt" + "log" + "os" + "os/user" + "strconv" + "unsafe" + + "golang.org/x/sys/unix" +) + +// Ptr converts a Go byte array to a pointer to the start of the array. +func Ptr(slice []byte) unsafe.Pointer { + if len(slice) == 0 { + return nil + } + return unsafe.Pointer(&slice[0]) +} + +// ByteSlice takes a pointer to some data and views it as a slice of bytes. +// Note, indexing into this slice is unsafe. +func ByteSlice(ptr unsafe.Pointer) []byte { + // Slice must fit in the smallest address space go supports. + return (*[1 << 30]byte)(ptr)[:] +} + +// PointerSlice takes a pointer to an array of pointers and views it as a slice +// of pointers. Note, indexing into this slice is unsafe. +func PointerSlice(ptr unsafe.Pointer) []unsafe.Pointer { + // Slice must fit in the smallest address space go supports. + return (*[1 << 28]unsafe.Pointer)(ptr)[:] +} + +// Index returns the first index i such that inVal == inArray[i]. +// ok is true if we find a match, false otherwise. +func Index(inVal int64, inArray []int64) (index int, ok bool) { + for index, val := range inArray { + if val == inVal { + return index, true + } + } + return 0, false +} + +// Lookup finds inVal in inArray and returns the corresponding element in +// outArray. Specifically, if inVal == inArray[i], outVal == outArray[i]. +// ok is true if we find a match, false otherwise. +func Lookup(inVal int64, inArray, outArray []int64) (outVal int64, ok bool) { + index, ok := Index(inVal, inArray) + if !ok { + return 0, false + } + return outArray[index], true +} + +// MinInt returns the lesser of a and b. +func MinInt(a, b int) int { + if a < b { + return a + } + return b +} + +// MaxInt returns the greater of a and b. +func MaxInt(a, b int) int { + if a > b { + return a + } + return b +} + +// MinInt64 returns the lesser of a and b. +func MinInt64(a, b int64) int64 { + if a < b { + return a + } + return b +} + +// ReadLine returns a line of input from standard input. An empty string is +// returned if the user didn't insert anything or on error. +func ReadLine() (string, error) { + scanner := bufio.NewScanner(os.Stdin) + scanner.Scan() + return scanner.Text(), scanner.Err() +} + +// AtoiOrPanic converts a string to an int or it panics. 
Should only be used in +// situations where the input MUST be a decimal number. +func AtoiOrPanic(input string) int { + i, err := strconv.Atoi(input) + if err != nil { + panic(err) + } + return i +} + +// UserFromUID returns the User corresponding to the given user id. +func UserFromUID(uid int64) (*user.User, error) { + return user.LookupId(strconv.FormatInt(uid, 10)) +} + +// EffectiveUser returns the user entry corresponding to the effective user. +func EffectiveUser() (*user.User, error) { + return UserFromUID(int64(os.Geteuid())) +} + +// IsUserRoot checks if the effective user is root. +func IsUserRoot() bool { + return os.Geteuid() == 0 +} + +// Chown changes the owner of a File to a User. +func Chown(file *os.File, user *user.User) error { + uid := AtoiOrPanic(user.Uid) + gid := AtoiOrPanic(user.Gid) + return file.Chown(uid, gid) +} + +// IsKernelVersionAtLeast returns true if the Linux kernel version is at least +// major.minor. If something goes wrong it assumes false. +func IsKernelVersionAtLeast(major, minor int) bool { + var uname unix.Utsname + if err := unix.Uname(&uname); err != nil { + log.Printf("Uname failed [%v], assuming old kernel", err) + return false + } + release := string(uname.Release[:]) + log.Printf("Kernel version is %s", release) + var actualMajor, actualMinor int + if n, _ := fmt.Sscanf(release, "%d.%d", &actualMajor, &actualMinor); n != 2 { + log.Printf("Unrecognized uname format %q, assuming old kernel", release) + return false + } + return actualMajor > major || + (actualMajor == major && actualMinor >= minor) +} diff --git a/vendor/github.com/pkg/xattr/.gitignore b/vendor/github.com/pkg/xattr/.gitignore new file mode 100644 index 000000000..d8b32652e --- /dev/null +++ b/vendor/github.com/pkg/xattr/.gitignore @@ -0,0 +1,26 @@ +# Compiled Object files, Static and Dynamic libs (Shared Objects) +*.o +*.a +*.so + +# Folders +_obj +_test +.DS_Store + +# Architecture specific extensions/prefixes +*.[568vq] +[568vq].out + +*.cgo1.go +*.cgo2.c +_cgo_defun.c +_cgo_gotypes.go +_cgo_export.* + +_testmain.go + +*.exe +*.test + +*.swp diff --git a/vendor/github.com/pkg/xattr/LICENSE b/vendor/github.com/pkg/xattr/LICENSE new file mode 100644 index 000000000..99d2e9dc8 --- /dev/null +++ b/vendor/github.com/pkg/xattr/LICENSE @@ -0,0 +1,25 @@ +Copyright (c) 2012 Dave Cheney. All rights reserved. +Copyright (c) 2014 Kuba Podgórski. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/pkg/xattr/README.md b/vendor/github.com/pkg/xattr/README.md new file mode 100644 index 000000000..0662c0208 --- /dev/null +++ b/vendor/github.com/pkg/xattr/README.md @@ -0,0 +1,45 @@ +[![GoDoc](https://godoc.org/github.com/pkg/xattr?status.svg)](http://godoc.org/github.com/pkg/xattr) +[![Go Report Card](https://goreportcard.com/badge/github.com/pkg/xattr)](https://goreportcard.com/report/github.com/pkg/xattr) +[![Build Status](https://github.com/pkg/xattr/workflows/build/badge.svg)](https://github.com/pkg/xattr/actions?query=workflow%3Abuild) +[![Codecov](https://codecov.io/gh/pkg/xattr/branch/master/graph/badge.svg)](https://codecov.io/gh/pkg/xattr) + +xattr +===== +Extended attribute support for Go (linux + darwin + freebsd + netbsd + solaris). + +"Extended attributes are name:value pairs associated permanently with files and directories, similar to the environment strings associated with a process. An attribute may be defined or undefined. If it is defined, its value may be empty or non-empty." [See more...](https://en.wikipedia.org/wiki/Extended_file_attributes) + +`SetWithFlags` allows to additionally pass system flags to be forwarded to the underlying calls. FreeBSD and NetBSD do not support this and the parameter will be ignored. + +The `L` variants of all functions (`LGet/LSet/...`) are identical to `Get/Set/...` except that they +do not reference a symlink that appears at the end of a path. See +[GoDoc](http://godoc.org/github.com/pkg/xattr) for details. + +### Example +```go + const path = "/tmp/myfile" + const prefix = "user." + + if err := xattr.Set(path, prefix+"test", []byte("test-attr-value")); err != nil { + log.Fatal(err) + } + + var list []string + if list, err = xattr.List(path); err != nil { + log.Fatal(err) + } + + var data []byte + if data, err = xattr.Get(path, prefix+"test"); err != nil { + log.Fatal(err) + } + + if err = xattr.Remove(path, prefix+"test"); err != nil { + log.Fatal(err) + } + + // One can also specify the flags parameter to be passed to the OS. + if err := xattr.SetWithFlags(path, prefix+"test", []byte("test-attr-value"), xattr.XATTR_CREATE); err != nil { + log.Fatal(err) + } +``` diff --git a/vendor/github.com/pkg/xattr/xattr.go b/vendor/github.com/pkg/xattr/xattr.go new file mode 100644 index 000000000..f982da304 --- /dev/null +++ b/vendor/github.com/pkg/xattr/xattr.go @@ -0,0 +1,255 @@ +/* +Package xattr provides support for extended attributes on linux, darwin and freebsd. +Extended attributes are name:value pairs associated permanently with files and directories, +similar to the environment strings associated with a process. +An attribute may be defined or undefined. If it is defined, its value may be empty or non-empty. +More details you can find here: https://en.wikipedia.org/wiki/Extended_file_attributes . + +All functions are provided in triples: Get/LGet/FGet, Set/LSet/FSet etc. 
The "L" +variant will not follow a symlink at the end of the path, and "F" variant accepts +a file descriptor instead of a path. + +Example for "L" variant, assuming path is "/symlink1/symlink2", where both components are +symlinks: +Get will follow "symlink1" and "symlink2" and operate on the target of +"symlink2". LGet will follow "symlink1" but operate directly on "symlink2". +*/ +package xattr + +import ( + "os" + "syscall" +) + +// Error records an error and the operation, file path and attribute that caused it. +type Error struct { + Op string + Path string + Name string + Err error +} + +func (e *Error) Error() (errstr string) { + if e.Op != "" { + errstr += e.Op + } + if e.Path != "" { + if errstr != "" { + errstr += " " + } + errstr += e.Path + } + if e.Name != "" { + if errstr != "" { + errstr += " " + } + errstr += e.Name + } + if e.Err != nil { + if errstr != "" { + errstr += ": " + } + errstr += e.Err.Error() + } + return +} + +// Get retrieves extended attribute data associated with path. It will follow +// all symlinks along the path. +func Get(path, name string) ([]byte, error) { + return get(path, name, func(name string, data []byte) (int, error) { + return getxattr(path, name, data) + }) +} + +// LGet is like Get but does not follow a symlink at the end of the path. +func LGet(path, name string) ([]byte, error) { + return get(path, name, func(name string, data []byte) (int, error) { + return lgetxattr(path, name, data) + }) +} + +// FGet is like Get but accepts a os.File instead of a file path. +func FGet(f *os.File, name string) ([]byte, error) { + return get(f.Name(), name, func(name string, data []byte) (int, error) { + return fgetxattr(f, name, data) + }) +} + +type getxattrFunc func(name string, data []byte) (int, error) + +// get contains the buffer allocation logic used by both Get and LGet. +func get(path string, name string, getxattrFunc getxattrFunc) ([]byte, error) { + const ( + // Start with a 1 KB buffer for the xattr value + initialBufSize = 1024 + + // The theoretical maximum xattr value size on MacOS is 64 MB. On Linux it's + // much smaller at 64 KB. Unless the kernel is evil or buggy, we should never + // hit the limit. + maxBufSize = 64 * 1024 * 1024 + + // Function name as reported in error messages + myname = "xattr.get" + ) + + size := initialBufSize + for { + data := make([]byte, size) + read, err := getxattrFunc(name, data) + + // If the buffer was too small to fit the value, Linux and MacOS react + // differently: + // Linux: returns an ERANGE error and "-1" bytes. + // MacOS: truncates the value and returns "size" bytes. If the value + // happens to be exactly as big as the buffer, we cannot know if it was + // truncated, and we retry with a bigger buffer. Contrary to documentation, + // MacOS never seems to return ERANGE! + // To keep the code simple, we always check both conditions, and sometimes + // double the buffer size without it being strictly necessary. + if err == syscall.ERANGE || read == size { + // The buffer was too small. Try again. + size <<= 1 + if size >= maxBufSize { + return nil, &Error{myname, path, name, syscall.EOVERFLOW} + } + continue + } + if err != nil { + return nil, &Error{myname, path, name, err} + } + return data[:read], nil + } +} + +// Set associates name and data together as an attribute of path. 
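+//
+// Attribute names are namespaced; unprivileged callers on Linux normally use
+// the "user." prefix, as in the README example (sketch):
+//
+//	err := xattr.Set("/tmp/myfile", "user.test", []byte("test-attr-value"))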
+func Set(path, name string, data []byte) error { + if err := setxattr(path, name, data, 0); err != nil { + return &Error{"xattr.Set", path, name, err} + } + return nil +} + +// LSet is like Set but does not follow a symlink at +// the end of the path. +func LSet(path, name string, data []byte) error { + if err := lsetxattr(path, name, data, 0); err != nil { + return &Error{"xattr.LSet", path, name, err} + } + return nil +} + +// FSet is like Set but accepts a os.File instead of a file path. +func FSet(f *os.File, name string, data []byte) error { + if err := fsetxattr(f, name, data, 0); err != nil { + return &Error{"xattr.FSet", f.Name(), name, err} + } + return nil +} + +// SetWithFlags associates name and data together as an attribute of path. +// Forwards the flags parameter to the syscall layer. +func SetWithFlags(path, name string, data []byte, flags int) error { + if err := setxattr(path, name, data, flags); err != nil { + return &Error{"xattr.SetWithFlags", path, name, err} + } + return nil +} + +// LSetWithFlags is like SetWithFlags but does not follow a symlink at +// the end of the path. +func LSetWithFlags(path, name string, data []byte, flags int) error { + if err := lsetxattr(path, name, data, flags); err != nil { + return &Error{"xattr.LSetWithFlags", path, name, err} + } + return nil +} + +// FSetWithFlags is like SetWithFlags but accepts a os.File instead of a file path. +func FSetWithFlags(f *os.File, name string, data []byte, flags int) error { + if err := fsetxattr(f, name, data, flags); err != nil { + return &Error{"xattr.FSetWithFlags", f.Name(), name, err} + } + return nil +} + +// Remove removes the attribute associated with the given path. +func Remove(path, name string) error { + if err := removexattr(path, name); err != nil { + return &Error{"xattr.Remove", path, name, err} + } + return nil +} + +// LRemove is like Remove but does not follow a symlink at the end of the +// path. +func LRemove(path, name string) error { + if err := lremovexattr(path, name); err != nil { + return &Error{"xattr.LRemove", path, name, err} + } + return nil +} + +// FRemove is like Remove but accepts a os.File instead of a file path. +func FRemove(f *os.File, name string) error { + if err := fremovexattr(f, name); err != nil { + return &Error{"xattr.FRemove", f.Name(), name, err} + } + return nil +} + +// List retrieves a list of names of extended attributes associated +// with the given path in the file system. +func List(path string) ([]string, error) { + return list(path, func(data []byte) (int, error) { + return listxattr(path, data) + }) +} + +// LList is like List but does not follow a symlink at the end of the +// path. +func LList(path string) ([]string, error) { + return list(path, func(data []byte) (int, error) { + return llistxattr(path, data) + }) +} + +// FList is like List but accepts a os.File instead of a file path. +func FList(f *os.File) ([]string, error) { + return list(f.Name(), func(data []byte) (int, error) { + return flistxattr(f, data) + }) +} + +type listxattrFunc func(data []byte) (int, error) + +// list contains the buffer allocation logic used by both List and LList. +func list(path string, listxattrFunc listxattrFunc) ([]string, error) { + myname := "xattr.list" + // find size. + size, err := listxattrFunc(nil) + if err != nil { + return nil, &Error{myname, path, "", err} + } + if size > 0 { + // `size + 1` because of ERANGE error when reading + // from a SMB1 mount point (https://github.com/pkg/xattr/issues/16). 
+ buf := make([]byte, size+1) + // Read into buffer of that size. + read, err := listxattrFunc(buf) + if err != nil { + return nil, &Error{myname, path, "", err} + } + return stringsFromByteSlice(buf[:read]), nil + } + return []string{}, nil +} + +// bytePtrFromSlice returns a pointer to array of bytes and a size. +func bytePtrFromSlice(data []byte) (ptr *byte, size int) { + size = len(data) + if size > 0 { + ptr = &data[0] + } + return +} diff --git a/vendor/github.com/pkg/xattr/xattr_bsd.go b/vendor/github.com/pkg/xattr/xattr_bsd.go new file mode 100644 index 000000000..f4a3f9539 --- /dev/null +++ b/vendor/github.com/pkg/xattr/xattr_bsd.go @@ -0,0 +1,201 @@ +//go:build freebsd || netbsd +// +build freebsd netbsd + +package xattr + +import ( + "os" + "syscall" + "unsafe" +) + +const ( + // XATTR_SUPPORTED will be true if the current platform is supported + XATTR_SUPPORTED = true + + EXTATTR_NAMESPACE_USER = 1 + + // ENOATTR is not exported by the syscall package on Linux, because it is + // an alias for ENODATA. We export it here so it is available on all + // our supported platforms. + ENOATTR = syscall.ENOATTR +) + +func getxattr(path string, name string, data []byte) (int, error) { + return sysGet(syscall.SYS_EXTATTR_GET_FILE, path, name, data) +} + +func lgetxattr(path string, name string, data []byte) (int, error) { + return sysGet(syscall.SYS_EXTATTR_GET_LINK, path, name, data) +} + +func fgetxattr(f *os.File, name string, data []byte) (int, error) { + return getxattr(f.Name(), name, data) +} + +// sysGet is called by getxattr and lgetxattr with the appropriate syscall +// number. This works because syscalls have the same signature and return +// values. +func sysGet(syscallNum uintptr, path string, name string, data []byte) (int, error) { + ptr, nbytes := bytePtrFromSlice(data) + /* + ssize_t extattr_get_file( + const char *path, + int attrnamespace, + const char *attrname, + void *data, + size_t nbytes); + + ssize_t extattr_get_link( + const char *path, + int attrnamespace, + const char *attrname, + void *data, + size_t nbytes); + */ + r0, _, err := syscall.Syscall6(syscallNum, uintptr(unsafe.Pointer(syscall.StringBytePtr(path))), + EXTATTR_NAMESPACE_USER, uintptr(unsafe.Pointer(syscall.StringBytePtr(name))), + uintptr(unsafe.Pointer(ptr)), uintptr(nbytes), 0) + if err != syscall.Errno(0) { + return int(r0), err + } + return int(r0), nil +} + +func setxattr(path string, name string, data []byte, flags int) error { + return sysSet(syscall.SYS_EXTATTR_SET_FILE, path, name, data) +} + +func lsetxattr(path string, name string, data []byte, flags int) error { + return sysSet(syscall.SYS_EXTATTR_SET_LINK, path, name, data) +} + +func fsetxattr(f *os.File, name string, data []byte, flags int) error { + return setxattr(f.Name(), name, data, flags) +} + +// sysSet is called by setxattr and lsetxattr with the appropriate syscall +// number. This works because syscalls have the same signature and return +// values. 
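+//
+// extattr_set_file and extattr_set_link return the number of bytes written,
+// which is why a short write is mapped to E2BIG below.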
+func sysSet(syscallNum uintptr, path string, name string, data []byte) error { + ptr, nbytes := bytePtrFromSlice(data) + /* + ssize_t extattr_set_file( + const char *path, + int attrnamespace, + const char *attrname, + const void *data, + size_t nbytes + ); + + ssize_t extattr_set_link( + const char *path, + int attrnamespace, + const char *attrname, + const void *data, + size_t nbytes + ); + */ + r0, _, err := syscall.Syscall6(syscallNum, uintptr(unsafe.Pointer(syscall.StringBytePtr(path))), + EXTATTR_NAMESPACE_USER, uintptr(unsafe.Pointer(syscall.StringBytePtr(name))), + uintptr(unsafe.Pointer(ptr)), uintptr(nbytes), 0) + if err != syscall.Errno(0) { + return err + } + if int(r0) != nbytes { + return syscall.E2BIG + } + return nil +} + +func removexattr(path string, name string) error { + return sysRemove(syscall.SYS_EXTATTR_DELETE_FILE, path, name) +} + +func lremovexattr(path string, name string) error { + return sysRemove(syscall.SYS_EXTATTR_DELETE_LINK, path, name) +} + +func fremovexattr(f *os.File, name string) error { + return removexattr(f.Name(), name) +} + +// sysSet is called by removexattr and lremovexattr with the appropriate syscall +// number. This works because syscalls have the same signature and return +// values. +func sysRemove(syscallNum uintptr, path string, name string) error { + /* + int extattr_delete_file( + const char *path, + int attrnamespace, + const char *attrname + ); + + int extattr_delete_link( + const char *path, + int attrnamespace, + const char *attrname + ); + */ + _, _, err := syscall.Syscall(syscallNum, uintptr(unsafe.Pointer(syscall.StringBytePtr(path))), + EXTATTR_NAMESPACE_USER, uintptr(unsafe.Pointer(syscall.StringBytePtr(name))), + ) + if err != syscall.Errno(0) { + return err + } + return nil +} + +func listxattr(path string, data []byte) (int, error) { + return sysList(syscall.SYS_EXTATTR_LIST_FILE, path, data) +} + +func llistxattr(path string, data []byte) (int, error) { + return sysList(syscall.SYS_EXTATTR_LIST_LINK, path, data) +} + +func flistxattr(f *os.File, data []byte) (int, error) { + return listxattr(f.Name(), data) +} + +// sysSet is called by listxattr and llistxattr with the appropriate syscall +// number. This works because syscalls have the same signature and return +// values. +func sysList(syscallNum uintptr, path string, data []byte) (int, error) { + ptr, nbytes := bytePtrFromSlice(data) + /* + ssize_t extattr_list_file( + const char *path, + int attrnamespace, + void *data, + size_t nbytes + ); + + ssize_t extattr_list_link( + const char *path, + int attrnamespace, + void *data, + size_t nbytes + ); + */ + r0, _, err := syscall.Syscall6(syscallNum, uintptr(unsafe.Pointer(syscall.StringBytePtr(path))), + EXTATTR_NAMESPACE_USER, uintptr(unsafe.Pointer(ptr)), uintptr(nbytes), 0, 0) + if err != syscall.Errno(0) { + return int(r0), err + } + return int(r0), nil +} + +// stringsFromByteSlice converts a sequence of attributes to a []string. +// On FreeBSD, each entry consists of a single byte containing the length +// of the attribute name, followed by the attribute name. +// The name is _not_ terminated by NULL. 
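+//
+// For example, the buffer {4, 'n', 'a', 'm', 'e', 3, 'f', 'o', 'o'} decodes
+// to []string{"name", "foo"}.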
+func stringsFromByteSlice(buf []byte) (result []string) { + index := 0 + for index < len(buf) { + next := index + 1 + int(buf[index]) + result = append(result, string(buf[index+1:next])) + index = next + } + return +} diff --git a/vendor/github.com/pkg/xattr/xattr_darwin.go b/vendor/github.com/pkg/xattr/xattr_darwin.go new file mode 100644 index 000000000..ee7a501da --- /dev/null +++ b/vendor/github.com/pkg/xattr/xattr_darwin.go @@ -0,0 +1,90 @@ +//go:build darwin +// +build darwin + +package xattr + +import ( + "os" + "syscall" + + "golang.org/x/sys/unix" +) + +// See https://opensource.apple.com/source/xnu/xnu-1504.15.3/bsd/sys/xattr.h.auto.html +const ( + // XATTR_SUPPORTED will be true if the current platform is supported + XATTR_SUPPORTED = true + + XATTR_NOFOLLOW = 0x0001 + XATTR_CREATE = 0x0002 + XATTR_REPLACE = 0x0004 + XATTR_NOSECURITY = 0x0008 + XATTR_NODEFAULT = 0x0010 + XATTR_SHOWCOMPRESSION = 0x0020 + + // ENOATTR is not exported by the syscall package on Linux, because it is + // an alias for ENODATA. We export it here so it is available on all + // our supported platforms. + ENOATTR = syscall.ENOATTR +) + +func getxattr(path string, name string, data []byte) (int, error) { + return unix.Getxattr(path, name, data) +} + +func lgetxattr(path string, name string, data []byte) (int, error) { + return unix.Lgetxattr(path, name, data) +} + +func fgetxattr(f *os.File, name string, data []byte) (int, error) { + return getxattr(f.Name(), name, data) +} + +func setxattr(path string, name string, data []byte, flags int) error { + return unix.Setxattr(path, name, data, flags) +} + +func lsetxattr(path string, name string, data []byte, flags int) error { + return unix.Lsetxattr(path, name, data, flags) +} + +func fsetxattr(f *os.File, name string, data []byte, flags int) error { + return setxattr(f.Name(), name, data, flags) +} + +func removexattr(path string, name string) error { + return unix.Removexattr(path, name) +} + +func lremovexattr(path string, name string) error { + return unix.Lremovexattr(path, name) +} + +func fremovexattr(f *os.File, name string) error { + return removexattr(f.Name(), name) +} + +func listxattr(path string, data []byte) (int, error) { + return unix.Listxattr(path, data) +} + +func llistxattr(path string, data []byte) (int, error) { + return unix.Llistxattr(path, data) +} + +func flistxattr(f *os.File, data []byte) (int, error) { + return listxattr(f.Name(), data) +} + +// stringsFromByteSlice converts a sequence of attributes to a []string. +// On Darwin and Linux, each entry is a NULL-terminated string. +func stringsFromByteSlice(buf []byte) (result []string) { + offset := 0 + for index, b := range buf { + if b == 0 { + result = append(result, string(buf[offset:index])) + offset = index + 1 + } + } + return +} diff --git a/vendor/github.com/pkg/xattr/xattr_linux.go b/vendor/github.com/pkg/xattr/xattr_linux.go new file mode 100644 index 000000000..879085ee5 --- /dev/null +++ b/vendor/github.com/pkg/xattr/xattr_linux.go @@ -0,0 +1,142 @@ +//go:build linux +// +build linux + +package xattr + +import ( + "os" + "syscall" + + "golang.org/x/sys/unix" +) + +const ( + // XATTR_SUPPORTED will be true if the current platform is supported + XATTR_SUPPORTED = true + + XATTR_CREATE = unix.XATTR_CREATE + XATTR_REPLACE = unix.XATTR_REPLACE + + // ENOATTR is not exported by the syscall package on Linux, because it is + // an alias for ENODATA. We export it here so it is available on all + // our supported platforms. 
+ ENOATTR = syscall.ENODATA +) + +// On Linux, FUSE and CIFS filesystems can return EINTR for interrupted system +// calls. This function works around this by retrying system calls until they +// stop returning EINTR. +// +// See https://github.com/golang/go/commit/6b420169d798c7ebe733487b56ea5c3fa4aab5ce. +func ignoringEINTR(fn func() error) (err error) { + for { + err = fn() + if err != unix.EINTR { + break + } + } + return err +} + +func getxattr(path string, name string, data []byte) (int, error) { + var r int + err := ignoringEINTR(func() (err error) { + r, err = unix.Getxattr(path, name, data) + return err + }) + return r, err +} + +func lgetxattr(path string, name string, data []byte) (int, error) { + var r int + err := ignoringEINTR(func() (err error) { + r, err = unix.Lgetxattr(path, name, data) + return err + }) + return r, err +} + +func fgetxattr(f *os.File, name string, data []byte) (int, error) { + var r int + err := ignoringEINTR(func() (err error) { + r, err = unix.Fgetxattr(int(f.Fd()), name, data) + return err + }) + return r, err +} + +func setxattr(path string, name string, data []byte, flags int) error { + return ignoringEINTR(func() (err error) { + return unix.Setxattr(path, name, data, flags) + }) +} + +func lsetxattr(path string, name string, data []byte, flags int) error { + return ignoringEINTR(func() (err error) { + return unix.Lsetxattr(path, name, data, flags) + }) +} + +func fsetxattr(f *os.File, name string, data []byte, flags int) error { + return ignoringEINTR(func() (err error) { + return unix.Fsetxattr(int(f.Fd()), name, data, flags) + }) +} + +func removexattr(path string, name string) error { + return ignoringEINTR(func() (err error) { + return unix.Removexattr(path, name) + }) +} + +func lremovexattr(path string, name string) error { + return ignoringEINTR(func() (err error) { + return unix.Lremovexattr(path, name) + }) +} + +func fremovexattr(f *os.File, name string) error { + return ignoringEINTR(func() (err error) { + return unix.Fremovexattr(int(f.Fd()), name) + }) +} + +func listxattr(path string, data []byte) (int, error) { + var r int + err := ignoringEINTR(func() (err error) { + r, err = unix.Listxattr(path, data) + return err + }) + return r, err +} + +func llistxattr(path string, data []byte) (int, error) { + var r int + err := ignoringEINTR(func() (err error) { + r, err = unix.Llistxattr(path, data) + return err + }) + return r, err +} + +func flistxattr(f *os.File, data []byte) (int, error) { + var r int + err := ignoringEINTR(func() (err error) { + r, err = unix.Flistxattr(int(f.Fd()), data) + return err + }) + return r, err +} + +// stringsFromByteSlice converts a sequence of attributes to a []string. +// On Darwin and Linux, each entry is a NULL-terminated string. 
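+//
+// For example, the buffer "user.foo\x00user.bar\x00" decodes to
+// []string{"user.foo", "user.bar"}.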
+func stringsFromByteSlice(buf []byte) (result []string) { + offset := 0 + for index, b := range buf { + if b == 0 { + result = append(result, string(buf[offset:index])) + offset = index + 1 + } + } + return +} diff --git a/vendor/github.com/pkg/xattr/xattr_solaris.go b/vendor/github.com/pkg/xattr/xattr_solaris.go new file mode 100644 index 000000000..38d88d609 --- /dev/null +++ b/vendor/github.com/pkg/xattr/xattr_solaris.go @@ -0,0 +1,165 @@ +//go:build solaris +// +build solaris + +package xattr + +import ( + "os" + "syscall" + + "golang.org/x/sys/unix" +) + +const ( + // XATTR_SUPPORTED will be true if the current platform is supported + XATTR_SUPPORTED = true + + XATTR_CREATE = 0x1 + XATTR_REPLACE = 0x2 + + // ENOATTR is not exported by the syscall package on Linux, because it is + // an alias for ENODATA. We export it here so it is available on all + // our supported platforms. + ENOATTR = syscall.ENODATA +) + +func getxattr(path string, name string, data []byte) (int, error) { + f, err := os.OpenFile(path, os.O_RDONLY, 0) + if err != nil { + return 0, err + } + defer func() { + _ = f.Close() + }() + return fgetxattr(f, name, data) +} + +func lgetxattr(path string, name string, data []byte) (int, error) { + return 0, unix.ENOTSUP +} + +func fgetxattr(f *os.File, name string, data []byte) (int, error) { + fd, err := unix.Openat(int(f.Fd()), name, unix.O_RDONLY|unix.O_XATTR, 0) + if err != nil { + return 0, err + } + defer func() { + _ = unix.Close(fd) + }() + return unix.Read(fd, data) +} + +func setxattr(path string, name string, data []byte, flags int) error { + f, err := os.OpenFile(path, os.O_RDONLY, 0) + if err != nil { + return err + } + err = fsetxattr(f, name, data, flags) + if err != nil { + _ = f.Close() + return err + } + return f.Close() +} + +func lsetxattr(path string, name string, data []byte, flags int) error { + return unix.ENOTSUP +} + +func fsetxattr(f *os.File, name string, data []byte, flags int) error { + mode := unix.O_WRONLY | unix.O_XATTR + if flags&XATTR_REPLACE != 0 { + mode |= unix.O_TRUNC + } else if flags&XATTR_CREATE != 0 { + mode |= unix.O_CREAT | unix.O_EXCL + } else { + mode |= unix.O_CREAT | unix.O_TRUNC + } + fd, err := unix.Openat(int(f.Fd()), name, mode, 0666) + if err != nil { + return err + } + if _, err = unix.Write(fd, data); err != nil { + _ = unix.Close(fd) + return err + } + return unix.Close(fd) +} + +func removexattr(path string, name string) error { + fd, err := unix.Open(path, unix.O_RDONLY|unix.O_XATTR, 0) + if err != nil { + return err + } + f := os.NewFile(uintptr(fd), path) + defer func() { + _ = f.Close() + }() + return fremovexattr(f, name) +} + +func lremovexattr(path string, name string) error { + return unix.ENOTSUP +} + +func fremovexattr(f *os.File, name string) error { + fd, err := unix.Openat(int(f.Fd()), ".", unix.O_XATTR, 0) + if err != nil { + return err + } + defer func() { + _ = unix.Close(fd) + }() + return unix.Unlinkat(fd, name, 0) +} + +func listxattr(path string, data []byte) (int, error) { + f, err := os.OpenFile(path, os.O_RDONLY, 0) + if err != nil { + return 0, err + } + defer func() { + _ = f.Close() + }() + return flistxattr(f, data) +} + +func llistxattr(path string, data []byte) (int, error) { + return 0, unix.ENOTSUP +} + +func flistxattr(f *os.File, data []byte) (int, error) { + fd, err := unix.Openat(int(f.Fd()), ".", unix.O_RDONLY|unix.O_XATTR, 0) + if err != nil { + return 0, err + } + xf := os.NewFile(uintptr(fd), f.Name()) + defer func() { + _ = xf.Close() + }() + names, err := xf.Readdirnames(-1) + 
if err != nil { + return 0, err + } + var buf []byte + for _, name := range names { + buf = append(buf, append([]byte(name), '\000')...) + } + if data == nil { + return len(buf), nil + } + return copy(data, buf), nil +} + +// stringsFromByteSlice converts a sequence of attributes to a []string. +// On Darwin and Linux, each entry is a NULL-terminated string. +func stringsFromByteSlice(buf []byte) (result []string) { + offset := 0 + for index, b := range buf { + if b == 0 { + result = append(result, string(buf[offset:index])) + offset = index + 1 + } + } + return +} diff --git a/vendor/github.com/pkg/xattr/xattr_unsupported.go b/vendor/github.com/pkg/xattr/xattr_unsupported.go new file mode 100644 index 000000000..4153decb1 --- /dev/null +++ b/vendor/github.com/pkg/xattr/xattr_unsupported.go @@ -0,0 +1,70 @@ +//go:build !linux && !freebsd && !netbsd && !darwin && !solaris +// +build !linux,!freebsd,!netbsd,!darwin,!solaris + +package xattr + +import ( + "os" + "syscall" +) + +const ( + // We need to use the default for non supported operating systems + ENOATTR = syscall.ENODATA +) + +// XATTR_SUPPORTED will be true if the current platform is supported +const XATTR_SUPPORTED = false + +func getxattr(path string, name string, data []byte) (int, error) { + return 0, nil +} + +func lgetxattr(path string, name string, data []byte) (int, error) { + return 0, nil +} + +func fgetxattr(f *os.File, name string, data []byte) (int, error) { + return 0, nil +} + +func setxattr(path string, name string, data []byte, flags int) error { + return nil +} + +func lsetxattr(path string, name string, data []byte, flags int) error { + return nil +} + +func fsetxattr(f *os.File, name string, data []byte, flags int) error { + return nil +} + +func removexattr(path string, name string) error { + return nil +} + +func lremovexattr(path string, name string) error { + return nil +} + +func fremovexattr(f *os.File, name string) error { + return nil +} + +func listxattr(path string, data []byte) (int, error) { + return 0, nil +} + +func llistxattr(path string, data []byte) (int, error) { + return 0, nil +} + +func flistxattr(f *os.File, data []byte) (int, error) { + return 0, nil +} + +// dummy +func stringsFromByteSlice(buf []byte) (result []string) { + return []string{} +} diff --git a/vendor/golang.org/x/crypto/argon2/argon2.go b/vendor/golang.org/x/crypto/argon2/argon2.go new file mode 100644 index 000000000..b423feaea --- /dev/null +++ b/vendor/golang.org/x/crypto/argon2/argon2.go @@ -0,0 +1,285 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package argon2 implements the key derivation function Argon2. +// Argon2 was selected as the winner of the Password Hashing Competition and can +// be used to derive cryptographic keys from passwords. +// +// For a detailed specification of Argon2 see [1]. +// +// If you aren't sure which function you need, use Argon2id (IDKey) and +// the parameter recommendations for your scenario. +// +// +// Argon2i +// +// Argon2i (implemented by Key) is the side-channel resistant version of Argon2. +// It uses data-independent memory access, which is preferred for password +// hashing and password-based key derivation. Argon2i requires more passes over +// memory than Argon2id to protect from trade-off attacks. The recommended +// parameters (taken from [2]) for non-interactive operations are time=3 and to +// use the maximum available memory. 
+// +// +// Argon2id +// +// Argon2id (implemented by IDKey) is a hybrid version of Argon2 combining +// Argon2i and Argon2d. It uses data-independent memory access for the first +// half of the first iteration over the memory and data-dependent memory access +// for the rest. Argon2id is side-channel resistant and provides better brute- +// force cost savings due to time-memory tradeoffs than Argon2i. The recommended +// parameters for non-interactive operations (taken from [2]) are time=1 and to +// use the maximum available memory. +// +// [1] https://github.com/P-H-C/phc-winner-argon2/blob/master/argon2-specs.pdf +// [2] https://tools.ietf.org/html/draft-irtf-cfrg-argon2-03#section-9.3 +package argon2 + +import ( + "encoding/binary" + "sync" + + "golang.org/x/crypto/blake2b" +) + +// The Argon2 version implemented by this package. +const Version = 0x13 + +const ( + argon2d = iota + argon2i + argon2id +) + +// Key derives a key from the password, salt, and cost parameters using Argon2i +// returning a byte slice of length keyLen that can be used as cryptographic +// key. The CPU cost and parallelism degree must be greater than zero. +// +// For example, you can get a derived key for e.g. AES-256 (which needs a +// 32-byte key) by doing: +// +// key := argon2.Key([]byte("some password"), salt, 3, 32*1024, 4, 32) +// +// The draft RFC recommends[2] time=3, and memory=32*1024 is a sensible number. +// If using that amount of memory (32 MB) is not possible in some contexts then +// the time parameter can be increased to compensate. +// +// The time parameter specifies the number of passes over the memory and the +// memory parameter specifies the size of the memory in KiB. For example +// memory=32*1024 sets the memory cost to ~32 MB. The number of threads can be +// adjusted to the number of available CPUs. The cost parameters should be +// increased as memory latency and CPU parallelism increases. Remember to get a +// good random salt. +func Key(password, salt []byte, time, memory uint32, threads uint8, keyLen uint32) []byte { + return deriveKey(argon2i, password, salt, nil, nil, time, memory, threads, keyLen) +} + +// IDKey derives a key from the password, salt, and cost parameters using +// Argon2id returning a byte slice of length keyLen that can be used as +// cryptographic key. The CPU cost and parallelism degree must be greater than +// zero. +// +// For example, you can get a derived key for e.g. AES-256 (which needs a +// 32-byte key) by doing: +// +// key := argon2.IDKey([]byte("some password"), salt, 1, 64*1024, 4, 32) +// +// The draft RFC recommends[2] time=1, and memory=64*1024 is a sensible number. +// If using that amount of memory (64 MB) is not possible in some contexts then +// the time parameter can be increased to compensate. +// +// The time parameter specifies the number of passes over the memory and the +// memory parameter specifies the size of the memory in KiB. For example +// memory=64*1024 sets the memory cost to ~64 MB. The number of threads can be +// adjusted to the numbers of available CPUs. The cost parameters should be +// increased as memory latency and CPU parallelism increases. Remember to get a +// good random salt. 
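+//
+// A suitable random salt can come from crypto/rand, for example (a sketch;
+// 16 bytes is a common salt length):
+//
+//	salt := make([]byte, 16)
+//	if _, err := rand.Read(salt); err != nil {
+//		// handle the error
+//	}
+//	key := argon2.IDKey([]byte("some password"), salt, 1, 64*1024, 4, 32)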
+func IDKey(password, salt []byte, time, memory uint32, threads uint8, keyLen uint32) []byte { + return deriveKey(argon2id, password, salt, nil, nil, time, memory, threads, keyLen) +} + +func deriveKey(mode int, password, salt, secret, data []byte, time, memory uint32, threads uint8, keyLen uint32) []byte { + if time < 1 { + panic("argon2: number of rounds too small") + } + if threads < 1 { + panic("argon2: parallelism degree too low") + } + h0 := initHash(password, salt, secret, data, time, memory, uint32(threads), keyLen, mode) + + memory = memory / (syncPoints * uint32(threads)) * (syncPoints * uint32(threads)) + if memory < 2*syncPoints*uint32(threads) { + memory = 2 * syncPoints * uint32(threads) + } + B := initBlocks(&h0, memory, uint32(threads)) + processBlocks(B, time, memory, uint32(threads), mode) + return extractKey(B, memory, uint32(threads), keyLen) +} + +const ( + blockLength = 128 + syncPoints = 4 +) + +type block [blockLength]uint64 + +func initHash(password, salt, key, data []byte, time, memory, threads, keyLen uint32, mode int) [blake2b.Size + 8]byte { + var ( + h0 [blake2b.Size + 8]byte + params [24]byte + tmp [4]byte + ) + + b2, _ := blake2b.New512(nil) + binary.LittleEndian.PutUint32(params[0:4], threads) + binary.LittleEndian.PutUint32(params[4:8], keyLen) + binary.LittleEndian.PutUint32(params[8:12], memory) + binary.LittleEndian.PutUint32(params[12:16], time) + binary.LittleEndian.PutUint32(params[16:20], uint32(Version)) + binary.LittleEndian.PutUint32(params[20:24], uint32(mode)) + b2.Write(params[:]) + binary.LittleEndian.PutUint32(tmp[:], uint32(len(password))) + b2.Write(tmp[:]) + b2.Write(password) + binary.LittleEndian.PutUint32(tmp[:], uint32(len(salt))) + b2.Write(tmp[:]) + b2.Write(salt) + binary.LittleEndian.PutUint32(tmp[:], uint32(len(key))) + b2.Write(tmp[:]) + b2.Write(key) + binary.LittleEndian.PutUint32(tmp[:], uint32(len(data))) + b2.Write(tmp[:]) + b2.Write(data) + b2.Sum(h0[:0]) + return h0 +} + +func initBlocks(h0 *[blake2b.Size + 8]byte, memory, threads uint32) []block { + var block0 [1024]byte + B := make([]block, memory) + for lane := uint32(0); lane < threads; lane++ { + j := lane * (memory / threads) + binary.LittleEndian.PutUint32(h0[blake2b.Size+4:], lane) + + binary.LittleEndian.PutUint32(h0[blake2b.Size:], 0) + blake2bHash(block0[:], h0[:]) + for i := range B[j+0] { + B[j+0][i] = binary.LittleEndian.Uint64(block0[i*8:]) + } + + binary.LittleEndian.PutUint32(h0[blake2b.Size:], 1) + blake2bHash(block0[:], h0[:]) + for i := range B[j+1] { + B[j+1][i] = binary.LittleEndian.Uint64(block0[i*8:]) + } + } + return B +} + +func processBlocks(B []block, time, memory, threads uint32, mode int) { + lanes := memory / threads + segments := lanes / syncPoints + + processSegment := func(n, slice, lane uint32, wg *sync.WaitGroup) { + var addresses, in, zero block + if mode == argon2i || (mode == argon2id && n == 0 && slice < syncPoints/2) { + in[0] = uint64(n) + in[1] = uint64(lane) + in[2] = uint64(slice) + in[3] = uint64(memory) + in[4] = uint64(time) + in[5] = uint64(mode) + } + + index := uint32(0) + if n == 0 && slice == 0 { + index = 2 // we have already generated the first two blocks + if mode == argon2i || mode == argon2id { + in[6]++ + processBlock(&addresses, &in, &zero) + processBlock(&addresses, &addresses, &zero) + } + } + + offset := lane*lanes + slice*segments + index + var random uint64 + for index < segments { + prev := offset - 1 + if index == 0 && slice == 0 { + prev += lanes // last block in lane + } + if mode == argon2i || (mode == 
argon2id && n == 0 && slice < syncPoints/2) { + if index%blockLength == 0 { + in[6]++ + processBlock(&addresses, &in, &zero) + processBlock(&addresses, &addresses, &zero) + } + random = addresses[index%blockLength] + } else { + random = B[prev][0] + } + newOffset := indexAlpha(random, lanes, segments, threads, n, slice, lane, index) + processBlockXOR(&B[offset], &B[prev], &B[newOffset]) + index, offset = index+1, offset+1 + } + wg.Done() + } + + for n := uint32(0); n < time; n++ { + for slice := uint32(0); slice < syncPoints; slice++ { + var wg sync.WaitGroup + for lane := uint32(0); lane < threads; lane++ { + wg.Add(1) + go processSegment(n, slice, lane, &wg) + } + wg.Wait() + } + } + +} + +func extractKey(B []block, memory, threads, keyLen uint32) []byte { + lanes := memory / threads + for lane := uint32(0); lane < threads-1; lane++ { + for i, v := range B[(lane*lanes)+lanes-1] { + B[memory-1][i] ^= v + } + } + + var block [1024]byte + for i, v := range B[memory-1] { + binary.LittleEndian.PutUint64(block[i*8:], v) + } + key := make([]byte, keyLen) + blake2bHash(key, block[:]) + return key +} + +func indexAlpha(rand uint64, lanes, segments, threads, n, slice, lane, index uint32) uint32 { + refLane := uint32(rand>>32) % threads + if n == 0 && slice == 0 { + refLane = lane + } + m, s := 3*segments, ((slice+1)%syncPoints)*segments + if lane == refLane { + m += index + } + if n == 0 { + m, s = slice*segments, 0 + if slice == 0 || lane == refLane { + m += index + } + } + if index == 0 || lane == refLane { + m-- + } + return phi(rand, uint64(m), uint64(s), refLane, lanes) +} + +func phi(rand, m, s uint64, lane, lanes uint32) uint32 { + p := rand & 0xFFFFFFFF + p = (p * p) >> 32 + p = (p * m) >> 32 + return lane*lanes + uint32((s+m-(p+1))%uint64(lanes)) +} diff --git a/vendor/golang.org/x/crypto/argon2/blake2b.go b/vendor/golang.org/x/crypto/argon2/blake2b.go new file mode 100644 index 000000000..10f46948d --- /dev/null +++ b/vendor/golang.org/x/crypto/argon2/blake2b.go @@ -0,0 +1,53 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package argon2 + +import ( + "encoding/binary" + "hash" + + "golang.org/x/crypto/blake2b" +) + +// blake2bHash computes an arbitrary long hash value of in +// and writes the hash to out. +func blake2bHash(out []byte, in []byte) { + var b2 hash.Hash + if n := len(out); n < blake2b.Size { + b2, _ = blake2b.New(n, nil) + } else { + b2, _ = blake2b.New512(nil) + } + + var buffer [blake2b.Size]byte + binary.LittleEndian.PutUint32(buffer[:4], uint32(len(out))) + b2.Write(buffer[:4]) + b2.Write(in) + + if len(out) <= blake2b.Size { + b2.Sum(out[:0]) + return + } + + outLen := len(out) + b2.Sum(buffer[:0]) + b2.Reset() + copy(out, buffer[:32]) + out = out[32:] + for len(out) > blake2b.Size { + b2.Write(buffer[:]) + b2.Sum(buffer[:0]) + copy(out, buffer[:32]) + out = out[32:] + b2.Reset() + } + + if outLen%blake2b.Size > 0 { // outLen > 64 + r := ((outLen + 31) / 32) - 2 // ⌈τ /32⌉-2 + b2, _ = blake2b.New(outLen-32*r, nil) + } + b2.Write(buffer[:]) + b2.Sum(out[:0]) +} diff --git a/vendor/golang.org/x/crypto/argon2/blamka_amd64.go b/vendor/golang.org/x/crypto/argon2/blamka_amd64.go new file mode 100644 index 000000000..a014ac92a --- /dev/null +++ b/vendor/golang.org/x/crypto/argon2/blamka_amd64.go @@ -0,0 +1,61 @@ +// Copyright 2017 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build amd64 && gc && !purego +// +build amd64,gc,!purego + +package argon2 + +import "golang.org/x/sys/cpu" + +func init() { + useSSE4 = cpu.X86.HasSSE41 +} + +//go:noescape +func mixBlocksSSE2(out, a, b, c *block) + +//go:noescape +func xorBlocksSSE2(out, a, b, c *block) + +//go:noescape +func blamkaSSE4(b *block) + +func processBlockSSE(out, in1, in2 *block, xor bool) { + var t block + mixBlocksSSE2(&t, in1, in2, &t) + if useSSE4 { + blamkaSSE4(&t) + } else { + for i := 0; i < blockLength; i += 16 { + blamkaGeneric( + &t[i+0], &t[i+1], &t[i+2], &t[i+3], + &t[i+4], &t[i+5], &t[i+6], &t[i+7], + &t[i+8], &t[i+9], &t[i+10], &t[i+11], + &t[i+12], &t[i+13], &t[i+14], &t[i+15], + ) + } + for i := 0; i < blockLength/8; i += 2 { + blamkaGeneric( + &t[i], &t[i+1], &t[16+i], &t[16+i+1], + &t[32+i], &t[32+i+1], &t[48+i], &t[48+i+1], + &t[64+i], &t[64+i+1], &t[80+i], &t[80+i+1], + &t[96+i], &t[96+i+1], &t[112+i], &t[112+i+1], + ) + } + } + if xor { + xorBlocksSSE2(out, in1, in2, &t) + } else { + mixBlocksSSE2(out, in1, in2, &t) + } +} + +func processBlock(out, in1, in2 *block) { + processBlockSSE(out, in1, in2, false) +} + +func processBlockXOR(out, in1, in2 *block) { + processBlockSSE(out, in1, in2, true) +} diff --git a/vendor/golang.org/x/crypto/argon2/blamka_amd64.s b/vendor/golang.org/x/crypto/argon2/blamka_amd64.s new file mode 100644 index 000000000..b2cc05150 --- /dev/null +++ b/vendor/golang.org/x/crypto/argon2/blamka_amd64.s @@ -0,0 +1,244 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build amd64 && gc && !purego +// +build amd64,gc,!purego + +#include "textflag.h" + +DATA ·c40<>+0x00(SB)/8, $0x0201000706050403 +DATA ·c40<>+0x08(SB)/8, $0x0a09080f0e0d0c0b +GLOBL ·c40<>(SB), (NOPTR+RODATA), $16 + +DATA ·c48<>+0x00(SB)/8, $0x0100070605040302 +DATA ·c48<>+0x08(SB)/8, $0x09080f0e0d0c0b0a +GLOBL ·c48<>(SB), (NOPTR+RODATA), $16 + +#define SHUFFLE(v2, v3, v4, v5, v6, v7, t1, t2) \ + MOVO v4, t1; \ + MOVO v5, v4; \ + MOVO t1, v5; \ + MOVO v6, t1; \ + PUNPCKLQDQ v6, t2; \ + PUNPCKHQDQ v7, v6; \ + PUNPCKHQDQ t2, v6; \ + PUNPCKLQDQ v7, t2; \ + MOVO t1, v7; \ + MOVO v2, t1; \ + PUNPCKHQDQ t2, v7; \ + PUNPCKLQDQ v3, t2; \ + PUNPCKHQDQ t2, v2; \ + PUNPCKLQDQ t1, t2; \ + PUNPCKHQDQ t2, v3 + +#define SHUFFLE_INV(v2, v3, v4, v5, v6, v7, t1, t2) \ + MOVO v4, t1; \ + MOVO v5, v4; \ + MOVO t1, v5; \ + MOVO v2, t1; \ + PUNPCKLQDQ v2, t2; \ + PUNPCKHQDQ v3, v2; \ + PUNPCKHQDQ t2, v2; \ + PUNPCKLQDQ v3, t2; \ + MOVO t1, v3; \ + MOVO v6, t1; \ + PUNPCKHQDQ t2, v3; \ + PUNPCKLQDQ v7, t2; \ + PUNPCKHQDQ t2, v6; \ + PUNPCKLQDQ t1, t2; \ + PUNPCKHQDQ t2, v7 + +#define HALF_ROUND(v0, v1, v2, v3, v4, v5, v6, v7, t0, c40, c48) \ + MOVO v0, t0; \ + PMULULQ v2, t0; \ + PADDQ v2, v0; \ + PADDQ t0, v0; \ + PADDQ t0, v0; \ + PXOR v0, v6; \ + PSHUFD $0xB1, v6, v6; \ + MOVO v4, t0; \ + PMULULQ v6, t0; \ + PADDQ v6, v4; \ + PADDQ t0, v4; \ + PADDQ t0, v4; \ + PXOR v4, v2; \ + PSHUFB c40, v2; \ + MOVO v0, t0; \ + PMULULQ v2, t0; \ + PADDQ v2, v0; \ + PADDQ t0, v0; \ + PADDQ t0, v0; \ + PXOR v0, v6; \ + PSHUFB c48, v6; \ + MOVO v4, t0; \ + PMULULQ v6, t0; \ + PADDQ v6, v4; \ + PADDQ t0, v4; \ + PADDQ t0, v4; \ + PXOR v4, v2; \ + MOVO v2, t0; \ + PADDQ v2, t0; \ + PSRLQ $63, v2; \ + PXOR t0, v2; \ + MOVO v1, t0; \ + PMULULQ v3, t0; \ + PADDQ v3, v1; \ + PADDQ t0, v1; \ + PADDQ t0, v1; \ + PXOR 
v1, v7; \ + PSHUFD $0xB1, v7, v7; \ + MOVO v5, t0; \ + PMULULQ v7, t0; \ + PADDQ v7, v5; \ + PADDQ t0, v5; \ + PADDQ t0, v5; \ + PXOR v5, v3; \ + PSHUFB c40, v3; \ + MOVO v1, t0; \ + PMULULQ v3, t0; \ + PADDQ v3, v1; \ + PADDQ t0, v1; \ + PADDQ t0, v1; \ + PXOR v1, v7; \ + PSHUFB c48, v7; \ + MOVO v5, t0; \ + PMULULQ v7, t0; \ + PADDQ v7, v5; \ + PADDQ t0, v5; \ + PADDQ t0, v5; \ + PXOR v5, v3; \ + MOVO v3, t0; \ + PADDQ v3, t0; \ + PSRLQ $63, v3; \ + PXOR t0, v3 + +#define LOAD_MSG_0(block, off) \ + MOVOU 8*(off+0)(block), X0; \ + MOVOU 8*(off+2)(block), X1; \ + MOVOU 8*(off+4)(block), X2; \ + MOVOU 8*(off+6)(block), X3; \ + MOVOU 8*(off+8)(block), X4; \ + MOVOU 8*(off+10)(block), X5; \ + MOVOU 8*(off+12)(block), X6; \ + MOVOU 8*(off+14)(block), X7 + +#define STORE_MSG_0(block, off) \ + MOVOU X0, 8*(off+0)(block); \ + MOVOU X1, 8*(off+2)(block); \ + MOVOU X2, 8*(off+4)(block); \ + MOVOU X3, 8*(off+6)(block); \ + MOVOU X4, 8*(off+8)(block); \ + MOVOU X5, 8*(off+10)(block); \ + MOVOU X6, 8*(off+12)(block); \ + MOVOU X7, 8*(off+14)(block) + +#define LOAD_MSG_1(block, off) \ + MOVOU 8*off+0*8(block), X0; \ + MOVOU 8*off+16*8(block), X1; \ + MOVOU 8*off+32*8(block), X2; \ + MOVOU 8*off+48*8(block), X3; \ + MOVOU 8*off+64*8(block), X4; \ + MOVOU 8*off+80*8(block), X5; \ + MOVOU 8*off+96*8(block), X6; \ + MOVOU 8*off+112*8(block), X7 + +#define STORE_MSG_1(block, off) \ + MOVOU X0, 8*off+0*8(block); \ + MOVOU X1, 8*off+16*8(block); \ + MOVOU X2, 8*off+32*8(block); \ + MOVOU X3, 8*off+48*8(block); \ + MOVOU X4, 8*off+64*8(block); \ + MOVOU X5, 8*off+80*8(block); \ + MOVOU X6, 8*off+96*8(block); \ + MOVOU X7, 8*off+112*8(block) + +#define BLAMKA_ROUND_0(block, off, t0, t1, c40, c48) \ + LOAD_MSG_0(block, off); \ + HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, t0, c40, c48); \ + SHUFFLE(X2, X3, X4, X5, X6, X7, t0, t1); \ + HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, t0, c40, c48); \ + SHUFFLE_INV(X2, X3, X4, X5, X6, X7, t0, t1); \ + STORE_MSG_0(block, off) + +#define BLAMKA_ROUND_1(block, off, t0, t1, c40, c48) \ + LOAD_MSG_1(block, off); \ + HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, t0, c40, c48); \ + SHUFFLE(X2, X3, X4, X5, X6, X7, t0, t1); \ + HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, t0, c40, c48); \ + SHUFFLE_INV(X2, X3, X4, X5, X6, X7, t0, t1); \ + STORE_MSG_1(block, off) + +// func blamkaSSE4(b *block) +TEXT ·blamkaSSE4(SB), 4, $0-8 + MOVQ b+0(FP), AX + + MOVOU ·c40<>(SB), X10 + MOVOU ·c48<>(SB), X11 + + BLAMKA_ROUND_0(AX, 0, X8, X9, X10, X11) + BLAMKA_ROUND_0(AX, 16, X8, X9, X10, X11) + BLAMKA_ROUND_0(AX, 32, X8, X9, X10, X11) + BLAMKA_ROUND_0(AX, 48, X8, X9, X10, X11) + BLAMKA_ROUND_0(AX, 64, X8, X9, X10, X11) + BLAMKA_ROUND_0(AX, 80, X8, X9, X10, X11) + BLAMKA_ROUND_0(AX, 96, X8, X9, X10, X11) + BLAMKA_ROUND_0(AX, 112, X8, X9, X10, X11) + + BLAMKA_ROUND_1(AX, 0, X8, X9, X10, X11) + BLAMKA_ROUND_1(AX, 2, X8, X9, X10, X11) + BLAMKA_ROUND_1(AX, 4, X8, X9, X10, X11) + BLAMKA_ROUND_1(AX, 6, X8, X9, X10, X11) + BLAMKA_ROUND_1(AX, 8, X8, X9, X10, X11) + BLAMKA_ROUND_1(AX, 10, X8, X9, X10, X11) + BLAMKA_ROUND_1(AX, 12, X8, X9, X10, X11) + BLAMKA_ROUND_1(AX, 14, X8, X9, X10, X11) + RET + +// func mixBlocksSSE2(out, a, b, c *block) +TEXT ·mixBlocksSSE2(SB), 4, $0-32 + MOVQ out+0(FP), DX + MOVQ a+8(FP), AX + MOVQ b+16(FP), BX + MOVQ a+24(FP), CX + MOVQ $128, BP + +loop: + MOVOU 0(AX), X0 + MOVOU 0(BX), X1 + MOVOU 0(CX), X2 + PXOR X1, X0 + PXOR X2, X0 + MOVOU X0, 0(DX) + ADDQ $16, AX + ADDQ $16, BX + ADDQ $16, CX + ADDQ $16, DX + SUBQ $2, BP + JA loop + RET + +// func xorBlocksSSE2(out, a, b, c *block) 
+TEXT ·xorBlocksSSE2(SB), 4, $0-32 + MOVQ out+0(FP), DX + MOVQ a+8(FP), AX + MOVQ b+16(FP), BX + MOVQ a+24(FP), CX + MOVQ $128, BP + +loop: + MOVOU 0(AX), X0 + MOVOU 0(BX), X1 + MOVOU 0(CX), X2 + MOVOU 0(DX), X3 + PXOR X1, X0 + PXOR X2, X0 + PXOR X3, X0 + MOVOU X0, 0(DX) + ADDQ $16, AX + ADDQ $16, BX + ADDQ $16, CX + ADDQ $16, DX + SUBQ $2, BP + JA loop + RET diff --git a/vendor/golang.org/x/crypto/argon2/blamka_generic.go b/vendor/golang.org/x/crypto/argon2/blamka_generic.go new file mode 100644 index 000000000..a481b2243 --- /dev/null +++ b/vendor/golang.org/x/crypto/argon2/blamka_generic.go @@ -0,0 +1,163 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package argon2 + +var useSSE4 bool + +func processBlockGeneric(out, in1, in2 *block, xor bool) { + var t block + for i := range t { + t[i] = in1[i] ^ in2[i] + } + for i := 0; i < blockLength; i += 16 { + blamkaGeneric( + &t[i+0], &t[i+1], &t[i+2], &t[i+3], + &t[i+4], &t[i+5], &t[i+6], &t[i+7], + &t[i+8], &t[i+9], &t[i+10], &t[i+11], + &t[i+12], &t[i+13], &t[i+14], &t[i+15], + ) + } + for i := 0; i < blockLength/8; i += 2 { + blamkaGeneric( + &t[i], &t[i+1], &t[16+i], &t[16+i+1], + &t[32+i], &t[32+i+1], &t[48+i], &t[48+i+1], + &t[64+i], &t[64+i+1], &t[80+i], &t[80+i+1], + &t[96+i], &t[96+i+1], &t[112+i], &t[112+i+1], + ) + } + if xor { + for i := range t { + out[i] ^= in1[i] ^ in2[i] ^ t[i] + } + } else { + for i := range t { + out[i] = in1[i] ^ in2[i] ^ t[i] + } + } +} + +func blamkaGeneric(t00, t01, t02, t03, t04, t05, t06, t07, t08, t09, t10, t11, t12, t13, t14, t15 *uint64) { + v00, v01, v02, v03 := *t00, *t01, *t02, *t03 + v04, v05, v06, v07 := *t04, *t05, *t06, *t07 + v08, v09, v10, v11 := *t08, *t09, *t10, *t11 + v12, v13, v14, v15 := *t12, *t13, *t14, *t15 + + v00 += v04 + 2*uint64(uint32(v00))*uint64(uint32(v04)) + v12 ^= v00 + v12 = v12>>32 | v12<<32 + v08 += v12 + 2*uint64(uint32(v08))*uint64(uint32(v12)) + v04 ^= v08 + v04 = v04>>24 | v04<<40 + + v00 += v04 + 2*uint64(uint32(v00))*uint64(uint32(v04)) + v12 ^= v00 + v12 = v12>>16 | v12<<48 + v08 += v12 + 2*uint64(uint32(v08))*uint64(uint32(v12)) + v04 ^= v08 + v04 = v04>>63 | v04<<1 + + v01 += v05 + 2*uint64(uint32(v01))*uint64(uint32(v05)) + v13 ^= v01 + v13 = v13>>32 | v13<<32 + v09 += v13 + 2*uint64(uint32(v09))*uint64(uint32(v13)) + v05 ^= v09 + v05 = v05>>24 | v05<<40 + + v01 += v05 + 2*uint64(uint32(v01))*uint64(uint32(v05)) + v13 ^= v01 + v13 = v13>>16 | v13<<48 + v09 += v13 + 2*uint64(uint32(v09))*uint64(uint32(v13)) + v05 ^= v09 + v05 = v05>>63 | v05<<1 + + v02 += v06 + 2*uint64(uint32(v02))*uint64(uint32(v06)) + v14 ^= v02 + v14 = v14>>32 | v14<<32 + v10 += v14 + 2*uint64(uint32(v10))*uint64(uint32(v14)) + v06 ^= v10 + v06 = v06>>24 | v06<<40 + + v02 += v06 + 2*uint64(uint32(v02))*uint64(uint32(v06)) + v14 ^= v02 + v14 = v14>>16 | v14<<48 + v10 += v14 + 2*uint64(uint32(v10))*uint64(uint32(v14)) + v06 ^= v10 + v06 = v06>>63 | v06<<1 + + v03 += v07 + 2*uint64(uint32(v03))*uint64(uint32(v07)) + v15 ^= v03 + v15 = v15>>32 | v15<<32 + v11 += v15 + 2*uint64(uint32(v11))*uint64(uint32(v15)) + v07 ^= v11 + v07 = v07>>24 | v07<<40 + + v03 += v07 + 2*uint64(uint32(v03))*uint64(uint32(v07)) + v15 ^= v03 + v15 = v15>>16 | v15<<48 + v11 += v15 + 2*uint64(uint32(v11))*uint64(uint32(v15)) + v07 ^= v11 + v07 = v07>>63 | v07<<1 + + v00 += v05 + 2*uint64(uint32(v00))*uint64(uint32(v05)) + v15 ^= v00 + v15 = v15>>32 | v15<<32 + v10 += v15 + 
2*uint64(uint32(v10))*uint64(uint32(v15)) + v05 ^= v10 + v05 = v05>>24 | v05<<40 + + v00 += v05 + 2*uint64(uint32(v00))*uint64(uint32(v05)) + v15 ^= v00 + v15 = v15>>16 | v15<<48 + v10 += v15 + 2*uint64(uint32(v10))*uint64(uint32(v15)) + v05 ^= v10 + v05 = v05>>63 | v05<<1 + + v01 += v06 + 2*uint64(uint32(v01))*uint64(uint32(v06)) + v12 ^= v01 + v12 = v12>>32 | v12<<32 + v11 += v12 + 2*uint64(uint32(v11))*uint64(uint32(v12)) + v06 ^= v11 + v06 = v06>>24 | v06<<40 + + v01 += v06 + 2*uint64(uint32(v01))*uint64(uint32(v06)) + v12 ^= v01 + v12 = v12>>16 | v12<<48 + v11 += v12 + 2*uint64(uint32(v11))*uint64(uint32(v12)) + v06 ^= v11 + v06 = v06>>63 | v06<<1 + + v02 += v07 + 2*uint64(uint32(v02))*uint64(uint32(v07)) + v13 ^= v02 + v13 = v13>>32 | v13<<32 + v08 += v13 + 2*uint64(uint32(v08))*uint64(uint32(v13)) + v07 ^= v08 + v07 = v07>>24 | v07<<40 + + v02 += v07 + 2*uint64(uint32(v02))*uint64(uint32(v07)) + v13 ^= v02 + v13 = v13>>16 | v13<<48 + v08 += v13 + 2*uint64(uint32(v08))*uint64(uint32(v13)) + v07 ^= v08 + v07 = v07>>63 | v07<<1 + + v03 += v04 + 2*uint64(uint32(v03))*uint64(uint32(v04)) + v14 ^= v03 + v14 = v14>>32 | v14<<32 + v09 += v14 + 2*uint64(uint32(v09))*uint64(uint32(v14)) + v04 ^= v09 + v04 = v04>>24 | v04<<40 + + v03 += v04 + 2*uint64(uint32(v03))*uint64(uint32(v04)) + v14 ^= v03 + v14 = v14>>16 | v14<<48 + v09 += v14 + 2*uint64(uint32(v09))*uint64(uint32(v14)) + v04 ^= v09 + v04 = v04>>63 | v04<<1 + + *t00, *t01, *t02, *t03 = v00, v01, v02, v03 + *t04, *t05, *t06, *t07 = v04, v05, v06, v07 + *t08, *t09, *t10, *t11 = v08, v09, v10, v11 + *t12, *t13, *t14, *t15 = v12, v13, v14, v15 +} diff --git a/vendor/golang.org/x/crypto/argon2/blamka_ref.go b/vendor/golang.org/x/crypto/argon2/blamka_ref.go new file mode 100644 index 000000000..167c59d2d --- /dev/null +++ b/vendor/golang.org/x/crypto/argon2/blamka_ref.go @@ -0,0 +1,16 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build !amd64 || purego || !gc +// +build !amd64 purego !gc + +package argon2 + +func processBlock(out, in1, in2 *block) { + processBlockGeneric(out, in1, in2, false) +} + +func processBlockXOR(out, in1, in2 *block) { + processBlockGeneric(out, in1, in2, true) +} diff --git a/vendor/golang.org/x/crypto/hkdf/hkdf.go b/vendor/golang.org/x/crypto/hkdf/hkdf.go new file mode 100644 index 000000000..dda3f143b --- /dev/null +++ b/vendor/golang.org/x/crypto/hkdf/hkdf.go @@ -0,0 +1,93 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package hkdf implements the HMAC-based Extract-and-Expand Key Derivation +// Function (HKDF) as defined in RFC 5869. +// +// HKDF is a cryptographic key derivation function (KDF) with the goal of +// expanding limited input keying material into one or more cryptographically +// strong secret keys. +package hkdf // import "golang.org/x/crypto/hkdf" + +import ( + "crypto/hmac" + "errors" + "hash" + "io" +) + +// Extract generates a pseudorandom key for use with Expand from an input secret +// and an optional independent salt. +// +// Only use this function if you need to reuse the extracted key with multiple +// Expand invocations and different context values. Most common scenarios, +// including the generation of multiple keys, should use New instead. 
+func Extract(hash func() hash.Hash, secret, salt []byte) []byte { + if salt == nil { + salt = make([]byte, hash().Size()) + } + extractor := hmac.New(hash, salt) + extractor.Write(secret) + return extractor.Sum(nil) +} + +type hkdf struct { + expander hash.Hash + size int + + info []byte + counter byte + + prev []byte + buf []byte +} + +func (f *hkdf) Read(p []byte) (int, error) { + // Check whether enough data can be generated + need := len(p) + remains := len(f.buf) + int(255-f.counter+1)*f.size + if remains < need { + return 0, errors.New("hkdf: entropy limit reached") + } + // Read any leftover from the buffer + n := copy(p, f.buf) + p = p[n:] + + // Fill the rest of the buffer + for len(p) > 0 { + f.expander.Reset() + f.expander.Write(f.prev) + f.expander.Write(f.info) + f.expander.Write([]byte{f.counter}) + f.prev = f.expander.Sum(f.prev[:0]) + f.counter++ + + // Copy the new batch into p + f.buf = f.prev + n = copy(p, f.buf) + p = p[n:] + } + // Save leftovers for next run + f.buf = f.buf[n:] + + return need, nil +} + +// Expand returns a Reader, from which keys can be read, using the given +// pseudorandom key and optional context info, skipping the extraction step. +// +// The pseudorandomKey should have been generated by Extract, or be a uniformly +// random or pseudorandom cryptographically strong key. See RFC 5869, Section +// 3.3. Most common scenarios will want to use New instead. +func Expand(hash func() hash.Hash, pseudorandomKey, info []byte) io.Reader { + expander := hmac.New(hash, pseudorandomKey) + return &hkdf{expander, expander.Size(), info, 1, nil, nil} +} + +// New returns a Reader, from which keys can be read, using the given hash, +// secret, salt and context info. Salt and info can be nil. +func New(hash func() hash.Hash, secret, salt, info []byte) io.Reader { + prk := Extract(hash, secret, salt) + return Expand(hash, prk, info) +}
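Editor's note (appended after the vendored diff, not part of it): the newly vendored golang.org/x/crypto/argon2 package is consumed through its top-level Key/IDKey helpers, which drive the deriveKey/initBlocks/processBlocks machinery shown above. A minimal, illustrative sketch of calling IDKey follows; the passphrase, salt handling, and cost parameters (time=1, 64 MiB memory, 4 lanes, 32-byte output) are assumptions chosen for the example, not values taken from this repository.

package main

import (
	"crypto/rand"
	"fmt"

	"golang.org/x/crypto/argon2"
)

func main() {
	// Use a fresh random salt per derivation; 16 bytes is a common choice.
	salt := make([]byte, 16)
	if _, err := rand.Read(salt); err != nil {
		panic(err)
	}

	// Argon2id: 1 pass, 64 MiB of memory, 4 lanes, 32-byte key.
	// These costs are illustrative only; pick them for your own threat model.
	key := argon2.IDKey([]byte("example passphrase"), salt, 1, 64*1024, 4, 32)
	fmt.Printf("derived key: %x\n", key)
}

Which blamka implementation backs processBlock/processBlockXOR is decided by the build constraints in the files above: blamka_amd64.go and blamka_amd64.s apply under amd64 && gc && !purego (with useSSE4 set from cpu.X86.HasSSE41 at init), while blamka_ref.go supplies the portable fallback, so building with -tags purego exercises the generic path even on amd64.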
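Similarly, the vendored golang.org/x/crypto/hkdf package is used either through Extract and Expand separately or, most commonly, through New, which chains the two and returns an io.Reader of pseudorandom key material (capped at 255 * Hash.Size() bytes, as enforced by the counter check in Read). A minimal sketch with made-up secret, salt, and info values:

package main

import (
	"crypto/sha256"
	"fmt"
	"io"

	"golang.org/x/crypto/hkdf"
)

func main() {
	// Placeholder inputs for illustration only.
	secret := []byte("input keying material")
	salt := []byte("optional salt")
	info := []byte("example context")

	// New performs Extract followed by Expand; read as many subkey bytes
	// as needed from the returned Reader.
	r := hkdf.New(sha256.New, secret, salt, info)

	k1 := make([]byte, 32)
	k2 := make([]byte, 32)
	if _, err := io.ReadFull(r, k1); err != nil {
		panic(err)
	}
	if _, err := io.ReadFull(r, k2); err != nil {
		panic(err)
	}
	fmt.Printf("k1=%x\nk2=%x\n", k1, k2)
}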