Compare commits

...

66 Commits

Author SHA1 Message Date
3bc20e95cc secrets migration & restitution 2023-02-12 11:58:26 +01:00
1aefc5d2b7 go.mod: go 1.20 2023-02-09 08:58:48 +01:00
5c432e3b42 go mod update 2023-02-07 21:31:02 +01:00
b6c714fac7 downloads API, UI 2023-02-07 21:29:19 +01:00
e44303eab9 go mod update 2023-02-02 17:08:56 +01:00
2a9295e8e8 bump initrdv2 2023-02-02 00:32:38 +01:00
52ffbe9727 go mod update 2023-01-27 12:01:56 +01:00
811a3bddfd renew: don't use Renew, just create a new cert 2023-01-27 06:42:39 +01:00
227c341f6b renew: handle more error cases 2023-01-27 06:25:51 +01:00
153c37b591 secrets: more verbose errors 2023-01-27 06:13:05 +01:00
4ff85eaeb3 remove bootstrap pods from cluster, and render in hosts' context 2023-01-16 18:17:11 +01:00
76e02c6f31 allow configuration of grub-support version 2022-12-22 00:28:54 +01:00
93b32eb52a dockerfile: go 1.19.0 2022-08-05 16:06:36 +02:00
0fcd219268 boot-v2.iso: initial commit 2022-07-21 16:03:39 +02:00
18d3c42fc7 boot efi 2022-05-31 11:57:21 +02:00
645c617956 migrate boot.tar to initrd-v2 2022-05-31 09:52:43 +02:00
dacfc8c6ce upgrade deps 2022-04-28 10:01:21 +02:00
16a0ff0823 bootv2 support 2022-04-28 03:33:19 +02:00
0d298c9951 version dumps 2021-11-14 15:40:59 +01:00
3673a2f361 ssh acls preliminary support 2021-11-14 15:28:40 +01:00
4d92925170 env updates 2020-10-02 05:29:04 +02:00
a66d54d238 oops 2020-04-22 18:40:30 +02:00
748a028161 tls: automatic certificate renewal 2020-04-22 17:36:04 +02:00
5e667295ac small moves 2020-03-05 10:55:59 +01:00
a54d4bc15e cmdline query param for boot.iso 2020-03-05 00:06:49 +01:00
1ee5d1c15a fix: render template for addons and bootstrap pods 2019-12-26 11:10:23 +01:00
49a16fe550 boot.img: more verbose losetup run 2019-12-23 11:32:55 +01:00
984b2e0057 improve fetch status report 2019-12-19 18:34:18 +01:00
863a436915 don't write secret-data if not needed 2019-12-19 16:57:22 +01:00
55c72aefa8 sanity commit 2019-12-16 08:00:57 +01:00
8ce4e97922 fix: upload config without previous config + bump deps 2019-12-07 21:09:12 +01:00
f43f4fcec4 generate ssh secrets 2019-12-03 14:39:42 +01:00
6ef93489bd golang 1.13.4 2019-11-26 13:58:50 +01:00
85af5ccc36 Merge branch 'fix-api-ds' into 'master'
Fix API for bootstrap-pods daemonsets (apps/v1)

See merge request novit-nc/direktil/local-server!2
2019-11-08 04:42:41 +00:00
051b42fda8 Fix API for bootstrap-pods daemonsets (apps/v1) 2019-11-08 08:15:43 +11:00
39ea639cc3 dir2config: hosts_by_cluster 2019-10-24 14:06:18 +11:00
b5b8514c59 improve debugging 2019-10-23 11:03:35 +11:00
fec03e0a7e better merge of values 2019-10-19 15:08:41 +11:00
840824d438 Merge branch 'gfenollar' into 'master'
Add curl, govc and other stuff to ease ops

See merge request novit-nc/direktil/local-server!1
2019-10-18 07:18:14 +00:00
59fe6373dc more logs when loading defaults 2019-10-18 18:02:39 +11:00
43304de2ca Add double quotes for path with spaces 2019-10-18 18:00:04 +11:00
4ad32c64a6 Add curl, govc, and vmware upload script for quicker nodes updates 2019-10-18 16:52:45 +11:00
49c73be97a clear vendor 2019-10-17 14:21:08 +11:00
c0fc7bbe3d move store out 2019-10-17 13:53:07 +11:00
6ddc4d6da4 dir2config: bs pods: fix pod template 2019-10-17 01:07:27 +11:00
6c8835c5ab update dockerfile 2019-10-16 19:17:22 +11:00
4b0f5dca84 store: log when upload SHA1 is ok 2019-10-16 18:24:28 +11:00
48ab32f319 dkl-store-upload 2019-10-16 18:23:03 +11:00
a7158e9e56 store: upload support 2019-10-16 17:58:48 +11:00
daa919e953 template details rendering 2019-10-16 17:09:01 +11:00
50ee480caf dir2config: hosts_by_group returns maps 2019-10-16 16:46:04 +11:00
d27c4ed7a3 dir2config: more logs on cluster templates 2019-10-16 16:21:12 +11:00
36e1367522 store: prepare upload 2019-10-16 16:16:15 +11:00
4679da1c1e bootstrap pods support 2019-10-09 17:45:19 +11:00
dde0ad6975 ls 2019-10-09 16:58:28 +11:00
ee2779cc9d better map merge 2019-04-19 17:07:22 +01:00
a1fcd4093c so go-restful require different prefixes now 2019-04-18 18:47:31 +01:00
9b62d598bb factorize filter clauses 2019-04-15 18:56:31 +01:00
456722a616 dls: password support 2019-04-13 10:36:58 +01:00
6a0cd6da02 dkl-store 2019-04-12 17:17:49 +01:00
676c4bc21b todo 2019-04-12 10:22:39 +01:00
201bca587e check status code 2019-04-12 10:15:57 +01:00
024fcdd35c improve update-boot.sh 2019-03-29 19:51:23 +11:00
663b42ed47 update boot 2019-03-29 19:49:31 +11:00
c21b07572d docker & go updates 2019-03-19 14:57:57 +11:00
1391108d60 golang 1.12 2019-03-06 17:27:40 +11:00
1022 changed files with 21386 additions and 204425 deletions

.gitignore (vendored, 3 changed lines)
View File

@@ -1,2 +1,5 @@
 *.sw[po]
 modd-local.conf
+/tmp
+/go.work
+/dist

View File

@@ -1,23 +1,23 @@
 # ------------------------------------------------------------------------
-from golang:1.11.5 as build
+from mcluseau/golang-builder:1.20.0 as build
-env pkg novit.nc/direktil/local-server
-copy vendor /go/src/${pkg}/vendor
-copy pkg /go/src/${pkg}/pkg
-copy cmd /go/src/${pkg}/cmd
-workdir /go/src/${pkg}
-run go test ./... \
-&& go install ./cmd/...
 # ------------------------------------------------------------------------
 from debian:stretch
 entrypoint ["/bin/dkl-local-server"]
+env _uncache 1
 run apt-get update \
-&& apt-get install -y genisoimage gdisk dosfstools util-linux udev \
+&& apt-get install -y genisoimage gdisk dosfstools util-linux udev binutils systemd \
 && apt-get clean
 run yes |apt-get install -y grub2 grub-pc-bin grub-efi-amd64-bin \
 && apt-get clean
+run apt-get install -y ca-certificates curl openssh-client \
+&& apt-get clean
+run curl -L https://github.com/vmware/govmomi/releases/download/v0.21.0/govc_linux_amd64.gz | gunzip > /bin/govc && chmod +x /bin/govc
+copy upload-vmware.sh govc.env /var/lib/direktil/
 copy --from=build /go/bin/ /bin/

View File

@@ -1,16 +1,15 @@
 package main
 import (
-"bytes"
 "flag"
-"fmt"
 "log"
 "os"
 yaml "gopkg.in/yaml.v2"
-"novit.nc/direktil/pkg/localconfig"
-"novit.nc/direktil/local-server/pkg/clustersconfig"
+"novit.tech/direktil/pkg/localconfig"
+"novit.tech/direktil/local-server/pkg/clustersconfig"
 )
 var (
@@ -78,6 +77,12 @@ func main() {
 dst.Hosts = append(dst.Hosts, &localconfig.Host{
 Name: host.Name,
+ClusterName: ctx.Cluster.Name,
+Labels: ctx.Labels,
+Annotations: ctx.Annotations,
 MACs: macs,
 IPs: ips,
@@ -87,6 +92,7 @@ func main() {
 Initrd: ctx.Group.Initrd,
 Versions: ctx.Group.Versions,
+BootstrapConfig: ctx.BootstrapConfig(),
 Config: ctx.Config(),
 })
 }
@@ -104,34 +110,3 @@ func main() {
 }
 }
-func renderAddons(cluster *clustersconfig.Cluster) string {
-if len(cluster.Addons) == 0 {
-return ""
-}
-addons := src.Addons[cluster.Addons]
-if addons == nil {
-log.Fatalf("cluster %q: no addons with name %q", cluster.Name, cluster.Addons)
-}
-clusterAsMap := asMap(cluster)
-clusterAsMap["kubernetes_svc_ip"] = cluster.KubernetesSvcIP().String()
-clusterAsMap["dns_svc_ip"] = cluster.DNSSvcIP().String()
-buf := &bytes.Buffer{}
-for _, addon := range addons {
-fmt.Fprintf(buf, "---\n# addon: %s\n", addon.Name)
-err := addon.Execute(buf, clusterAsMap, nil)
-if err != nil {
-log.Fatalf("cluster %q: addons %q: failed to render %q: %v",
-cluster.Name, cluster.Addons, addon.Name, err)
-}
-fmt.Fprintln(buf)
-}
-return buf.String()
-}

View File

@ -0,0 +1,116 @@
package main
import (
"bytes"
"fmt"
"log"
"path"
"novit.tech/direktil/local-server/pkg/clustersconfig"
)
func clusterFuncs(clusterSpec *clustersconfig.Cluster) map[string]interface{} {
cluster := clusterSpec.Name
return map[string]interface{}{
"password": func(name string) (s string) {
return fmt.Sprintf("{{ password %q %q }}", cluster, name)
},
"token": func(name string) (s string) {
return fmt.Sprintf("{{ token %q %q }}", cluster, name)
},
"ca_key": func(name string) (s string, err error) {
// TODO check CA exists
// ?ctx.clusterConfig.CA(name)
return fmt.Sprintf("{{ ca_key %q %q }}", cluster, name), nil
},
"ca_crt": func(name string) (s string, err error) {
// TODO check CA exists
return fmt.Sprintf("{{ ca_crt %q %q }}", cluster, name), nil
},
"ca_dir": func(name string) (s string, err error) {
return fmt.Sprintf("{{ ca_dir %q %q }}", cluster, name), nil
},
"hosts_by_cluster": func(cluster string) (hosts []interface{}) {
for _, host := range src.Hosts {
if host.Cluster == cluster {
hosts = append(hosts, asMap(host))
}
}
if len(hosts) == 0 {
log.Printf("WARNING: no hosts in cluster %q", cluster)
}
return
},
"hosts_by_group": func(group string) (hosts []interface{}) {
for _, host := range src.Hosts {
if host.Group == group {
hosts = append(hosts, asMap(host))
}
}
if len(hosts) == 0 {
log.Printf("WARNING: no hosts in group %q", group)
}
return
},
}
}
func renderClusterTemplates(cluster *clustersconfig.Cluster, setName string,
templates []*clustersconfig.Template) []byte {
clusterAsMap := asMap(cluster)
clusterAsMap["kubernetes_svc_ip"] = cluster.KubernetesSvcIP().String()
clusterAsMap["dns_svc_ip"] = cluster.DNSSvcIP().String()
funcs := clusterFuncs(cluster)
log.Print("rendering cluster templates in ", setName)
buf := &bytes.Buffer{}
contextName := "cluster:" + cluster.Name
for _, t := range templates {
log.Print("- template: ", setName, ": ", t.Name)
fmt.Fprintf(buf, "---\n# %s: %s\n", setName, t.Name)
err := t.Execute(contextName, path.Join(setName, t.Name), buf, clusterAsMap, funcs)
if err != nil {
log.Fatalf("cluster %q: %s: failed to render %q: %v",
cluster.Name, setName, t.Name, err)
}
fmt.Fprintln(buf)
}
return buf.Bytes()
}
func renderAddons(cluster *clustersconfig.Cluster) string {
if len(cluster.Addons) == 0 {
return ""
}
addons := src.Addons[cluster.Addons]
if addons == nil {
log.Fatalf("cluster %q: no addons with name %q", cluster.Name, cluster.Addons)
}
return string(renderClusterTemplates(cluster, "addons", addons))
}
type namePod struct {
Namespace string
Name string
Pod map[string]interface{}
}

View File

@ -2,19 +2,32 @@ package main
import ( import (
"bytes" "bytes"
"crypto/sha1"
"encoding/hex"
"fmt" "fmt"
"io"
"log" "log"
"path"
"reflect"
"strings"
yaml "gopkg.in/yaml.v2" yaml "gopkg.in/yaml.v2"
"novit.nc/direktil/local-server/pkg/clustersconfig" "novit.tech/direktil/pkg/config"
"novit.tech/direktil/local-server/pkg/clustersconfig"
) )
type renderContext struct { type renderContext struct {
Labels map[string]string
Annotations map[string]string
Host *clustersconfig.Host Host *clustersconfig.Host
Group *clustersconfig.Group Group *clustersconfig.Group
Cluster *clustersconfig.Cluster Cluster *clustersconfig.Cluster
Vars map[string]interface{} Vars map[string]interface{}
BootstrapConfigTemplate *clustersconfig.Template
ConfigTemplate *clustersconfig.Template ConfigTemplate *clustersconfig.Template
StaticPodsTemplate *clustersconfig.Template StaticPodsTemplate *clustersconfig.Template
@ -41,16 +54,19 @@ func newRenderContext(host *clustersconfig.Host, cfg *clustersconfig.Config) (ct
group.Vars, group.Vars,
host.Vars, host.Vars,
} { } {
for k, v := range oVars { mapMerge(vars, oVars)
vars[k] = v
}
} }
return &renderContext{ return &renderContext{
Labels: mergeLabels(cluster.Labels, group.Labels, host.Labels),
Annotations: mergeLabels(cluster.Annotations, group.Annotations, host.Annotations),
Host: host, Host: host,
Group: group, Group: group,
Cluster: cluster, Cluster: cluster,
Vars: vars, Vars: vars,
BootstrapConfigTemplate: cfg.ConfigTemplate(group.BootstrapConfig),
ConfigTemplate: cfg.ConfigTemplate(group.Config), ConfigTemplate: cfg.ConfigTemplate(group.Config),
StaticPodsTemplate: cfg.StaticPodsTemplate(group.StaticPods), StaticPodsTemplate: cfg.StaticPodsTemplate(group.StaticPods),
@ -58,10 +74,95 @@ func newRenderContext(host *clustersconfig.Host, cfg *clustersconfig.Config) (ct
}, nil }, nil
} }
func mergeLabels(sources ...map[string]string) map[string]string {
ret := map[string]string{}
for _, src := range sources {
for k, v := range src {
ret[k] = v
}
}
return ret
}
func mapMerge(target, source map[string]interface{}) {
for k, v := range source {
target[k] = genericMerge(target[k], v)
}
}
func genericMerge(target, source interface{}) (result interface{}) {
srcV := reflect.ValueOf(source)
tgtV := reflect.ValueOf(target)
if srcV.Kind() == reflect.Map && tgtV.Kind() == reflect.Map {
// XXX maybe more specific later
result = map[interface{}]interface{}{}
resultV := reflect.ValueOf(result)
tgtIt := tgtV.MapRange()
for tgtIt.Next() {
sv := srcV.MapIndex(tgtIt.Key())
if sv.Kind() == 0 {
resultV.SetMapIndex(tgtIt.Key(), tgtIt.Value())
continue
}
merged := genericMerge(tgtIt.Value().Interface(), sv.Interface())
resultV.SetMapIndex(tgtIt.Key(), reflect.ValueOf(merged))
}
srcIt := srcV.MapRange()
for srcIt.Next() {
if resultV.MapIndex(srcIt.Key()).Kind() != 0 {
continue // already done
}
resultV.SetMapIndex(srcIt.Key(), srcIt.Value())
}
return
}
return source
}
func (ctx *renderContext) Name() string {
switch {
case ctx.Host != nil:
return "host:" + ctx.Host.Name
case ctx.Group != nil:
return "group:" + ctx.Group.Name
case ctx.Cluster != nil:
return "cluster:" + ctx.Cluster.Name
default:
return "unknown"
}
}
func (ctx *renderContext) BootstrapConfig() string {
if ctx.BootstrapConfigTemplate == nil {
log.Fatalf("no such (bootstrap) config: %q", ctx.Group.BootstrapConfig)
}
return ctx.renderConfig(ctx.BootstrapConfigTemplate)
}
func (ctx *renderContext) Config() string { func (ctx *renderContext) Config() string {
if ctx.ConfigTemplate == nil { if ctx.ConfigTemplate == nil {
log.Fatalf("no such config: %q", ctx.Group.Config) log.Fatalf("no such config: %q", ctx.Group.Config)
} }
return ctx.renderConfig(ctx.ConfigTemplate)
}
func (ctx *renderContext) renderConfig(configTemplate *clustersconfig.Template) string {
buf := new(strings.Builder)
ctx.renderConfigTo(buf, configTemplate)
return buf.String()
}
func (ctx *renderContext) renderConfigTo(buf io.Writer, configTemplate *clustersconfig.Template) {
ctxName := ctx.Name()
ctxMap := ctx.asMap() ctxMap := ctx.asMap()
@ -69,7 +170,7 @@ func (ctx *renderContext) Config() string {
render := func(what string, t *clustersconfig.Template) (s string, err error) { render := func(what string, t *clustersconfig.Template) (s string, err error) {
buf := &bytes.Buffer{} buf := &bytes.Buffer{}
err = t.Execute(buf, ctxMap, templateFuncs) err = t.Execute(ctxName, what, buf, ctxMap, templateFuncs)
if err != nil { if err != nil {
log.Printf("host %s: failed to render %s [%q]: %v", ctx.Host.Name, what, t.Name, err) log.Printf("host %s: failed to render %s [%q]: %v", ctx.Host.Name, what, t.Name, err)
return return
@ -92,15 +193,41 @@ func (ctx *renderContext) Config() string {
return "", fmt.Errorf("no static pods template named %q", name) return "", fmt.Errorf("no static pods template named %q", name)
} }
return render("static pods", t) return render("static-pods", t)
} }
buf := bytes.NewBuffer(make([]byte, 0, 4096)) extraFuncs["bootstrap_pods_files"] = func(dir string) (string, error) {
if err := ctx.ConfigTemplate.Execute(buf, ctxMap, extraFuncs); err != nil { namePods := ctx.renderBootstrapPods()
defs := make([]config.FileDef, 0)
for _, namePod := range namePods {
name := namePod.Namespace + "_" + namePod.Name
ba, err := yaml.Marshal(namePod.Pod)
if err != nil {
return "", fmt.Errorf("bootstrap pod %s: failed to render: %v", name, err)
}
defs = append(defs, config.FileDef{
Path: path.Join(dir, name+".yaml"),
Mode: 0640,
Content: string(ba),
})
}
ba, err := yaml.Marshal(defs)
return string(ba), err
}
extraFuncs["machine_id"] = func() string {
ba := sha1.Sum([]byte(ctx.Cluster.Name + "/" + ctx.Host.Name)) // TODO: check semantics of machine-id
return hex.EncodeToString(ba[:])
}
if err := configTemplate.Execute(ctxName, "config", buf, ctxMap, extraFuncs); err != nil {
log.Fatalf("failed to render config %q for host %q: %v", ctx.Group.Config, ctx.Host.Name, err) log.Fatalf("failed to render config %q for host %q: %v", ctx.Group.Config, ctx.Host.Name, err)
} }
return buf.String()
} }
func (ctx *renderContext) StaticPods() (ba []byte, err error) { func (ctx *renderContext) StaticPods() (ba []byte, err error) {
@ -111,7 +238,7 @@ func (ctx *renderContext) StaticPods() (ba []byte, err error) {
ctxMap := ctx.asMap() ctxMap := ctx.asMap()
buf := bytes.NewBuffer(make([]byte, 0, 4096)) buf := bytes.NewBuffer(make([]byte, 0, 4096))
if err = ctx.StaticPodsTemplate.Execute(buf, ctxMap, ctx.templateFuncs(ctxMap)); err != nil { if err = ctx.StaticPodsTemplate.Execute(ctx.Name(), "static-pods", buf, ctxMap, ctx.templateFuncs(ctxMap)); err != nil {
return return
} }
@ -135,7 +262,7 @@ func (ctx *renderContext) templateFuncs(ctxMap map[string]interface{}) map[strin
} }
buf := &bytes.Buffer{} buf := &bytes.Buffer{}
err = req.Execute(buf, ctxMap, nil) err = req.Execute(ctx.Name(), "req:"+name, buf, ctxMap, nil)
if err != nil { if err != nil {
return return
} }
@ -159,26 +286,8 @@ func (ctx *renderContext) templateFuncs(ctxMap map[string]interface{}) map[strin
return return
} }
return map[string]interface{}{ funcs := clusterFuncs(ctx.Cluster)
"token": func(name string) (s string) { for k, v := range map[string]interface{}{
return fmt.Sprintf("{{ token %q %q }}", cluster, name)
},
"ca_key": func(name string) (s string, err error) {
// TODO check CA exists
// ?ctx.clusterConfig.CA(name)
return fmt.Sprintf("{{ ca_key %q %q }}", cluster, name), nil
},
"ca_crt": func(name string) (s string, err error) {
// TODO check CA exists
return fmt.Sprintf("{{ ca_crt %q %q }}", cluster, name), nil
},
"ca_dir": func(name string) (s string, err error) {
return fmt.Sprintf("{{ ca_dir %q %q }}", cluster, name), nil
},
"tls_key": func(name string) (string, error) { "tls_key": func(name string) (string, error) {
return getKeyCert(name, "tls_key") return getKeyCert(name, "tls_key")
}, },
@ -191,6 +300,11 @@ func (ctx *renderContext) templateFuncs(ctxMap map[string]interface{}) map[strin
return getKeyCert(name, "tls_dir") return getKeyCert(name, "tls_dir")
}, },
"ssh_host_keys": func(dir string) (s string) {
return fmt.Sprintf("{{ ssh_host_keys %q %q %q}}",
dir, cluster, ctx.Host.Name)
},
"hosts_of_group": func() (hosts []interface{}) { "hosts_of_group": func() (hosts []interface{}) {
hosts = make([]interface{}, 0) hosts = make([]interface{}, 0)
@ -213,7 +327,10 @@ func (ctx *renderContext) templateFuncs(ctxMap map[string]interface{}) map[strin
} }
return return
}, },
} {
funcs[k] = v
} }
return funcs
} }
func (ctx *renderContext) asMap() map[string]interface{} { func (ctx *renderContext) asMap() map[string]interface{} {

View File

@ -0,0 +1,54 @@
package main
import (
"testing"
yaml "gopkg.in/yaml.v2"
)
func TestMerge(t *testing.T) {
if v := genericMerge("a", "b"); v != "b" {
t.Errorf("got %q", v)
}
if v := unparse(genericMerge(parse(`
a: t
b: t
m:
a1: t
b1: t
`), parse(`
a: s
c: s
m:
a1: s
c1: s
`))); "\n"+v != `
a: s
b: t
c: s
m:
a1: s
b1: t
c1: s
` {
t.Errorf("got\n%s", v)
}
}
func parse(s string) (r interface{}) {
r = map[string]interface{}{}
err := yaml.Unmarshal([]byte(s), r)
if err != nil {
panic(err)
}
return
}
func unparse(v interface{}) (s string) {
ba, err := yaml.Marshal(v)
if err != nil {
panic(err)
}
return string(ba)
}

View File

@ -0,0 +1,77 @@
package main
import (
"bytes"
"fmt"
"io"
"log"
yaml "gopkg.in/yaml.v2"
"novit.tech/direktil/local-server/pkg/clustersconfig"
)
func (ctx *renderContext) renderBootstrapPods() (pods []namePod) {
if ctx.Cluster.BootstrapPods == "" {
return
}
bootstrapPods, ok := src.BootstrapPods[ctx.Cluster.BootstrapPods]
if !ok {
log.Fatalf("no bootstrap pods template named %q", ctx.Cluster.BootstrapPods)
}
// render bootstrap pods
parts := bytes.Split(ctx.renderHostTemplates("bootstrap-pods", bootstrapPods), []byte("\n---\n"))
for _, part := range parts {
buf := bytes.NewBuffer(part)
dec := yaml.NewDecoder(buf)
for n := 0; ; n++ {
str := buf.String()
podMap := map[string]interface{}{}
err := dec.Decode(podMap)
if err == io.EOF {
break
} else if err != nil {
log.Fatalf("bootstrap pod %d: failed to parse: %v\n%s", n, err, str)
}
if len(podMap) == 0 {
continue
}
if podMap["metadata"] == nil {
log.Fatalf("bootstrap pod %d: no metadata\n%s", n, buf.String())
}
md := podMap["metadata"].(map[interface{}]interface{})
namespace := md["namespace"].(string)
name := md["name"].(string)
pods = append(pods, namePod{namespace, name, podMap})
}
}
return
}
func (ctx *renderContext) renderHostTemplates(setName string,
templates []*clustersconfig.Template) []byte {
log.Print("rendering host templates in ", setName)
buf := &bytes.Buffer{}
for _, t := range templates {
log.Print("- template: ", setName, ": ", t.Name)
fmt.Fprintf(buf, "---\n# %s: %s\n", setName, t.Name)
ctx.renderConfigTo(buf, t)
fmt.Fprintln(buf)
}
return buf.Bytes()
}

View File

@@ -26,11 +26,32 @@ func authorizeToken(r *http.Request, token string) bool {
 }
 reqToken := r.Header.Get("Authorization")
+if reqToken != "" {
 return reqToken == "Bearer "+token
 }
+return r.URL.Query().Get("token") == token
+}
 func forbidden(w http.ResponseWriter, r *http.Request) {
 log.Printf("denied access to %s from %s", r.RequestURI, r.RemoteAddr)
 http.Error(w, "Forbidden", http.StatusForbidden)
 }
+func requireToken(token string, handler http.Handler) http.Handler {
+return http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
+if !authorizeToken(req, token) {
+forbidden(w, req)
+return
+}
+handler.ServeHTTP(w, req)
+})
+}
+func requireAdmin(handler http.Handler) http.Handler {
+return requireToken(*adminToken, handler)
+}
+func requireHosts(handler http.Handler) http.Handler {
+return requireToken(*hostsToken, handler)
+}
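
A minimal usage sketch of the new middleware (illustrative only: the /example paths and the status handler are hypothetical, while requireAdmin/requireHosts and the Bearer-or-?token= check come from the diff above):

package main

import "net/http"

// exampleRoutes shows how the token middleware wraps ordinary handlers,
// the same pattern used for /state in main.go later in this compare.
func exampleRoutes() {
	// hypothetical handler, for illustration only
	status := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		w.Write([]byte("ok\n"))
	})

	// accepts "Authorization: Bearer <admin token>" or "?token=<admin token>"
	http.Handle("/example/admin", requireAdmin(status))

	// same check, but against the hosts token
	http.Handle("/example/hosts", requireHosts(status))
}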

View File

@@ -3,6 +3,7 @@ package main
 import (
 "archive/tar"
 "compress/gzip"
+"flag"
 "fmt"
 "io"
 "io/ioutil"
@@ -56,8 +57,10 @@ func buildBootImgGZ(out io.Writer, ctx *renderContext) (err error) {
 return
 }
+var grubSupportVersion = flag.String("grub-support", "1.0.1", "GRUB support version")
 func setupBootImage(bootImg *os.File, ctx *renderContext) (err error) {
-path, err := ctx.distFetch("grub-support", "1.0.0")
+path, err := ctx.distFetch("grub-support", *grubSupportVersion)
 if err != nil {
 return
 }
@@ -81,7 +84,10 @@ func setupBootImage(bootImg *os.File, ctx *renderContext) (err error) {
 return
 }
-devb, err := exec.Command("losetup", "--find", "--show", "--partscan", bootImg.Name()).CombinedOutput()
+log.Print("running losetup...")
+cmd := exec.Command("losetup", "--find", "--show", "--partscan", bootImg.Name())
+cmd.Stderr = os.Stderr
+devb, err := cmd.Output()
 if err != nil {
 return
 }

View File

@ -8,8 +8,12 @@ import (
"os" "os"
"os/exec" "os/exec"
"path/filepath" "path/filepath"
"strconv"
"github.com/cespare/xxhash"
) )
// deprecated
func buildBootISO(out io.Writer, ctx *renderContext) error { func buildBootISO(out io.Writer, ctx *renderContext) error {
tempDir, err := ioutil.TempDir("/tmp", "iso-") tempDir, err := ioutil.TempDir("/tmp", "iso-")
if err != nil { if err != nil {
@ -57,7 +61,7 @@ insmod all_video
set timeout=3 set timeout=3
menuentry "Direktil" { menuentry "Direktil" {
linux /vmlinuz direktil.boot=DEVNAME=sr0 direktil.boot.fs=iso9660 linux /vmlinuz direktil.boot=DEVNAME=sr0 direktil.boot.fs=iso9660 `+ctx.CmdLine+`
initrd /initrd initrd /initrd
} }
`), 0644) `), 0644)
@ -189,3 +193,159 @@ menuentry "Direktil" {
return cmd.Run() return cmd.Run()
} }
func buildBootISOv2(out io.Writer, ctx *renderContext) (err error) {
tempDir, err := ioutil.TempDir("/tmp", "iso-v2-")
if err != nil {
return
}
defer os.RemoveAll(tempDir)
buildRes := func(build func(out io.Writer, ctx *renderContext) error, dst string) (err error) {
log.Printf("iso-v2: building %s", dst)
outPath := filepath.Join(tempDir, dst)
if err = os.MkdirAll(filepath.Dir(outPath), 0755); err != nil {
return err
}
out, err := os.Create(outPath)
if err != nil {
return err
}
defer out.Close()
err = build(out, ctx)
if err != nil {
return
}
return err
}
err = func() (err error) {
// grub
if err = os.MkdirAll(filepath.Join(tempDir, "grub"), 0755); err != nil {
return
}
// create a tag file
bootstrapBytes, _, err := ctx.BootstrapConfig()
if err != nil {
return
}
h := xxhash.New()
fmt.Fprintln(h, ctx.Host.Kernel)
h.Write(bootstrapBytes)
tag := "dkl-" + strconv.FormatUint(h.Sum64(), 32) + ".tag"
f, err := os.Create(filepath.Join(tempDir, tag))
if err != nil {
return
}
f.Write([]byte("direktil marker file\n"))
f.Close()
err = ioutil.WriteFile(filepath.Join(tempDir, "grub", "grub.cfg"), []byte(`
search --set=root --file /`+tag+`
insmod all_video
set timeout=3
menuentry "Direktil" {
linux /vmlinuz `+ctx.CmdLine+`
initrd /initrd
}
`), 0644)
if err != nil {
return
}
coreImgPath := filepath.Join(tempDir, "grub", "core.img")
grubCfgPath := filepath.Join(tempDir, "grub", "grub.cfg")
cmd := exec.Command("grub-mkstandalone",
"--format=i386-pc",
"--output="+coreImgPath,
"--install-modules=linux normal iso9660 biosdisk memdisk search tar ls",
"--modules=linux normal iso9660 biosdisk search",
"--locales=",
"--fonts=",
"boot/grub/grub.cfg="+grubCfgPath,
)
cmd.Stdout = os.Stdout
cmd.Stderr = os.Stderr
if err := cmd.Run(); err != nil {
return err
}
defer os.Remove(coreImgPath)
defer os.Remove(grubCfgPath)
out, err := os.Create(filepath.Join(tempDir, "grub", "bios.img"))
if err != nil {
return err
}
defer out.Close()
b, err := ioutil.ReadFile("/usr/lib/grub/i386-pc/cdboot.img")
if err != nil {
return err
}
if _, err := out.Write(b); err != nil {
return err
}
b, err = ioutil.ReadFile(coreImgPath)
if err != nil {
return err
}
if _, err := out.Write(b); err != nil {
return err
}
return nil
}()
if err != nil {
return err
}
// kernel and initrd
buildRes(fetchKernel, "vmlinuz")
buildRes(buildInitrdV2, "initrd")
// build the ISO
mkisofs, err := exec.LookPath("genisoimage")
if err != nil {
mkisofs, err = exec.LookPath("mkisofs")
}
if err != nil {
return err
}
cmd := exec.Command(mkisofs,
"-quiet",
"-joliet",
"-joliet-long",
"-rock",
"-translation-table",
"-no-emul-boot",
"-boot-load-size", "4",
"-boot-info-table",
"-eltorito-boot", "grub/bios.img",
"-eltorito-catalog", "grub/boot.cat",
tempDir,
)
cmd.Stdout = out
cmd.Stderr = os.Stderr
return cmd.Run()
}

View File

@ -2,11 +2,13 @@ package main
import ( import (
"archive/tar" "archive/tar"
"fmt" "bytes"
"io" "io"
"io/ioutil"
"log" "log"
"os" "os"
"path/filepath"
"novit.tech/direktil/local-server/pkg/utf16"
) )
func rmTempFile(f *os.File) { func rmTempFile(f *os.File) {
@ -21,7 +23,7 @@ func buildBootTar(out io.Writer, ctx *renderContext) (err error) {
defer arch.Close() defer arch.Close()
archAdd := func(path string, ba []byte) (err error) { archAdd := func(path string, ba []byte) (err error) {
err = arch.WriteHeader(&tar.Header{Name: path, Size: int64(len(ba))}) err = arch.WriteHeader(&tar.Header{Name: path, Mode: 0640, Size: int64(len(ba))})
if err != nil { if err != nil {
return return
} }
@ -29,70 +31,95 @@ func buildBootTar(out io.Writer, ctx *renderContext) (err error) {
return return
} }
// config // kernel
cfgBytes, cfg, err := ctx.Config() kernelPath, err := ctx.distFetch("kernels", ctx.Host.Kernel)
if err != nil { if err != nil {
return err return
} }
archAdd("config.yaml", cfgBytes) kernelBytes, err := ioutil.ReadFile(kernelPath)
// add "current" elements
type distCopy struct {
Src []string
Dst string
}
// kernel and initrd
copies := []distCopy{
{Src: []string{"kernels", ctx.Host.Kernel}, Dst: "current/vmlinuz"},
{Src: []string{"initrd", ctx.Host.Initrd}, Dst: "current/initrd"},
}
// layers
for _, layer := range cfg.Layers {
layerVersion := ctx.Host.Versions[layer]
if layerVersion == "" {
return fmt.Errorf("layer %q not mapped to a version", layer)
}
copies = append(copies,
distCopy{
Src: []string{"layers", layer, layerVersion},
Dst: filepath.Join("current", "layers", layer+".fs"),
})
}
for _, copy := range copies {
outPath, err := ctx.distFetch(copy.Src...)
if err != nil { if err != nil {
return err return
} }
f, err := os.Open(outPath) err = archAdd("current/vmlinuz", kernelBytes)
if err != nil { if err != nil {
return err return
} }
defer f.Close() // initrd
initrd := new(bytes.Buffer)
stat, err := f.Stat() err = buildInitrdV2(initrd, ctx)
if err != nil { if err != nil {
return err return
} }
if err = arch.WriteHeader(&tar.Header{ err = archAdd("current/initrd", initrd.Bytes())
Name: copy.Dst,
Size: stat.Size(),
}); err != nil {
return err
}
_, err = io.Copy(arch, f)
if err != nil { if err != nil {
return err return
}
} }
// done
return nil
}
func buildBootEFITar(out io.Writer, ctx *renderContext) (err error) {
arch := tar.NewWriter(out)
defer arch.Close()
archAdd := func(path string, ba []byte) (err error) {
err = arch.WriteHeader(&tar.Header{Name: path, Mode: 0640, Size: int64(len(ba))})
if err != nil {
return
}
_, err = arch.Write(ba)
return
}
const (
prefix = "EFI/dkl/"
efiPrefix = "\\EFI\\dkl\\"
)
// boot.csv
// -> annoyingly it's UTF-16...
bootCsvBytes := utf16.FromUTF8([]byte("" +
"current_kernel.efi,dkl current,initrd=" + efiPrefix + "current_initrd.img,Direktil current\n" +
"previous_kernel.efi,dkl previous,initrd=" + efiPrefix + "previous_initrd.img,Direktil previous\n"))
err = archAdd(prefix+"BOOT.CSV", []byte(bootCsvBytes))
if err != nil {
return
}
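// Reading of the format (not stated in the code, so treat as an assumption): each
// BOOT.CSV line is "filename,label,options,description", the layout consumed by
// shim's fallback boot loader, which also expects UTF-16 content; that matches the
// initrd= option above pointing at \EFI\dkl\*_initrd.img.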
// kernel
kernelPath, err := ctx.distFetch("kernels", ctx.Host.Kernel)
if err != nil {
return
}
kernelBytes, err := ioutil.ReadFile(kernelPath)
if err != nil {
return
}
err = archAdd(prefix+"current_kernel.efi", kernelBytes)
if err != nil {
return
}
// initrd
initrd := new(bytes.Buffer)
err = buildInitrdV2(initrd, ctx)
if err != nil {
return
}
err = archAdd(prefix+"current_initrd.img", initrd.Bytes())
if err != nil {
return
}
// done
return nil return nil
} }

View File

@ -0,0 +1,143 @@
package main
import (
"archive/tar"
"encoding/json"
"flag"
"fmt"
"io"
"log"
"net/http"
"os"
yaml "gopkg.in/yaml.v2"
"novit.tech/direktil/pkg/cpiocat"
)
var initrdV2 = flag.String("initrd-v2", "2.1.0", "initrd V2 version (temporary flag)") // FIXME
func renderBootstrapConfig(w http.ResponseWriter, r *http.Request, ctx *renderContext, asJson bool) (err error) {
log.Printf("sending bootstrap config for %q", ctx.Host.Name)
_, cfg, err := ctx.BootstrapConfig()
if err != nil {
return err
}
if asJson {
err = json.NewEncoder(w).Encode(cfg)
} else {
err = yaml.NewEncoder(w).Encode(cfg)
}
return nil
}
func buildInitrdV2(out io.Writer, ctx *renderContext) (err error) {
_, cfg, err := ctx.Config()
if err != nil {
return
}
cat := cpiocat.New(out)
// initrd
initrdPath, err := ctx.distFetch("initrd", *initrdV2)
if err != nil {
return
}
cat.AppendArchFile(initrdPath)
// embedded layers (modules)
for _, layer := range cfg.Layers {
switch layer {
case "modules":
layerVersion := ctx.Host.Versions[layer]
modulesPath, err := ctx.distFetch("layers", layer, layerVersion)
if err != nil {
return err
}
cat.AppendFile(modulesPath, "modules.sqfs")
}
}
// config
cfgBytes, _, err := ctx.BootstrapConfig()
if err != nil {
return
}
cat.AppendBytes(cfgBytes, "config.yaml", 0600)
// ssh keys
// FIXME we want a bootstrap-stage key instead of the real host key
cat.AppendBytes(cfg.FileContent("/etc/ssh/ssh_host_rsa_key"), "id_rsa", 0600)
return cat.Close()
}
func buildBootstrap(out io.Writer, ctx *renderContext) (err error) {
arch := tar.NewWriter(out)
defer arch.Close()
// config
cfgBytes, cfg, err := ctx.Config()
if err != nil {
return err
}
err = arch.WriteHeader(&tar.Header{Name: "config.yaml", Size: int64(len(cfgBytes))})
if err != nil {
return
}
_, err = arch.Write(cfgBytes)
if err != nil {
return
}
// layers
for _, layer := range cfg.Layers {
if layer == "modules" {
continue // modules are with the kernel in boot v2
}
layerVersion := ctx.Host.Versions[layer]
if layerVersion == "" {
return fmt.Errorf("layer %q not mapped to a version", layer)
}
outPath, err := ctx.distFetch("layers", layer, layerVersion)
if err != nil {
return err
}
f, err := os.Open(outPath)
if err != nil {
return err
}
defer f.Close()
stat, err := f.Stat()
if err != nil {
return err
}
if err = arch.WriteHeader(&tar.Header{
Name: layer + ".fs",
Size: stat.Size(),
}); err != nil {
return err
}
_, err = io.Copy(arch, f)
if err != nil {
return err
}
}
return nil
}

View File

@@ -25,7 +25,7 @@ func casCleaner() {
 func cleanCAS() error {
 cfg, err := readConfig()
 if err != nil {
-return err
+return nil
 }
 activeTags := make([]string, len(cfg.Hosts))

View File

@ -0,0 +1,169 @@
package main
import (
"encoding/json"
"fmt"
"log"
"path"
"github.com/cloudflare/cfssl/csr"
yaml "gopkg.in/yaml.v2"
"novit.tech/direktil/pkg/config"
)
var templateFuncs = map[string]interface{}{
"password": func(cluster, name string) (password string, err error) {
password = secretData.Password(cluster, name)
if len(password) == 0 {
err = fmt.Errorf("password %q not defined for cluster %q", name, cluster)
}
return
},
"token": func(cluster, name string) (s string, err error) {
return secretData.Token(cluster, name)
},
"ca_key": func(cluster, name string) (s string, err error) {
ca, err := secretData.CA(cluster, name)
if err != nil {
return
}
s = string(ca.Key)
return
},
"ca_crt": func(cluster, name string) (s string, err error) {
ca, err := secretData.CA(cluster, name)
if err != nil {
return
}
s = string(ca.Cert)
return
},
"ca_dir": func(cluster, name string) (s string, err error) {
ca, err := secretData.CA(cluster, name)
if err != nil {
return
}
dir := "/etc/tls-ca/" + name
return asYaml([]config.FileDef{
{
Path: path.Join(dir, "ca.crt"),
Mode: 0644,
Content: string(ca.Cert),
},
{
Path: path.Join(dir, "ca.key"),
Mode: 0600,
Content: string(ca.Key),
},
})
},
"tls_key": func(cluster, caName, name, profile, label, reqJson string) (s string, err error) {
kc, err := getKeyCert(cluster, caName, name, profile, label, reqJson)
if err != nil {
return
}
s = string(kc.Key)
return
},
"tls_crt": func(cluster, caName, name, profile, label, reqJson string) (s string, err error) {
kc, err := getKeyCert(cluster, caName, name, profile, label, reqJson)
if err != nil {
return
}
s = string(kc.Cert)
return
},
"tls_dir": func(dir, cluster, caName, name, profile, label, reqJson string) (s string, err error) {
ca, err := secretData.CA(cluster, caName)
if err != nil {
return
}
kc, err := getKeyCert(cluster, caName, name, profile, label, reqJson)
if err != nil {
return
}
return asYaml([]config.FileDef{
{
Path: path.Join(dir, "ca.crt"),
Mode: 0644,
Content: string(ca.Cert),
},
{
Path: path.Join(dir, "tls.crt"),
Mode: 0644,
Content: string(kc.Cert),
},
{
Path: path.Join(dir, "tls.key"),
Mode: 0600,
Content: string(kc.Key),
},
})
},
"ssh_host_keys": func(dir, cluster, host string) (s string, err error) {
pairs, err := secretData.SSHKeyPairs(cluster, host)
if err != nil {
return
}
files := make([]config.FileDef, 0, len(pairs)*2)
for _, pair := range pairs {
basePath := path.Join(dir, "ssh_host_"+pair.Type+"_key")
files = append(files, []config.FileDef{
{
Path: basePath,
Mode: 0600,
Content: pair.Private,
},
{
Path: basePath + ".pub",
Mode: 0644,
Content: pair.Public,
},
}...)
}
return asYaml(files)
},
}
func getKeyCert(cluster, caName, name, profile, label, reqJson string) (kc *KeyCert, err error) {
certReq := &csr.CertificateRequest{
KeyRequest: csr.NewKeyRequest(),
}
err = json.Unmarshal([]byte(reqJson), certReq)
if err != nil {
log.Print("CSR unmarshal failed on: ", reqJson)
return
}
return secretData.KeyCert(cluster, caName, name, profile, label, certReq)
}
func asYaml(v interface{}) (string, error) {
ba, err := yaml.Marshal(v)
if err != nil {
return "", err
}
return string(ba), nil
}
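
A small sketch of how these functions are consumed: the host configuration text rendered by the local server goes through text/template with Funcs(templateFuncs), matching the render.go change later in this compare. The snippet and its cluster/token names are example values, and it assumes the secret store has already been loaded:

package main

import (
	"os"
	"text/template"
)

// renderExample expands a second-stage placeholder the way ctx.render does.
func renderExample() error {
	// example values; real configs embed these placeholders via the dir2config pass
	const snippet = `token: {{ token "demo-cluster" "kubelet" }}`

	tmpl, err := template.New("example").Funcs(templateFuncs).Parse(snippet)
	if err != nil {
		return err
	}
	// calling the "token" func requires secretData to be loaded, as at server startup
	return tmpl.Execute(os.Stdout, nil)
}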

View File

@@ -4,7 +4,7 @@ import (
 "flag"
 "path/filepath"
-"novit.nc/direktil/pkg/localconfig"
+"novit.tech/direktil/pkg/localconfig"
 )
 var (

View File

@ -0,0 +1,9 @@
package main
import (
"net/http"
"m.cluseau.fr/go/httperr"
)
var ErrNotFound = httperr.NewStd(404, http.StatusNotFound, "not found")

View File

@@ -8,7 +8,7 @@ import (
 "net/http"
 "os"
-cpio "github.com/cavaliercoder/go-cpio"
+cpio "github.com/cavaliergopher/cpio"
 yaml "gopkg.in/yaml.v2"
 )
@@ -58,7 +58,7 @@ func buildInitrd(out io.Writer, ctx *renderContext) error {
 } {
 archive.WriteHeader(&cpio.Header{
 Name: dir,
-Mode: 0600 | cpio.ModeDir,
+Mode: cpio.FileMode(0600 | os.ModeDir),
 })
 }

View File

@@ -1,8 +1,10 @@
 package main
 import (
+"io"
 "log"
 "net/http"
+"os"
 )
 func renderKernel(w http.ResponseWriter, r *http.Request, ctx *renderContext) error {
@@ -15,3 +17,19 @@ func renderKernel(w http.ResponseWriter, r *http.Request, ctx *renderContext) er
 http.ServeFile(w, r, path)
 return nil
 }
+func fetchKernel(out io.Writer, ctx *renderContext) (err error) {
+path, err := ctx.distFetch("kernels", ctx.Host.Kernel)
+if err != nil {
+return err
+}
+in, err := os.Open(path)
+if err != nil {
+return
+}
+defer in.Close()
+_, err = io.Copy(out, in)
+return
+}

View File

@ -4,13 +4,17 @@ import (
"flag" "flag"
"log" "log"
"net/http" "net/http"
"os"
"path/filepath" "path/filepath"
restful "github.com/emicklei/go-restful" restful "github.com/emicklei/go-restful"
"github.com/mcluseau/go-swagger-ui" swaggerui "github.com/mcluseau/go-swagger-ui"
"novit.nc/direktil/pkg/cas" "m.cluseau.fr/go/watchable/streamsse"
"novit.nc/direktil/local-server/pkg/apiutils" "novit.tech/direktil/pkg/cas"
dlshtml "novit.tech/direktil/local-server/html"
"novit.tech/direktil/local-server/pkg/apiutils"
) )
const ( const (
@ -23,25 +27,58 @@ var (
certFile = flag.String("tls-cert", etcDir+"/server.crt", "Server TLS certificate") certFile = flag.String("tls-cert", etcDir+"/server.crt", "Server TLS certificate")
keyFile = flag.String("tls-key", etcDir+"/server.key", "Server TLS key") keyFile = flag.String("tls-key", etcDir+"/server.key", "Server TLS key")
autoUnlock = flag.String("auto-unlock", "", "Auto-unlock store (testing only!)")
casStore cas.Store casStore cas.Store
) )
func main() { func main() {
log.SetFlags(log.LstdFlags | log.Lshortfile)
flag.Parse() flag.Parse()
if *address == "" && *tlsAddress == "" { if *address == "" && *tlsAddress == "" {
log.Fatal("no listen address given") log.Fatal("no listen address given")
} }
computeUIHash()
openSecretStore()
{
autoUnlock := *autoUnlock
if autoUnlock == "" {
autoUnlock = os.Getenv("DLS_AUTO_UNLOCK")
}
if autoUnlock != "" {
log.Printf("auto-unlocking the store")
err := unlockSecretStore([]byte(autoUnlock))
if err != nil {
log.Fatal(err)
}
log.Print("store auto-unlocked, admin token is ", *adminToken)
}
os.Setenv("DLS_AUTO_UNLOCK", "")
}
casStore = cas.NewDir(filepath.Join(*dataDir, "cache")) casStore = cas.NewDir(filepath.Join(*dataDir, "cache"))
go casCleaner() go casCleaner()
apiutils.Setup(func() { apiutils.Setup(func() {
restful.Add(buildWS()) registerWS(restful.DefaultContainer)
}) })
swaggerui.HandleAt("/swagger-ui/") swaggerui.HandleAt("/swagger-ui/")
staticHandler := http.FileServer(http.FS(dlshtml.FS))
http.Handle("/favicon.ico", staticHandler)
http.Handle("/ui/", staticHandler)
http.Handle("/public-state", streamsse.StreamHandler(wPublicState))
http.Handle("/state", requireAdmin(streamsse.StreamHandler(wState)))
if *address != "" { if *address != "" {
log.Print("HTTP listening on ", *address) log.Print("HTTP listening on ", *address)
go log.Fatal(http.ListenAndServe(*address, nil)) go log.Fatal(http.ListenAndServe(*address, nil))

View File

@ -4,36 +4,47 @@ import (
"bytes" "bytes"
"crypto/sha256" "crypto/sha256"
"encoding/hex" "encoding/hex"
"encoding/json"
"io" "io"
"log" "log"
"net/http" "net/http"
"path" "net/url"
"path/filepath" "path/filepath"
"text/template" "text/template"
cfsslconfig "github.com/cloudflare/cfssl/config" cfsslconfig "github.com/cloudflare/cfssl/config"
"github.com/cloudflare/cfssl/csr" restful "github.com/emicklei/go-restful"
yaml "gopkg.in/yaml.v2" yaml "gopkg.in/yaml.v2"
"novit.nc/direktil/pkg/config" "novit.tech/direktil/pkg/config"
"novit.nc/direktil/pkg/localconfig" "novit.tech/direktil/pkg/localconfig"
bsconfig "novit.tech/direktil/pkg/bootstrapconfig"
) )
var cmdlineParam = restful.QueryParameter("cmdline", "Linux kernel cmdline addition")
type renderContext struct { type renderContext struct {
Host *localconfig.Host Host *localconfig.Host
SSLConfig string SSLConfig string
// Linux kernel extra cmdline
CmdLine string `yaml:"-"`
} }
func renderCtx(w http.ResponseWriter, r *http.Request, ctx *renderContext, what string, func renderCtx(w http.ResponseWriter, r *http.Request, ctx *renderContext, what string,
create func(out io.Writer, ctx *renderContext) error) error { create func(out io.Writer, ctx *renderContext) error) error {
log.Printf("sending %s for %q", what, ctx.Host.Name)
tag, err := ctx.Tag() tag, err := ctx.Tag()
if err != nil { if err != nil {
return err return err
} }
ctx.CmdLine = r.URL.Query().Get(cmdlineParam.Data().Name)
if ctx.CmdLine != "" {
what = what + "?cmdline=" + url.QueryEscape(ctx.CmdLine)
}
// get it or create it // get it or create it
content, meta, err := casStore.GetOrCreate(tag, what, func(out io.Writer) error { content, meta, err := casStore.GetOrCreate(tag, what, func(out io.Writer) error {
log.Printf("building %s for %q", what, ctx.Host.Name) log.Printf("building %s for %q", what, ctx.Host.Name)
@ -45,6 +56,7 @@ func renderCtx(w http.ResponseWriter, r *http.Request, ctx *renderContext, what
} }
// serve it // serve it
log.Printf("sending %s for %q", what, ctx.Host.Name)
http.ServeContent(w, r, what, meta.ModTime(), content) http.ServeContent(w, r, what, meta.ModTime(), content)
return nil return nil
} }
@ -79,9 +91,37 @@ func newRenderContext(host *localconfig.Host, cfg *localconfig.Config) (ctx *ren
} }
func (ctx *renderContext) Config() (ba []byte, cfg *config.Config, err error) { func (ctx *renderContext) Config() (ba []byte, cfg *config.Config, err error) {
ba, err = ctx.render(ctx.Host.Config)
if err != nil {
return
}
cfg = &config.Config{}
if err = yaml.Unmarshal(ba, cfg); err != nil {
return
}
return
}
func (ctx *renderContext) BootstrapConfig() (ba []byte, cfg *bsconfig.Config, err error) {
ba, err = ctx.render(ctx.Host.BootstrapConfig)
if err != nil {
return
}
cfg = &bsconfig.Config{}
if err = yaml.Unmarshal(ba, cfg); err != nil {
return
}
return
}
func (ctx *renderContext) render(templateText string) (ba []byte, err error) {
tmpl, err := template.New(ctx.Host.Name + "/config"). tmpl, err := template.New(ctx.Host.Name + "/config").
Funcs(ctx.templateFuncs()). Funcs(templateFuncs).
Parse(ctx.Host.Config) Parse(templateText)
if err != nil { if err != nil {
return return
@ -100,139 +140,9 @@ func (ctx *renderContext) Config() (ba []byte, cfg *config.Config, err error) {
} }
ba = buf.Bytes() ba = buf.Bytes()
cfg = &config.Config{}
if err = yaml.Unmarshal(buf.Bytes(), cfg); err != nil {
return return
} }
return
}
func (ctx *renderContext) templateFuncs() map[string]interface{} {
getKeyCert := func(cluster, caName, name, profile, label, reqJson string) (kc *KeyCert, err error) {
certReq := &csr.CertificateRequest{
KeyRequest: csr.NewBasicKeyRequest(),
}
err = json.Unmarshal([]byte(reqJson), certReq)
if err != nil {
log.Print("CSR unmarshal failed on: ", reqJson)
return
}
return secretData.KeyCert(cluster, caName, name, profile, label, certReq)
}
asYaml := func(v interface{}) (string, error) {
ba, err := yaml.Marshal(v)
if err != nil {
return "", err
}
return string(ba), nil
}
return map[string]interface{}{
"token": func(cluster, name string) (s string, err error) {
return secretData.Token(cluster, name)
},
"ca_key": func(cluster, name string) (s string, err error) {
ca, err := secretData.CA(cluster, name)
if err != nil {
return
}
s = string(ca.Key)
return
},
"ca_crt": func(cluster, name string) (s string, err error) {
ca, err := secretData.CA(cluster, name)
if err != nil {
return
}
s = string(ca.Cert)
return
},
"ca_dir": func(cluster, name string) (s string, err error) {
ca, err := secretData.CA(cluster, name)
if err != nil {
return
}
dir := "/etc/tls-ca/" + name
return asYaml([]config.FileDef{
{
Path: path.Join(dir, "ca.crt"),
Mode: 0644,
Content: string(ca.Cert),
},
{
Path: path.Join(dir, "ca.key"),
Mode: 0600,
Content: string(ca.Key),
},
})
},
"tls_key": func(cluster, caName, name, profile, label, reqJson string) (s string, err error) {
kc, err := getKeyCert(cluster, caName, name, profile, label, reqJson)
if err != nil {
return
}
s = string(kc.Key)
return
},
"tls_crt": func(cluster, caName, name, profile, label, reqJson string) (s string, err error) {
kc, err := getKeyCert(cluster, caName, name, profile, label, reqJson)
if err != nil {
return
}
s = string(kc.Cert)
return
},
"tls_dir": func(dir, cluster, caName, name, profile, label, reqJson string) (s string, err error) {
ca, err := secretData.CA(cluster, caName)
if err != nil {
return
}
kc, err := getKeyCert(cluster, caName, name, profile, label, reqJson)
if err != nil {
return
}
return asYaml([]config.FileDef{
{
Path: path.Join(dir, "ca.crt"),
Mode: 0644,
Content: string(ca.Cert),
},
{
Path: path.Join(dir, "tls.crt"),
Mode: 0644,
Content: string(kc.Cert),
},
{
Path: path.Join(dir, "tls.key"),
Mode: 0600,
Content: string(kc.Key),
},
})
},
}
}
func (ctx *renderContext) distFilePath(path ...string) string { func (ctx *renderContext) distFilePath(path ...string) string {
return filepath.Join(append([]string{*dataDir, "dist"}, path...)...) return filepath.Join(append([]string{*dataDir, "dist"}, path...)...)
} }

View File

@ -0,0 +1,325 @@
package main
import (
"crypto/rand"
"encoding/base32"
"encoding/json"
"log"
"net/http"
"os"
"path/filepath"
"sort"
"strings"
"sync"
restful "github.com/emicklei/go-restful"
"m.cluseau.fr/go/httperr"
"novit.tech/direktil/local-server/secretstore"
)
var secStore *secretstore.Store
func secStorePath(name string) string { return filepath.Join(*dataDir, "secrets", name) }
func secKeysStorePath() string { return secStorePath(".keys") }
func openSecretStore() {
var err error
keysPath := secKeysStorePath()
if err := os.MkdirAll(filepath.Dir(filepath.Dir(keysPath)), 0755); err != nil {
log.Fatal("failed to create dirs: ", err)
}
if err := os.MkdirAll(filepath.Dir(keysPath), 0700); err != nil {
log.Fatal("failed to secret store dir: ", err)
}
secStore, err = secretstore.Open(keysPath)
switch {
case err == nil:
wPublicState.Change(func(v *PublicState) {
v.Store.New = false
v.Store.Open = false
})
case os.IsNotExist(err):
secStore = secretstore.New()
wPublicState.Change(func(v *PublicState) {
v.Store.New = true
v.Store.Open = false
})
default:
log.Fatal("failed to open keys store: ", err)
}
}
var (
unlockMutex = sync.Mutex{}
ErrStoreAlreadyUnlocked = httperr.NewStd(http.StatusConflict, 1, "store already unlocked")
ErrInvalidPassphrase = httperr.NewStd(http.StatusBadRequest, 2, "invalid passphrase")
)
func unlockSecretStore(passphrase []byte) *httperr.Error {
unlockMutex.Lock()
defer unlockMutex.Unlock()
if secStore.Unlocked() {
return ErrStoreAlreadyUnlocked
}
if secStore.IsNew() {
err := secStore.Init(passphrase)
if err != nil {
return httperr.New(http.StatusInternalServerError, err)
}
err = secStore.SaveTo(secKeysStorePath())
if err != nil {
log.Print("secret store save error: ", err)
secStore.Close()
return httperr.New(http.StatusInternalServerError, err)
}
} else {
if !secStore.Unlock([]byte(passphrase)) {
return ErrInvalidPassphrase
}
}
token := ""
if err := readSecret("admin-token", &token); err != nil {
if !os.IsNotExist(err) {
log.Print("failed to read admin token: ", err)
secStore.Close()
return httperr.New(http.StatusInternalServerError, err)
}
randBytes := make([]byte, 32)
_, err := rand.Read(randBytes)
if err != nil {
log.Print("rand read error: ", err)
secStore.Close()
return httperr.New(http.StatusInternalServerError, err)
}
token = base32.StdEncoding.WithPadding(base32.NoPadding).EncodeToString(randBytes)
err = writeSecret("admin-token", token)
if err != nil {
log.Print("write error: ", err)
secStore.Close()
return httperr.New(http.StatusInternalServerError, err)
}
log.Print("wrote new admin token")
}
*adminToken = token
wPublicState.Change(func(v *PublicState) {
v.Store.New = false
v.Store.Open = true
})
go updateState()
go migrateSecrets()
return nil
}
func readSecret(name string, value any) (err error) {
f, err := os.Open(secStorePath(name + ".data"))
if err != nil {
return
}
defer f.Close()
in, err := secStore.NewReader(f)
if err != nil {
return
}
return json.NewDecoder(in).Decode(value)
}
func writeSecret(name string, value any) (err error) {
path := secStorePath(name + ".data.new")
if err = os.MkdirAll(filepath.Dir(path), 0700); err != nil {
return
}
f, err := os.Create(path)
if err != nil {
return
}
err = func() (err error) {
defer f.Close()
out, err := secStore.NewWriter(f)
if err != nil {
return
}
return json.NewEncoder(out).Encode(value)
}()
if err != nil {
return
}
err = os.Rename(f.Name(), secStorePath(name+".data"))
if err != nil {
return
}
go updateState()
return
}
var secL sync.Mutex
func updateSecret[T any](name string, update func(*T)) (err error) {
secL.Lock()
defer secL.Unlock()
v := new(T)
err = readSecret(name, v)
if err != nil {
if !os.IsNotExist(err) {
return
}
err = nil
}
update(v)
return writeSecret(name, *v)
}
func updateSecretWithKey[T any](name, key string, update func(v *T)) (err error) {
secL.Lock()
defer secL.Unlock()
kvs := map[string]*T{}
err = readSecret(name, &kvs)
if err != nil {
if !os.IsNotExist(err) {
return
}
err = nil
}
update(kvs[key])
return writeSecret(name, kvs)
}
type KVSecrets[T any] struct{ Name string }
func (s KVSecrets[T]) Data() (kvs map[string]T, err error) {
kvs = make(map[string]T)
err = readSecret(s.Name, &kvs)
if err != nil {
if !os.IsNotExist(err) {
return
}
err = nil
}
return
}
func (s KVSecrets[T]) Keys(prefix string) (keys []string, err error) {
kvs, err := s.Data()
if err != nil {
return
}
keys = make([]string, 0, len(kvs))
for k := range kvs {
if !strings.HasPrefix(k, prefix) {
continue
}
keys = append(keys, k[len(prefix):])
}
sort.Strings(keys)
return
}
func (s KVSecrets[T]) Get(key string) (v T, found bool, err error) {
kvs, err := s.Data()
if err != nil {
return
}
v, found = kvs[key]
return
}
func (s KVSecrets[T]) Put(key string, v T) (err error) {
secL.Lock()
defer secL.Unlock()
kvs, err := s.Data()
if err != nil {
return
}
kvs[key] = v
err = writeSecret(s.Name, kvs)
return
}
func (s KVSecrets[T]) WsList(resp *restful.Response, prefix string) {
keys, err := s.Keys(prefix)
if err != nil {
httperr.New(http.StatusInternalServerError, err).WriteJSON(resp.ResponseWriter)
return
}
resp.WriteEntity(keys)
}
func (s KVSecrets[T]) WsGet(resp *restful.Response, key string) {
keys, found, err := s.Get(key)
if err != nil {
httperr.New(http.StatusInternalServerError, err).WriteJSON(resp.ResponseWriter)
return
}
if !found {
ErrNotFound.WriteJSON(resp.ResponseWriter)
return
}
resp.WriteEntity(keys)
}
func (s KVSecrets[T]) WsPut(req *restful.Request, resp *restful.Response, key string) {
v := new(T)
err := req.ReadEntity(v)
if err != nil {
httperr.New(http.StatusBadRequest, err).WriteJSON(resp.ResponseWriter)
return
}
err = s.Put(key, *v)
if err != nil {
httperr.New(http.StatusInternalServerError, err).WriteJSON(resp.ResponseWriter)
return
}
}
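
A short usage sketch of this generic helper (the bucket name and key below are hypothetical; the migration code in the next file uses the same Put pattern with cluster-prefixed keys, and any access assumes the secret store is open):

package main

import "fmt"

// exampleSecrets is a hypothetical typed bucket, persisted as "examples.data"
// under the secrets directory by writeSecret.
var exampleSecrets = KVSecrets[string]{Name: "examples"}

func kvExample() (string, error) {
	// keys are conventionally "<cluster>/<name>"
	if err := exampleSecrets.Put("demo-cluster/foo", "bar"); err != nil {
		return "", err
	}

	v, found, err := exampleSecrets.Get("demo-cluster/foo")
	if err != nil {
		return "", err
	}
	if !found {
		return "", fmt.Errorf("demo-cluster/foo not found")
	}
	return v, nil
}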

View File

@ -0,0 +1,75 @@
package main
import (
"log"
"os"
cfsslconfig "github.com/cloudflare/cfssl/config"
)
func migrateSecrets() {
if _, err := os.Stat(secretDataPath()); err != nil {
if os.IsNotExist(err) {
return
}
log.Print("not migrating old secrets: ", err)
return
}
log.Print("migrating old secrets")
log := log.New(log.Default().Writer(), "secrets migration: ", log.Flags()|log.Lmsgprefix)
// load secrets
cfg, err := readConfig()
if err != nil {
log.Fatal(err)
return
}
var sslCfg *cfsslconfig.Config
if len(cfg.SSLConfig) == 0 {
sslCfg = &cfsslconfig.Config{}
} else {
sslCfg, err = cfsslconfig.LoadConfig([]byte(cfg.SSLConfig))
if err != nil {
return
}
}
if err := loadSecretData(sslCfg); err != nil {
log.Fatal(err)
return
}
for clusterName, cluster := range secretData.clusters {
for k, v := range cluster.Tokens {
err = clusterTokens.Put(clusterName+"/"+k, v)
if err != nil {
log.Fatal(err)
return
}
}
for k, v := range cluster.Passwords {
err = clusterPasswords.Put(clusterName+"/"+k, v)
if err != nil {
log.Fatal(err)
return
}
}
for caName, ca := range cluster.CAs {
clusterCAs.Put(clusterName+"/"+caName, CA{Key: ca.Key, Cert: ca.Cert})
for signedName, signed := range ca.Signed {
clusterCASignedKeys.Put(clusterName+"/"+caName+"/"+signedName, *signed)
}
}
// TODO
}
}

View File

@ -1,6 +1,7 @@
package main package main
import ( import (
"crypto"
"crypto/rand" "crypto/rand"
"encoding/base32" "encoding/base32"
"encoding/json" "encoding/json"
@ -10,8 +11,12 @@ import (
"net" "net"
"os" "os"
"path/filepath" "path/filepath"
"sort"
"sync" "sync"
"time"
"github.com/cespare/xxhash"
"github.com/cloudflare/cfssl/certinfo"
"github.com/cloudflare/cfssl/config" "github.com/cloudflare/cfssl/config"
"github.com/cloudflare/cfssl/csr" "github.com/cloudflare/cfssl/csr"
"github.com/cloudflare/cfssl/helpers" "github.com/cloudflare/cfssl/helpers"
@ -25,11 +30,14 @@ import (
var ( var (
secretData *SecretData secretData *SecretData
DontSave = false
) )
type SecretData struct { type SecretData struct {
l sync.Mutex l sync.Mutex
prevHash uint64
clusters map[string]*ClusterSecrets clusters map[string]*ClusterSecrets
changed bool changed bool
config *config.Config config *config.Config
@ -38,6 +46,8 @@ type SecretData struct {
type ClusterSecrets struct { type ClusterSecrets struct {
CAs map[string]*CA CAs map[string]*CA
Tokens map[string]string Tokens map[string]string
Passwords map[string]string
SSHKeyPairs map[string][]SSHKeyPair
} }
type CA struct { type CA struct {
@ -81,6 +91,8 @@ func loadSecretData(config *config.Config) (err error) {
return return
} }
sd.prevHash = xxhash.Sum64(ba)
secretData = sd secretData = sd
return return
} }
@ -89,16 +101,40 @@ func (sd *SecretData) Changed() bool {
return sd.changed return sd.changed
} }
func (sd *SecretData) Save() error { func (sd *SecretData) Save() (err error) {
if DontSave {
return
}
sd.l.Lock() sd.l.Lock()
defer sd.l.Unlock() defer sd.l.Unlock()
log.Info("Saving secret data")
ba, err := json.Marshal(sd.clusters) ba, err := json.Marshal(sd.clusters)
if err != nil { if err != nil {
return err return
}
h := xxhash.Sum64(ba)
if h == sd.prevHash {
return
}
log.Info("Saving secret data")
err = ioutil.WriteFile(secretDataPath(), ba, 0600)
if err == nil {
sd.prevHash = h
}
return
}
func newClusterSecrets() *ClusterSecrets {
return &ClusterSecrets{
CAs: make(map[string]*CA),
Tokens: make(map[string]string),
Passwords: make(map[string]string),
} }
return ioutil.WriteFile(secretDataPath(), ba, 0600)
} }
func (sd *SecretData) cluster(name string) (cs *ClusterSecrets) { func (sd *SecretData) cluster(name string) (cs *ClusterSecrets) {
@ -112,15 +148,47 @@ func (sd *SecretData) cluster(name string) (cs *ClusterSecrets) {
log.Info("secret-data: new cluster: ", name) log.Info("secret-data: new cluster: ", name)
cs = &ClusterSecrets{ cs = newClusterSecrets()
CAs: make(map[string]*CA),
Tokens: make(map[string]string),
}
sd.clusters[name] = cs sd.clusters[name] = cs
sd.changed = true sd.changed = true
return return
} }
func (sd *SecretData) Passwords(cluster string) (passwords []string) {
cs := sd.cluster(cluster)
passwords = make([]string, 0, len(cs.Passwords))
for name := range cs.Passwords {
passwords = append(passwords, name)
}
sort.Strings(passwords)
return
}
func (sd *SecretData) Password(cluster, name string) (password string) {
cs := sd.cluster(cluster)
if cs.Passwords == nil {
cs.Passwords = make(map[string]string)
}
password = cs.Passwords[name]
return
}
func (sd *SecretData) SetPassword(cluster, name, password string) {
cs := sd.cluster(cluster)
if cs.Passwords == nil {
cs.Passwords = make(map[string]string)
}
cs.Passwords[name] = password
sd.changed = true
}
func (sd *SecretData) Token(cluster, name string) (token string, err error) {
	cs := sd.cluster(cluster)
@@ -147,22 +215,35 @@ func (sd *SecretData) Token(cluster, name string) (token string, err error) {
	return
}

-func (sd *SecretData) CA(cluster, name string) (ca *CA, err error) {
+func (sd *SecretData) RenewCACert(cluster, name string) (err error) {
	cs := sd.cluster(cluster)
-	ca, ok := cs.CAs[name]
-	if ok {
+	ca := cs.CAs[name]
+
+	var signer crypto.Signer
+	signer, err = helpers.ParsePrivateKeyPEM(ca.Key)
+	if err != nil {
+		return
+	}
+
+	newCert, _, err := initca.NewFromSigner(newCACertReq(), signer)
+	if err != nil {
		return
	}

	sd.l.Lock()
	defer sd.l.Unlock()

-	log.Info("secret-data: new CA in cluster ", cluster, ": ", name)
+	cs.CAs[name].Cert = newCert
+	sd.changed = true

-	req := &csr.CertificateRequest{
+	return
+}
+
+func newCACertReq() *csr.CertificateRequest {
+	return &csr.CertificateRequest{
		CN: "Direktil Local Server",
-		KeyRequest: &csr.BasicKeyRequest{
+		KeyRequest: &csr.KeyRequest{
			A: "ecdsa",
			S: 521, // 256, 384, 521
		},
@@ -173,9 +254,43 @@ func (sd *SecretData) CA(cluster, name string) (ca *CA, err error) {
			},
		},
	}
+}
func (sd *SecretData) CA(cluster, name string) (ca *CA, err error) {
defer func() {
if err != nil {
err = fmt.Errorf("cluster %s CA %s: %w", cluster, name, err)
}
}()
cs := sd.cluster(cluster)
ca, ok := cs.CAs[name]
if ok {
checkErr := checkCertUsable(ca.Cert)
if checkErr != nil {
log.Infof("secret-data cluster %s: CA %s: regenerating certificate: %v", cluster, name, checkErr)
err = sd.RenewCACert(cluster, name)
if err != nil {
err = fmt.Errorf("renew: %w", err)
}
}
return
}
sd.l.Lock()
defer sd.l.Unlock()
log.Info("secret-data: new CA in cluster ", cluster, ": ", name)
req := newCACertReq()
	cert, _, key, err := initca.New(req)
	if err != nil {
+		err = fmt.Errorf("initca: %w", err)
		return
	}
@@ -191,6 +306,22 @@ func (sd *SecretData) CA(cluster, name string) (ca *CA, err error) {
	return
}
func checkCertUsable(certPEM []byte) error {
cert, err := certinfo.ParseCertificatePEM(certPEM)
if err != nil {
return err
}
certDuration := cert.NotAfter.Sub(cert.NotBefore)
delayBeforeRegen := certDuration / 3 // TODO allow configuration
if cert.NotAfter.Sub(time.Now()) < delayBeforeRegen {
return errors.New("too old")
}
return nil
}
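
A quick worked example of the threshold used above (a sketch with illustrative numbers, not part of the diff): checkCertUsable reports a certificate as "too old" once less than a third of its total lifetime remains, which is what later triggers regeneration.

	// Sketch: the 1/3-lifetime rule from checkCertUsable, with made-up numbers.
	certDuration := 5 * 365 * 24 * time.Hour     // NotAfter - NotBefore
	delayBeforeRegen := certDuration / 3         // ~608 days
	remaining := 300 * 24 * time.Hour            // hypothetical time left before NotAfter
	needsRenewal := remaining < delayBeforeRegen // true here, so the cert would be regenerated
	_ = needsRenewal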
func (sd *SecretData) KeyCert(cluster, caName, name, profile, label string, req *csr.CertificateRequest) (kc *KeyCert, err error) {
	for idx, host := range req.Hosts {
		if ip := net.ParseIP(host); ip != nil {
@@ -223,15 +354,21 @@ func (sd *SecretData) KeyCert(cluster, caName, name, profile, label string, req
		return
	}

+	logPrefix := fmt.Sprintf("secret-data: cluster %s: CA %s:", cluster, caName)

	rh := hash(req)
	kc, ok := ca.Signed[name]
	if ok && rh == kc.ReqHash {
+		err = checkCertUsable(kc.Cert)
+		if err == nil {
			return
+		}
+		log.Infof("%s regenerating certificate: %v", logPrefix, err)
	} else if ok {
-		log.Infof("secret-data: cluster %s: CA %s: CSR changed for %s: hash=%q previous=%q",
-			cluster, caName, name, rh, kc.ReqHash)
+		log.Infof("%s CSR changed for %s: hash=%q previous=%q", logPrefix, name, rh, kc.ReqHash)
	} else {
-		log.Infof("secret-data: cluster %s: CA %s: new CSR for %s", cluster, caName, name)
+		log.Infof("%s new CSR for %s", logPrefix, name)
	}

	sd.l.Lock()

View File

@ -0,0 +1,167 @@
package main
import (
"crypto/dsa"
"crypto/ecdsa"
"crypto/ed25519"
"crypto/elliptic"
"crypto/rand"
"crypto/rsa"
"crypto/x509"
"encoding/asn1"
"fmt"
"io/ioutil"
"os"
"os/exec"
)
type SSHKeyPair struct {
Type string
Public string
Private string
}
func (sd *SecretData) SSHKeyPairs(cluster, host string) (pairs []SSHKeyPair, err error) {
cs := sd.cluster(cluster)
if cs.SSHKeyPairs == nil {
cs.SSHKeyPairs = map[string][]SSHKeyPair{}
}
outFile, err := ioutil.TempFile("/tmp", "dls-key.")
if err != nil {
return
}
outPath := outFile.Name()
removeTemp := func() {
os.Remove(outPath)
os.Remove(outPath + ".pub")
}
defer removeTemp()
pairs = cs.SSHKeyPairs[host]
didGenerate := false
genLoop:
for _, keyType := range []string{
"rsa",
"dsa",
"ecdsa",
"ed25519",
} {
for _, pair := range pairs {
if pair.Type == keyType {
continue genLoop
}
}
didGenerate = true
removeTemp()
var out, privKey, pubKey []byte
out, err = exec.Command("ssh-keygen",
"-N", "",
"-C", "root@"+host,
"-f", outPath,
"-t", keyType).CombinedOutput()
if err != nil {
err = fmt.Errorf("ssh-keygen failed: %v: %s", err, string(out))
return
}
privKey, err = ioutil.ReadFile(outPath)
if err != nil {
return
}
os.Remove(outPath)
pubKey, err = ioutil.ReadFile(outPath + ".pub")
if err != nil {
return
}
os.Remove(outPath + ".pub")
pairs = append(pairs, SSHKeyPair{
Type: keyType,
Public: string(pubKey),
Private: string(privKey),
})
}
if didGenerate {
cs.SSHKeyPairs[host] = pairs
err = sd.Save()
}
return
}
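
A minimal usage sketch for the function above (hypothetical call site inside this package; cluster and host names are made up). Generated key pairs are cached per host in the cluster's secrets, so only missing key types trigger a new ssh-keygen run.

	// Sketch: print the public half of each cached host key.
	pairs, err := secretData.SSHKeyPairs("my-cluster", "node-1")
	if err != nil {
		panic(err) // sketch only
	}
	for _, pair := range pairs {
		fmt.Printf("%s: %s", pair.Type, pair.Public) // one public key line per generated type
	}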
func sshKeyGenDSA() (data []byte, pubKey interface{}, err error) {
privKey := &dsa.PrivateKey{}
err = dsa.GenerateParameters(&privKey.Parameters, rand.Reader, dsa.L1024N160)
if err != nil {
return
}
err = dsa.GenerateKey(privKey, rand.Reader)
if err != nil {
return
}
data, err = asn1.Marshal(*privKey)
//data, err = x509.MarshalPKCS8PrivateKey(privKey)
if err != nil {
return
}
pubKey = privKey.PublicKey
return
}
func sshKeyGenRSA() (data []byte, pubKey interface{}, err error) {
privKey, err := rsa.GenerateKey(rand.Reader, 2048)
if err != nil {
return
}
data = x509.MarshalPKCS1PrivateKey(privKey)
pubKey = privKey.Public()
return
}
func sshKeyGenECDSA() (data []byte, pubKey interface{}, err error) {
privKey, err := ecdsa.GenerateKey(elliptic.P521(), rand.Reader)
if err != nil {
return
}
data, err = x509.MarshalPKCS8PrivateKey(privKey)
if err != nil {
return
}
pubKey = privKey.Public()
return
}
func sshKeyGenED25519() (data []byte, pubKey interface{}, err error) {
pubKey, privKey, err := ed25519.GenerateKey(rand.Reader)
data, err = x509.MarshalPKCS8PrivateKey(privKey)
if err != nil {
return
}
return
}

View File

@ -0,0 +1,17 @@
package main
import "testing"
func init() {
DontSave = true
}
func TestSSHKeyGet(t *testing.T) {
sd := &SecretData{
clusters: make(map[string]*ClusterSecrets),
}
if _, err := sd.SSHKeyPairs("test", "host"); err != nil {
t.Error(err)
}
}

View File

@ -0,0 +1,125 @@
package main
import (
"log"
"m.cluseau.fr/go/watchable"
"novit.tech/direktil/pkg/localconfig"
)
type PublicState struct {
UIHash string
Store struct {
New bool
Open bool
}
}
var wPublicState = watchable.New[PublicState]()
type State struct {
HasConfig bool
Clusters []ClusterState
Hosts []HostState
Config *localconfig.Config
Downloads map[string]DownloadSpec
}
type ClusterState struct {
Name string
Addons bool
Passwords []string
Tokens []string
CAs []CAState
}
type HostState struct {
Name string
Cluster string
IPs []string
}
type CAState struct {
Name string
Signed []string
}
var wState = watchable.New[State]()
func init() {
wState.Set(State{Downloads: map[string]DownloadSpec{}})
}
func updateState() {
log.Print("updating state")
cfg, err := readConfig()
if err != nil {
wState.Change(func(v *State) { v.HasConfig = false; v.Config = nil })
return
}
if secStore.IsNew() || !secStore.Unlocked() {
wState.Change(func(v *State) { v.HasConfig = false; v.Config = nil })
return
}
// remove heavy data
clusters := make([]ClusterState, 0, len(cfg.Clusters))
for _, cluster := range cfg.Clusters {
c := ClusterState{
Name: cluster.Name,
Addons: len(cluster.Addons) != 0,
}
c.Passwords, err = clusterPasswords.Keys(c.Name + "/")
if err != nil {
log.Print("failed to read cluster passwords: ", err)
}
c.Tokens, err = clusterTokens.Keys(c.Name + "/")
if err != nil {
log.Print("failed to read cluster tokens: ", err)
}
caNames, err := clusterCAs.Keys(c.Name + "/")
if err != nil {
log.Print("failed to read cluster CAs: ", err)
}
for _, caName := range caNames {
ca := CAState{Name: caName}
signedNames, err := clusterCASignedKeys.Keys(c.Name + "/" + caName + "/")
if err != nil {
log.Print("failed to read cluster CA signed keys: ", err)
}
for _, signedName := range signedNames {
ca.Signed = append(ca.Signed, signedName)
}
c.CAs = append(c.CAs, ca)
}
clusters = append(clusters, c)
}
hosts := make([]HostState, 0, len(cfg.Hosts))
for _, host := range cfg.Hosts {
h := HostState{
Name: host.Name,
Cluster: host.ClusterName,
IPs: host.IPs,
}
hosts = append(hosts, h)
}
// done
wState.Change(func(v *State) {
v.HasConfig = true
//v.Config = cfg
v.Clusters = clusters
v.Hosts = hosts
})
}

View File

@ -0,0 +1,45 @@
package main
import (
"encoding/base32"
"io"
"io/fs"
"log"
"strings"
"github.com/cespare/xxhash"
dlshtml "novit.tech/direktil/local-server/html"
)
func computeUIHash() {
xxh := xxhash.New()
err := fs.WalkDir(dlshtml.FS, "ui", func(path string, entry fs.DirEntry, walkErr error) (err error) {
if walkErr != nil {
err = walkErr
return
}
if entry.IsDir() {
return
}
f, err := dlshtml.FS.Open(path)
if err != nil {
return
}
defer f.Close()
io.Copy(xxh, f)
return nil
})
if err != nil {
log.Fatal("failed to hash UI: ", err)
}
h := strings.ToLower(base32.HexEncoding.WithPadding(base32.NoPadding).EncodeToString(xxh.Sum(nil)))[:5]
log.Printf("UI hash: %s", h)
wPublicState.Change(func(v *PublicState) { v.UIHash = h })
}

View File

@@ -1,14 +1,21 @@
package main

import (
+	"crypto/sha1"
+	"encoding/hex"
	"flag"
+	"fmt"
	"io"
	"log"
	"net/http"
	"os"
	gopath "path"
	"path/filepath"
+	"strconv"
	"time"
+
+	"github.com/dustin/go-humanize"
+	"github.com/miolini/datacounter"
)
var ( var (
@@ -39,42 +46,82 @@ func (ctx *renderContext) distFetch(path ...string) (outPath string, err error)
		return
	}

-	tempOutPath := filepath.Join(filepath.Dir(outPath), "._part_"+filepath.Base(outPath))
-	done := make(chan error, 1)
-	go func() {
-		defer resp.Body.Close()
-		defer close(done)
-		out, err := os.Create(tempOutPath)
-		if err != nil {
-			done <- err
-			return
-		}
-		defer out.Close()
-		_, err = io.Copy(out, resp.Body)
-		done <- err
-	}()
+	defer resp.Body.Close()
+
+	if resp.StatusCode != 200 {
+		err = fmt.Errorf("wrong status: %s", resp.Status)
+		return
+	}
+
+	length, _ := strconv.Atoi(resp.Header.Get("Content-Length"))
+
+	fOut, err := os.Create(filepath.Join(filepath.Dir(outPath), "._part_"+filepath.Base(outPath)))
+	if err != nil {
+		return
+	}
+
+	hash := sha1.New()
+	body := datacounter.NewReaderCounter(resp.Body)
+	out := io.MultiWriter(fOut, hash)
+
+	done := make(chan error, 1)
+	go func() {
+		_, err = io.Copy(out, body)
+		fOut.Close()
+		if err != nil {
+			os.Remove(fOut.Name())
+		}
+		done <- err
+		close(done)
+	}()
+
+	start := time.Now()

wait:
	select {
	case <-time.After(10 * time.Second):
-		log.Print("still fetching ", subPath, "...")
+		status := ""
+		if length != 0 {
+			count := body.Count()
+			elapsedDuration := time.Since(start)
+			progress := float64(count) / float64(length)
+			elapsed := float64(elapsedDuration)
+			remaining := time.Duration(elapsed/progress - elapsed)
+			status = fmt.Sprintf(" (%.2f%%, ETA %v, %s/s)",
+				progress*100,
+				remaining.Truncate(time.Second),
+				humanize.Bytes(uint64(float64(count)/elapsedDuration.Seconds())))
+		}
+		log.Printf("still fetching %s%s...", subPath, status)
		goto wait

	case err = <-done:
		if err != nil {
			log.Print("fetch of ", subPath, " failed: ", err)
-			os.Remove(tempOutPath)
			return
		}
-		log.Print("fetch of ", subPath, " finished")
	}

-	err = os.Rename(tempOutPath, outPath)
+	hexSum := hex.EncodeToString(hash.Sum(nil))
+	log.Printf("fetch of %s finished (SHA1 checksum: %s)", subPath, hexSum)
+
+	if remoteSum := resp.Header.Get("X-Content-SHA1"); remoteSum != "" {
+		log.Printf("fetch of %s: remote SHA1 checksum: %s", subPath, remoteSum)
+		if remoteSum != hexSum {
+			err = fmt.Errorf("wrong SHA1 checksum: server=%s local=%s", remoteSum, hexSum)
+			log.Print("fetch of ", subPath, ": ", err)
+			os.Remove(fOut.Name())
+			return
+		}
+	}
+
+	err = os.Rename(fOut.Name(), outPath)
	return
}

View File

@@ -33,6 +33,10 @@ func getToken(req *restful.Request) string {
	token := req.HeaderParameter("Authorization")

+	if token == "" {
+		return req.QueryParameter("token")
+	}
+
	if !strings.HasPrefix(token, bearerPrefix) {
		return ""
	}

View File

@ -0,0 +1,35 @@
package main
import (
restful "github.com/emicklei/go-restful"
)
var clusterCAs = newClusterSecretKV[CA]("CAs")
func wsClusterCAs(req *restful.Request, resp *restful.Response) {
clusterName := req.PathParameter("cluster-name")
clusterCAs.WsList(resp, clusterName+"/")
}
func wsClusterCA(req *restful.Request, resp *restful.Response) {
clusterName := req.PathParameter("cluster-name")
name := req.PathParameter("ca-name")
clusterCAs.WsGet(resp, clusterName+"/"+name)
}
var clusterCASignedKeys = newClusterSecretKV[KeyCert]("CA-signed-keys")
func wsClusterCASignedKeys(req *restful.Request, resp *restful.Response) {
clusterName := req.PathParameter("cluster-name")
caName := req.PathParameter("ca-name")
clusterCASignedKeys.WsList(resp, clusterName+"/"+caName+"/")
}
func wsClusterCASignedKey(req *restful.Request, resp *restful.Response) {
clusterName := req.PathParameter("cluster-name")
caName := req.PathParameter("ca-name")
name := req.PathParameter("signed-name")
clusterCASignedKeys.WsGet(resp, clusterName+"/"+caName+"/"+name)
}

View File

@ -0,0 +1,30 @@
package main
import (
restful "github.com/emicklei/go-restful"
)
var clusterPasswords = newClusterSecretKV[string]("passwords")
func wsClusterPasswords(req *restful.Request, resp *restful.Response) {
clusterName := req.PathParameter("cluster-name")
clusterPasswords.WsList(resp, clusterName+"/")
}
func wsClusterPassword(req *restful.Request, resp *restful.Response) {
clusterName := req.PathParameter("cluster-name")
name := req.PathParameter("password-name")
clusterPasswords.WsGet(resp, clusterName+"/"+name)
}
func wsClusterSetPassword(req *restful.Request, resp *restful.Response) {
cluster := wsReadCluster(req, resp)
if cluster == nil {
return
}
name := req.PathParameter("password-name")
clusterPasswords.WsPut(req, resp, cluster.Name+"/"+name)
}

View File

@ -0,0 +1,19 @@
package main
import (
restful "github.com/emicklei/go-restful"
)
var clusterTokens = newClusterSecretKV[string]("tokens")
func wsClusterTokens(req *restful.Request, resp *restful.Response) {
clusterName := req.PathParameter("cluster-name")
clusterTokens.WsList(resp, clusterName+"/")
}
func wsClusterToken(req *restful.Request, resp *restful.Response) {
clusterName := req.PathParameter("cluster-name")
name := req.PathParameter("token-name")
clusterTokens.WsGet(resp, clusterName+"/"+name)
}

View File

@@ -2,11 +2,20 @@ package main
import (
	"log"
+	"sort"

	restful "github.com/emicklei/go-restful"

-	"novit.nc/direktil/pkg/localconfig"
+	"novit.tech/direktil/pkg/localconfig"
)

+var clusterSecretKVs = []string{}
+
+func newClusterSecretKV[T any](name string) KVSecrets[T] {
+	clusterSecretKVs = append(clusterSecretKVs, name)
+	return KVSecrets[T]{"clusters/" + name}
+}
+
func wsListClusters(req *restful.Request, resp *restful.Response) {
	cfg := wsReadConfig(resp)
	if cfg == nil {
@@ -59,5 +68,57 @@ func wsClusterAddons(req *restful.Request, resp *restful.Response) {
		return
	}

-	resp.Write([]byte(cluster.Addons))
+	wsRender(resp, cluster.Addons, cluster)
}
func wsClusterCACert(req *restful.Request, resp *restful.Response) {
cs := secretData.clusters[req.PathParameter("cluster-name")]
if cs == nil {
wsNotFound(req, resp)
return
}
ca := cs.CAs[req.PathParameter("ca-name")]
if ca == nil {
wsNotFound(req, resp)
return
}
resp.Write(ca.Cert)
}
func wsClusterSignedCert(req *restful.Request, resp *restful.Response) {
cs := secretData.clusters[req.PathParameter("cluster-name")]
if cs == nil {
wsNotFound(req, resp)
return
}
ca := cs.CAs[req.PathParameter("ca-name")]
if ca == nil {
wsNotFound(req, resp)
return
}
name := req.QueryParameter("name")
if name == "" {
keys := make([]string, 0, len(ca.Signed))
for k := range ca.Signed {
keys = append(keys, k)
}
sort.Strings(keys)
resp.WriteJson(keys, restful.MIME_JSON)
return
}
kc := ca.Signed[name]
if kc == nil {
wsNotFound(req, resp)
return
}
resp.Write(kc.Cert)
} }
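
The cluster secret handlers above all follow one pattern: a typed KVSecrets[T] store per secret kind, keyed under a "<cluster>/..." prefix. A hypothetical sketch of wiring up one more kind (the name and element type below are illustrative, not part of the diff):

	// Hypothetical additional secret kind, stored under "clusters/ssh-key-pairs".
	var clusterSSHKeyPairs = newClusterSecretKV[SSHKeyPair]("ssh-key-pairs")

	func wsClusterSSHKeyPairs(req *restful.Request, resp *restful.Response) {
		clusterName := req.PathParameter("cluster-name")
		clusterSSHKeyPairs.WsList(resp, clusterName+"/") // list keys stored under this cluster's prefix
	}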

View File

@@ -35,8 +35,23 @@ func writeNewConfig(reader io.Reader) (err error) {
		return
	}

-	archivesPath := filepath.Join(*dataDir, "archives")
	cfgPath := configFilePath()

+	in, err := os.Open(cfgPath)
+	if err == nil {
+		err = backupCurrentConfig(in)
+	} else if !os.IsNotExist(err) {
+		return
+	}
+
+	err = os.Rename(out.Name(), cfgPath)
+	updateState()
+	return
+}
+
+func backupCurrentConfig(in io.ReadCloser) (err error) {
+	archivesPath := filepath.Join(*dataDir, "archives")

	err = os.MkdirAll(archivesPath, 0700)
	if err != nil {
@@ -52,11 +67,6 @@ func writeNewConfig(reader io.Reader) (err error) {
	defer bck.Close()

-	in, err := os.Open(cfgPath)
-	if err != nil {
-		return
-	}
-
	gz, err := gzip.NewWriterLevel(bck, 2)
	if err != nil {
		return
@@ -66,10 +76,5 @@ func writeNewConfig(reader io.Reader) (err error) {
	gz.Close()
	in.Close()

-	if err != nil {
-		return
-	}
-
-	err = os.Rename(out.Name(), cfgPath)
	return
}

View File

@ -0,0 +1,150 @@
package main
import (
"crypto/rand"
"encoding/base32"
"log"
"net/http"
"strconv"
"time"
restful "github.com/emicklei/go-restful"
"m.cluseau.fr/go/cow"
)
type DownloadSpec struct {
Kind string
Name string
Assets []string
createdAt time.Time
}
func wsAuthorizeDownload(req *restful.Request, resp *restful.Response) {
var spec DownloadSpec
if err := req.ReadEntity(&spec); err != nil {
wsError(resp, err)
return
}
if spec.Kind == "" || spec.Name == "" || len(spec.Assets) == 0 {
resp.WriteErrorString(http.StatusBadRequest, "missing data")
return
}
randBytes := make([]byte, 32)
_, err := rand.Read(randBytes)
if err != nil {
wsError(resp, err)
return
}
token := base32.StdEncoding.WithPadding(base32.NoPadding).EncodeToString(randBytes)
spec.createdAt = time.Now()
wState.Change(func(v *State) {
cow.MapSet(&v.Downloads, token, spec)
})
log.Printf("download token created for %s %q, assets %q", spec.Kind, spec.Name, spec.Assets)
resp.WriteAsJson(token)
}
func wsDownload(req *restful.Request, resp *restful.Response) {
token := req.PathParameter("token")
asset := req.PathParameter("asset")
if token == "" || asset == "" {
wsNotFound(req, resp)
return
}
var spec DownloadSpec
found := false
wState.Change(func(v *State) {
var ok bool
spec, ok = v.Downloads[token]
if !ok {
return
}
newAssets := make([]string, 0, len(spec.Assets))
for _, a := range spec.Assets {
if a == asset {
found = true
} else {
newAssets = append(newAssets, a)
}
}
if !found {
return
}
cow.Map(&v.Downloads)
if len(newAssets) == 0 {
delete(v.Downloads, token)
} else {
spec.Assets = newAssets
v.Downloads[token] = spec
}
})
if !found {
wsNotFound(req, resp)
return
}
log.Printf("download via token %q", token)
cfg, err := readConfig()
if err != nil {
wsError(resp, err)
return
}
setHeader := func(ext string) {
resp.AddHeader("Content-Disposition", "attachment; filename="+strconv.Quote(spec.Kind+"_"+spec.Name+"_"+asset+ext))
}
switch spec.Kind {
case "cluster":
cluster := cfg.ClusterByName(spec.Name)
if cluster == nil {
wsNotFound(req, resp)
return
}
switch asset {
case "addons":
setHeader(".yaml")
resp.Write([]byte(cluster.Addons))
default:
wsNotFound(req, resp)
}
case "host":
host := cfg.Host(spec.Name)
if host == nil {
wsNotFound(req, resp)
return
}
switch asset {
case "config", "bootstrap-config":
setHeader(".yaml")
default:
setHeader("")
}
renderHost(resp.ResponseWriter, req.Request, asset, host, cfg)
default:
wsNotFound(req, resp)
}
}
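
The download API above is a two-step flow. Here is a client-side sketch in Go using only the routes visible in this diff (the host/port come from gen-api-js.sh and the admin token value is an assumption): an admin authorizes a set of assets, gets a one-shot token back, and each asset can then be fetched once through the public downloads route.

	// Sketch: authorize a download, then fetch one asset with the returned token.
	body := strings.NewReader(`{"Kind":"host","Name":"node-1","Assets":["boot-v2.iso"]}`)
	req, _ := http.NewRequest("POST", "http://[::1]:7606/authorize-download", body)
	req.Header.Set("Authorization", "Bearer "+adminToken) // adminToken: hypothetical admin bearer token
	req.Header.Set("Content-Type", "application/json")

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		panic(err) // sketch only
	}
	var token string
	json.NewDecoder(resp.Body).Decode(&token) // wsAuthorizeDownload writes the token as JSON

	// The asset can be fetched exactly once; wsDownload then drops it from the spec.
	asset, _ := http.Get("http://[::1]:7606/public/downloads/" + token + "/boot-v2.iso")
	defer asset.Body.Close()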

View File

@ -8,8 +8,9 @@ import (
restful "github.com/emicklei/go-restful" restful "github.com/emicklei/go-restful"
"novit.nc/direktil/local-server/pkg/mime" "novit.tech/direktil/pkg/localconfig"
"novit.nc/direktil/pkg/localconfig"
"novit.tech/direktil/local-server/pkg/mime"
) )
var trustXFF = flag.Bool("trust-xff", true, "Trust the X-Forwarded-For header") var trustXFF = flag.Bool("trust-xff", true, "Trust the X-Forwarded-For header")
@ -27,7 +28,8 @@ func (ws *wsHost) register(rws *restful.WebService, alterRB func(*restful.RouteB
for _, rb := range []*restful.RouteBuilder{ for _, rb := range []*restful.RouteBuilder{
rws.GET(ws.prefix).To(ws.get). rws.GET(ws.prefix).To(ws.get).
Doc("Get the " + ws.hostDoc + "'s details"), Doc("Get the "+ws.hostDoc+"'s details").
Returns(200, "OK", localconfig.Host{}),
// raw configuration // raw configuration
b("config"). b("config").
@ -54,10 +56,14 @@ func (ws *wsHost) register(rws *restful.WebService, alterRB func(*restful.RouteB
b("boot.tar"). b("boot.tar").
Produces(mime.TAR). Produces(mime.TAR).
Doc("Get the " + ws.hostDoc + "'s /boot archive (ie: for metal upgrades)"), Doc("Get the " + ws.hostDoc + "'s /boot archive (ie: for metal upgrades)"),
b("boot-efi.tar").
Produces(mime.TAR).
Doc("Get the " + ws.hostDoc + "'s /boot archive (ie: for metal upgrades)"),
// read-only ISO support // read-only ISO support
b("boot.iso"). b("boot.iso").
Produces(mime.ISO). Produces(mime.ISO).
Param(cmdlineParam).
Doc("Get the " + ws.hostDoc + "'s boot CD-ROM image"), Doc("Get the " + ws.hostDoc + "'s boot CD-ROM image"),
// netboot support // netboot support
@ -72,6 +78,26 @@ func (ws *wsHost) register(rws *restful.WebService, alterRB func(*restful.RouteB
b("initrd"). b("initrd").
Produces(mime.OCTET). Produces(mime.OCTET).
Doc("Get the " + ws.hostDoc + "'s initial RAM disk (ie: for netboot)"), Doc("Get the " + ws.hostDoc + "'s initial RAM disk (ie: for netboot)"),
// boot v2
// - bootstrap config
b("bootstrap-config").
Produces(mime.YAML).
Doc("Get the " + ws.hostDoc + "'s bootstrap configuration"),
b("bootstrap-config.json").
Doc("Get the " + ws.hostDoc + "'s bootstrap configuration (as JSON)"),
// - initrd
b("initrd-v2").
Produces(mime.OCTET).
Doc("Get the " + ws.hostDoc + "'s initial RAM disk (v2)"),
// - bootstrap
b("bootstrap.tar").
Produces(mime.TAR).
Doc("Get the " + ws.hostDoc + "'s bootstrap seed archive"),
b("boot-v2.iso").
Produces(mime.ISO).
Param(cmdlineParam).
Doc("Get the " + ws.hostDoc + "'s boot CD-ROM image (v2)"),
} { } {
alterRB(rb) alterRB(rb)
rws.Route(rb) rws.Route(rb)
@@ -149,6 +175,8 @@ func renderHost(w http.ResponseWriter, r *http.Request, what string, host *local
	case "boot.tar":
		err = renderCtx(w, r, ctx, what, buildBootTar)

+	case "boot-efi.tar":
+		err = renderCtx(w, r, ctx, what, buildBootEFITar)
+
	case "boot.img":
		err = renderCtx(w, r, ctx, what, buildBootImg)
@@ -159,6 +187,18 @@ func renderHost(w http.ResponseWriter, r *http.Request, what string, host *local
	case "boot.img.lz4":
		err = renderCtx(w, r, ctx, what, buildBootImgLZ4)
// boot v2
case "bootstrap-config":
err = renderBootstrapConfig(w, r, ctx, false)
case "bootstrap-config.json":
err = renderBootstrapConfig(w, r, ctx, true)
case "initrd-v2":
err = renderCtx(w, r, ctx, what, buildInitrdV2)
case "bootstrap.tar":
err = renderCtx(w, r, ctx, what, buildBootstrap)
case "boot-v2.iso":
err = renderCtx(w, r, ctx, what, buildBootISOv2)
default: default:
http.NotFound(w, r) http.NotFound(w, r)
} }

View File

@ -0,0 +1,23 @@
package main
import (
"net/http"
restful "github.com/emicklei/go-restful"
)
func wsUnlockStore(req *restful.Request, resp *restful.Response) {
var passphrase string
err := req.ReadEntity(&passphrase)
if err != nil {
resp.WriteError(http.StatusBadRequest, err)
return
}
if err := unlockSecretStore([]byte(passphrase)); err != nil {
err.WriteJSON(resp.ResponseWriter)
return
}
resp.WriteEntity(*adminToken)
}

View File

@ -0,0 +1,44 @@
package main
import (
"net/http"
"os"
"path/filepath"
restful "github.com/emicklei/go-restful"
yaml "gopkg.in/yaml.v2"
)
type SSH_ACL struct {
Keys []string
Clusters []string
Groups []string
Hosts []string
}
func loadSSH_ACLs() (acls []SSH_ACL, err error) {
f, err := os.Open(filepath.Join(*dataDir, "ssh-acls.yaml"))
if err != nil {
return
}
defer f.Close()
err = yaml.NewDecoder(f).Decode(&acls)
return
}
func wsSSH_ACL_List(req *restful.Request, resp *restful.Response) {
// TODO
http.NotFound(resp.ResponseWriter, req.Request)
}
func wsSSH_ACL_Get(req *restful.Request, resp *restful.Response) {
// TODO
http.NotFound(resp.ResponseWriter, req.Request)
}
func wsSSH_ACL_Set(req *restful.Request, resp *restful.Response) {
// TODO
http.NotFound(resp.ResponseWriter, req.Request)
}

View File

@ -1,48 +1,106 @@
package main

import (
+	"fmt"
	"log"
	"net"
	"net/http"
	"strings"
+	"text/template"

	"github.com/emicklei/go-restful"

-	"novit.nc/direktil/local-server/pkg/mime"
-	"novit.nc/direktil/pkg/localconfig"
+	"novit.tech/direktil/pkg/localconfig"
+
+	"novit.tech/direktil/local-server/pkg/mime"
)
-func buildWS() *restful.WebService {
-	ws := &restful.WebService{}
+func registerWS(rest *restful.Container) {
+	// public-level APIs
+	{
+		ws := &restful.WebService{}
+		ws.
+			Path("/public").
+			Produces("application/json").
+			Consumes("application/json").
+			Route(ws.POST("/unlock-store").To(wsUnlockStore).
+				Reads("").
+				Writes("").
+				Doc("Try to unlock the store")).
+			Route(ws.GET("/downloads/{token}/{asset}").To(wsDownload).
+				Param(ws.PathParameter("token", "the download token")).
+				Param(ws.PathParameter("asset", "the requested asset")).
+				Doc("Fetch an asset via a download token"))
+
+		rest.Add(ws)
+	}
+
+	// Admin-level APIs
+	ws := &restful.WebService{}
+	ws.
+		Filter(adminAuth).
+		HeaderParameter("Authorization", "Admin bearer token")
+
+	// - downloads
+	ws.Route(ws.POST("/authorize-download").To(wsAuthorizeDownload).
+		Doc("Create a download token for the given download"))

-	// configs API
-	ws.Route(ws.POST("/configs").Filter(adminAuth).To(wsUploadConfig).
+	// - configs API
+	ws.Route(ws.POST("/configs").To(wsUploadConfig).
		Doc("Upload a new current configuration, archiving the previous one"))

-	// clusters API
-	ws.Route(ws.GET("/clusters").Filter(adminAuth).To(wsListClusters).
+	// - clusters API
+	ws.Route(ws.GET("/clusters").To(wsListClusters).
		Doc("List clusters"))

-	ws.Route(ws.GET("/clusters/{cluster-name}").Filter(adminAuth).To(wsCluster).
-		Doc("Get cluster details"))
-
-	ws.Route(ws.GET("/clusters/{cluster-name}/addons").Filter(adminAuth).To(wsClusterAddons).
+	const (
+		GET = http.MethodGet
+		PUT = http.MethodPut
+	)
+
+	cluster := func(method, subPath string) *restful.RouteBuilder {
+		return ws.Method(method).Path("/clusters/{cluster-name}" + subPath).
+			Param(ws.PathParameter("cluster-name", "name of the cluster"))
+	}
for _, builder := range []*restful.RouteBuilder{
cluster(GET, "").To(wsCluster).
Doc("Get cluster details"),
cluster(GET, "/addons").To(wsClusterAddons).
			Produces(mime.YAML).
			Doc("Get cluster addons").
			Returns(http.StatusOK, "OK", nil).
-			Returns(http.StatusNotFound, "The cluster does not exists or does not have addons defined", nil))
+			Returns(http.StatusNotFound, "The cluster does not exists or does not have addons defined", nil),

-	// hosts API
-	ws.Route(ws.GET("/hosts").Filter(hostsAuth).To(wsListHosts).
+		cluster(GET, "/tokens").To(wsClusterTokens).
+			Doc("List cluster's tokens"),
cluster(GET, "/tokens/{token-name}").To(wsClusterToken).
Doc("Get cluster's token"),
cluster(GET, "/passwords").To(wsClusterPasswords).
Doc("List cluster's passwords"),
cluster(GET, "/passwords/{password-name}").To(wsClusterPassword).
Doc("Get cluster's password"),
cluster(PUT, "/passwords/{password-name}").To(wsClusterSetPassword).
Doc("Set cluster's password"),
cluster(GET, "/CAs").To(wsClusterCAs).
Doc("Get cluster CAs"),
cluster(GET, "/CAs/{ca-name}/certificate").To(wsClusterCACert).
Produces(mime.CACERT).
Doc("Get cluster CA's certificate"),
cluster(GET, "/CAs/{ca-name}/signed").To(wsClusterSignedCert).
Produces(mime.CERT).
Param(ws.QueryParameter("name", "signed reference name").Required(true)).
Doc("Get cluster's certificate signed by the CA"),
} {
ws.Route(builder)
}
ws.Route(ws.GET("/hosts").To(wsListHosts).
Doc("List hosts")) Doc("List hosts"))
(&wsHost{
prefix: "/me",
hostDoc: "detected host",
getHost: detectHost,
}).register(ws, func(rb *restful.RouteBuilder) {
rb.Notes("In this case, the host is detected from the remote IP")
})
	(&wsHost{
		prefix:  "/hosts/{host-name}",
		hostDoc: "given host",
@@ -50,10 +108,29 @@ func buildWS() *restful.WebService {
			return req.PathParameter("host-name")
		},
	}).register(ws, func(rb *restful.RouteBuilder) {
+		rb.Filter(adminAuth)
	})

-	return ws
+	ws.Route(ws.GET("/ssh-acls").To(wsSSH_ACL_List))
ws.Route(ws.GET("/ssh-acls/{acl-name}").To(wsSSH_ACL_Get))
ws.Route(ws.PUT("/ssh-acls/{acl-name}").To(wsSSH_ACL_Set))
rest.Add(ws)
// Hosts API
ws = &restful.WebService{}
ws.Produces("application/json")
ws.Path("/me")
ws.Filter(hostsAuth).
HeaderParameter("Authorization", "Host or admin bearer token")
(&wsHost{
hostDoc: "detected host",
getHost: detectHost,
}).register(ws, func(rb *restful.RouteBuilder) {
rb.Notes("In this case, the host is detected from the remote IP")
})
rest.Add(ws)
} }
func detectHost(req *restful.Request) string { func detectHost(req *restful.Request) string {
@@ -103,8 +180,22 @@ func wsNotFound(req *restful.Request, resp *restful.Response) {
}

func wsError(resp *restful.Response, err error) {
-	log.Print("request failed: ", err)
+	log.Output(2, fmt.Sprint("request failed: ", err))
	resp.WriteErrorString(
		http.StatusInternalServerError,
		http.StatusText(http.StatusInternalServerError))
}
func wsRender(resp *restful.Response, tmplStr string, value interface{}) {
tmpl, err := template.New("wsRender").Funcs(templateFuncs).Parse(tmplStr)
if err != nil {
wsError(resp, err)
return
}
err = tmpl.Execute(resp, value)
if err != nil {
wsError(resp, err)
return
}
}

1
gen-api-js.sh Executable file
View File

@ -0,0 +1 @@
docker run --rm --net=host --user $(id -u) -v ${PWD}:/local swaggerapi/swagger-codegen-cli generate -i http://[::1]:7606/swagger.json -l javascript -o /local/js/api/

97
go.mod
View File

@ -1,34 +1,71 @@
-module novit.nc/direktil/local-server
+module novit.tech/direktil/local-server
+
+go 1.20

require (
-	github.com/cavaliercoder/go-cpio v0.0.0-20180626203310-925f9528c45e
-	github.com/cloudflare/cfssl v0.0.0-20181213083726-b94e044bb51e
-	github.com/coreos/etcd v3.3.11+incompatible // indirect
-	github.com/emicklei/go-restful v2.8.1+incompatible
-	github.com/emicklei/go-restful-openapi v1.0.0
-	github.com/go-openapi/jsonpointer v0.18.0 // indirect
-	github.com/go-openapi/jsonreference v0.18.0 // indirect
-	github.com/go-openapi/spec v0.18.0 // indirect
-	github.com/go-openapi/swag v0.18.0 // indirect
-	github.com/gobuffalo/buffalo-plugins v1.12.1 // indirect
-	github.com/google/certificate-transparency-go v1.0.21 // indirect
-	github.com/json-iterator/go v1.1.5 // indirect
-	github.com/markbates/going v1.0.3 // indirect
-	github.com/mcluseau/go-swagger-ui v0.0.0-20190204031235-fc4ac9154422
-	github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
-	github.com/modern-go/reflect2 v1.0.1 // indirect
-	github.com/oklog/ulid v1.3.1
-	github.com/pierrec/lz4 v2.0.5+incompatible
-	github.com/spf13/afero v1.2.1 // indirect
-	github.com/src-d/go-git v4.7.0+incompatible // indirect
-	github.com/ugorji/go/codec v0.0.0-20190128213124-ee1426cffec0 // indirect
-	golang.org/x/crypto v0.0.0-20190131182504-b8fe1690c613 // indirect
-	golang.org/x/net v0.0.0-20190125091013-d26f9f9a57f3 // indirect
-	golang.org/x/sys v0.0.0-20190203050204-7ae0202eb74c // indirect
-	golang.org/x/tools v0.0.0-20190202235157-7414d4c1f71c // indirect
-	gopkg.in/src-d/go-billy.v4 v4.3.0
-	gopkg.in/src-d/go-git.v4 v4.10.0
-	gopkg.in/yaml.v2 v2.2.2
-	k8s.io/apimachinery v0.0.0-20190201131811-df262fa1a1ba
-	novit.nc/direktil/pkg v0.0.0-20181210211743-9dc80cd34b09
+	github.com/cavaliergopher/cpio v1.0.1
+	github.com/cespare/xxhash v1.1.0
+	github.com/cloudflare/cfssl v1.6.3
+	github.com/dustin/go-humanize v1.0.1
+	github.com/emicklei/go-restful v2.16.0+incompatible
+	github.com/emicklei/go-restful-openapi v1.4.1
+	github.com/mcluseau/go-swagger-ui v0.0.0-20191019002626-fd9128c24a34
+	github.com/miolini/datacounter v1.0.3
+	github.com/oklog/ulid v1.3.1
+	github.com/pierrec/lz4 v2.6.1+incompatible
+	golang.org/x/crypto v0.5.0
+	gopkg.in/src-d/go-billy.v4 v4.3.2
+	gopkg.in/src-d/go-git.v4 v4.13.1
+	gopkg.in/yaml.v2 v2.4.0
+	k8s.io/apimachinery v0.26.1
+	m.cluseau.fr/go v0.0.0-20230206224905-5322a9bff2ec
+	novit.tech/direktil/pkg v0.0.0-20230201224712-5e39572dc50e
)

+replace github.com/zmap/zlint/v3 => github.com/zmap/zlint/v3 v3.3.1
+
+require (
github.com/Microsoft/go-winio v0.6.0 // indirect
github.com/emirpasic/gods v1.18.1 // indirect
github.com/frankban/quicktest v1.5.0 // indirect
github.com/go-logr/logr v1.2.3 // indirect
github.com/go-openapi/jsonpointer v0.19.6 // indirect
github.com/go-openapi/jsonreference v0.20.2 // indirect
github.com/go-openapi/spec v0.20.8 // indirect
github.com/go-openapi/swag v0.22.3 // indirect
github.com/go-sql-driver/mysql v1.7.0 // indirect
github.com/gobuffalo/envy v1.10.2 // indirect
github.com/gobuffalo/packd v1.0.2 // indirect
github.com/gobuffalo/packr v1.30.1 // indirect
github.com/google/certificate-transparency-go v1.1.4 // indirect
github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99 // indirect
github.com/jmoiron/sqlx v1.3.5 // indirect
github.com/joho/godotenv v1.5.1 // indirect
github.com/josharian/intern v1.0.0 // indirect
github.com/json-iterator/go v1.1.12 // indirect
github.com/kevinburke/ssh_config v1.2.0 // indirect
github.com/kisielk/sqlstruct v0.0.0-20210630145711-dae28ed37023 // indirect
github.com/lib/pq v1.10.7 // indirect
github.com/mailru/easyjson v0.7.7 // indirect
github.com/mitchellh/go-homedir v1.1.0 // indirect
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
github.com/modern-go/reflect2 v1.0.2 // indirect
github.com/rogpeppe/go-internal v1.9.0 // indirect
github.com/sergi/go-diff v1.3.1 // indirect
github.com/src-d/gcfg v1.4.0 // indirect
github.com/weppos/publicsuffix-go v0.20.0 // indirect
github.com/xanzy/ssh-agent v0.3.3 // indirect
github.com/zmap/zcrypto v0.0.0-20230205235340-d51ce4775101 // indirect
github.com/zmap/zlint/v3 v3.1.0 // indirect
golang.org/x/mod v0.7.0 // indirect
golang.org/x/net v0.5.0 // indirect
golang.org/x/sys v0.5.0 // indirect
golang.org/x/text v0.6.0 // indirect
golang.org/x/tools v0.5.0 // indirect
gomodules.xyz/jsonpatch/v2 v2.2.0 // indirect
google.golang.org/protobuf v1.28.1 // indirect
gopkg.in/warnings.v0 v0.1.2 // indirect
gopkg.in/yaml.v3 v3.0.1 // indirect
k8s.io/klog/v2 v2.90.0 // indirect
k8s.io/utils v0.0.0-20230202215443-34013725500c // indirect
) )

1804
go.sum

File diff suppressed because it is too large Load Diff

8
govc.env Normal file
View File

@ -0,0 +1,8 @@
export GOVC_DATACENTER=
export GOVC_PASSWORD=
export GOVC_URL=
export GOVC_USERNAME=
export GOVC_INSECURE=1
export GOVC_DATASTORE=
export NOVIT_VM_FOLDER=
export NOVIT_ISO_FOLDER=

BIN
html/favicon.ico Normal file

Binary file not shown (new file, 8.7 KiB).

6
html/html.go Normal file
View File

@ -0,0 +1,6 @@
package dlshtml
import "embed"
//go:embed favicon.ico ui
var FS embed.FS
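
A sketch of how an embedded filesystem like this is typically exposed over HTTP (an assumption; the actual wiring lives outside this diff):

	// Hypothetical wiring: serve the embedded favicon and UI files.
	http.Handle("/favicon.ico", http.FileServer(http.FS(dlshtml.FS)))
	http.Handle("/ui/", http.FileServer(http.FS(dlshtml.FS)))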

19
html/ui/app.css Normal file
View File

@ -0,0 +1,19 @@
.downloads {
display: flex;
align-content: stretch;
}
.downloads > * {
margin-left: 6pt;
}
.downloads > *:first-child {
margin-left: 0;
}
.downloads > div {
display: flex;
flex-flow: column;
max-height: 100pt;
overflow: auto;
}

87
html/ui/index.html Normal file
View File

@ -0,0 +1,87 @@
<!doctype html>
<html>
<head>
<title>Direktil Local Server</title>
<style>
@import url('./style.css');
@import url('./app.css');
</style>
<script src="js/jsonpatch.min.js" crossorigin="anonymous"></script>
<script src="js/app.js" type="module" defer></script>
<body>
<div id="app">
<header>
<div id="logo">
<img src="/favicon.ico" />
<span>Direktil Local Server</span>
</div>
<div class="utils">
<span id="login-hdr" v-if="session.token">
Logged in
<button class="link" @click="copyText(session.token)">&#x1F5D0;</button>
</span>
<span id="uiHash">ui <code>{{ uiHash || '-----' }}</code></span>
<span class="green" v-if="publicState">&#x1F5F2;</span>
<span class="red" v-else >&#x1F5F2;</span>
</div>
</header>
<div class="error" v-if="error">
<button class="btn-close" @click="error=null">&times;</button>
<div class="code" v-if="error.code">{{ error.code }}</div>
<div class="message">{{ error.message }}</div>
</div>
<template v-if="!publicState">
<p>Not connected.</p>
</template>
<template v-else-if="publicState.Store.New">
<p>Store is new.</p>
<form @submit="unlockStore" action="/public/unlock-store">
<input type="password" v-model="forms.store.pass1" name="passphrase" />
<input type="password" v-model="forms.store.pass2" />
<input type="submit" value="initialize" :disabled="!forms.store.pass1 || forms.store.pass1 != forms.store.pass2" />
</form>
</template>
<template v-else-if="!publicState.Store.Open">
<p>Store is not open.</p>
<form @submit="unlockStore" action="/public/unlock-store">
<input type="password" name="passphrase" v-model="forms.store.pass1" />
<input type="submit" value="unlock" :disabled="!forms.store.pass1" />
</form>
</template>
<template v-else-if="!state">
<p v-if="!session.token">Not logged in.</p>
<p v-else>Invalid token</p>
<form @submit="setToken">
<input type="password" v-model="forms.setToken" />
<input type="submit" value="set token"/>
</form>
</template>
<template v-else>
<div v-if="state.Clusters" id="clusters">
<h2>Clusters</h2>
<div class="sheets">
<Cluster v-for="c in state.Clusters" :cluster="c" :token="session.token" :state="state" />
</div>
</div>
<div v-if="state.Hosts" id="hosts">
<h2>Hosts</h2>
<div class="sheets">
<Host v-for="h in state.Hosts" :host="h" :token="session.token" :state="state" />
</div>
</div>
<pre v-if="false">{{ state }}</pre>
</template>
</div>
</body>
</html>

37
html/ui/js/Cluster.js Normal file
View File

@ -0,0 +1,37 @@
import Downloads from './Downloads.js';
import GetCopy from './GetCopy.js';
export default {
components: { Downloads, GetCopy },
props: [ 'cluster', 'token', 'state' ],
template: `
<div class="cluster">
<div class="title">Cluster {{ cluster.Name }}</div>
<div class="section">Tokens</div>
<section class="links">
<GetCopy v-for="n in cluster.Tokens" :token="token" :name="n" :href="'/clusters/'+cluster.Name+'/tokens/'+n" />
</section>
<div class="section">Passwords</div>
<section class="links">
<GetCopy v-for="n in cluster.Passwords" :token="token" :name="n" :href="'/clusters/'+cluster.Name+'/passwords/'+n" />
</section>
<div class="section">Downloads</div>
<section class="downloads">
<Downloads :token="token" :state="state" kind="cluster" :name="cluster.Name" />
</section>
<div class="section">CAs</div>
<section v-for="ca in cluster.CAs">
{{ ca.Name }}:
<GetCopy :token="token" name="cert" :href="'/clusters/'+cluster.Name+'/CAs/'+ca.Name+'/certificate'" />
<template v-if="ca.Signed">
{{" "}}signed
<template v-for="signed in ca.Signed">
{{" "}}
<GetCopy :token="token" :name="signed" :href="'/clusters/'+cluster.Name+'/CAs/'+ca.Name+'/signed?name='+signed" />
</template>
</template>
</section>
</div>
`
}

66
html/ui/js/Downloads.js Normal file
View File

@ -0,0 +1,66 @@
export default {
props: [ 'kind', 'name', 'token', 'state' ],
data() {
return { createDisabled: false, selectedAssets: {} }
},
computed: {
availableAssets() {
return {
cluster: ['addons'],
host: [
"kernel",
"initrd-v2",
"bootstrap.tar",
"boot-v2.iso",
"config",
"boot.iso",
"boot.tar",
"boot-efi.tar",
"boot.img",
"boot.img.gz",
"boot.img.lz4",
"bootstrap-config",
"initrd",
"ipxe",
],
}[this.kind]
},
downloads() {
let ret = []
Object.entries(this.state.Downloads)
.filter(e => { let d=e[1]; return d.Kind == this.kind && d.Name == this.name })
.forEach(e => {
let token= e[0], d = e[1]
d.Assets.forEach(asset => {
ret.push({name: asset, url: '/public/downloads/'+token+'/'+asset})
})
})
return ret
},
assets() {
return this.availableAssets.filter(a => this.selectedAssets[a])
},
},
methods: {
createToken() {
event.preventDefault()
this.createDisabled = true
fetch('/authorize-download', {
method: 'POST',
body: JSON.stringify({Kind: this.kind, Name: this.name, Assets: this.assets}),
headers: { 'Authorization': 'Bearer ' + this.token },
}).then((resp) => resp.json())
.then((token) => { this.selectedAssets = {}; this.createDisabled = false })
.catch((e) => { alert('failed to create link'); this.createDisabled = false })
},
},
template: `<div class="downloads">
<div class="options">
<span v-for="asset in availableAssets"><label><input type="checkbox" v-model="selectedAssets[asset]" />&nbsp;{{ asset }}</label></span>
</div>
<button :disabled="createDisabled || assets.length==0" @click="createToken">+</button>
<div><a v-for="d in downloads" target="_blank" :href="d.url">{{ d.name }}</a></div>
</div>`
}

21
html/ui/js/GetCopy.js Normal file
View File

@ -0,0 +1,21 @@
export default {
props: [ 'name', 'href', 'token' ],
data() { return {showCopied: false} },
template: `<span class="notif"><div v-if="showCopied">copied!</div><a :href="href" @click="fetchAndCopy()">{{name}}<small>&nbsp;&#x1F5D0;</small></a></span>`,
methods: {
fetchAndCopy() {
event.preventDefault()
fetch(this.href, {
method: 'GET',
headers: { 'Authorization': 'Bearer ' + this.token },
}).then((resp) => resp.headers.get("content-type") == "application/json" ? resp.json() : resp.text())
.then((value) => {
window.navigator.clipboard.writeText(value)
this.showCopied = true
setTimeout(() => { this.showCopied = false }, 1000)
})
.catch((e) => { console.log("failed to get value:", e); alert('failed to get value') })
},
},
}

21
html/ui/js/Host.js Normal file
View File

@ -0,0 +1,21 @@
import Downloads from './Downloads.js';
export default {
components: { Downloads },
props: [ 'host', 'token', 'state' ],
template: `
<div class="host">
<div class="title">Host {{ host.Name }}</div>
<section>
<template v-for="ip in host.IPs">
{{ ip }}
</template>
</section>
<div class="section">Downloads</div>
<section>
<Downloads :token="token" :state="state" kind="host" :name="host.Name" />
</section>
</div>
`
}

162
html/ui/js/app.js Normal file
View File

@ -0,0 +1,162 @@
import { createApp } from './vue.esm-browser.js';
import Cluster from './Cluster.js';
import Host from './Host.js';
createApp({
components: { Cluster, Host },
data() {
return {
forms: {
store: { },
},
session: {},
error: null,
publicState: null,
uiHash: null,
watchingState: false,
state: null,
}
},
mounted() {
this.session = JSON.parse(sessionStorage.state || "{}")
this.watchPublicState()
},
watch: {
session: {
deep: true,
handler(v) {
sessionStorage.state = JSON.stringify(v)
if (v.token && !this.watchingState) {
this.watchState()
this.watchingState = true
}
}
},
publicState: {
deep: true,
handler(v) {
if (v) {
if (this.uiHash && v.UIHash != this.uiHash) {
console.log("reloading")
location.reload()
} else {
this.uiHash = v.UIHash
}
}
},
}
},
methods: {
copyText(text) {
event.preventDefault()
window.navigator.clipboard.writeText(text)
},
setToken() {
event.preventDefault()
this.session.token = this.forms.setToken
this.forms.setToken = null
},
unlockStore() {
this.apiPost('/public/unlock-store', this.forms.store.pass1, (v) => {
this.forms.store = {}
if (v) {
this.session.token = v
if (!this.watchingState) {
this.watchState()
this.watchingState = true
}
}
})
},
apiPost(action, data, onload) {
event.preventDefault()
if (data === undefined) {
throw("action " + action + ": no data")
}
/* TODO
fetch(action, {
method: 'POST',
body: JSON.stringify(data),
})
.then((response) => response.json())
.then((result) => onload)
// */
var xhr = new XMLHttpRequest()
xhr.responseType = 'json'
// TODO spinner, pending action notification, or something
xhr.onerror = () => {
// this.actionResults.splice(idx, 1, {...item, done: true, failed: true })
}
xhr.onload = (r) => {
if (xhr.status != 200) {
this.error = xhr.response
return
}
// this.actionResults.splice(idx, 1, {...item, done: true, resp: xhr.responseText})
this.error = null
if (onload) {
onload(xhr.response)
}
}
xhr.open("POST", action)
xhr.setRequestHeader('Accept', 'application/json')
xhr.setRequestHeader('Content-Type', 'application/json')
if (this.session.token) {
xhr.setRequestHeader('Authorization', 'Bearer '+this.session.token)
}
xhr.send(JSON.stringify(data))
},
download(url) {
event.target.target = '_blank'
event.target.href = this.downloadLink(url)
},
downloadLink(url) {
// TODO once-shot download link
return url + '?token=' + this.session.token
},
watchPublicState() {
this.watchStream('publicState', '/public-state')
},
watchState() {
this.watchStream('state', '/state', true)
},
watchStream(field, path, withToken) {
let evtSrc = new EventSource(path + (withToken ? '?token='+this.session.token : ''));
evtSrc.onmessage = (e) => {
let update = JSON.parse(e.data)
console.log("watch "+path+":", update)
if (update.err) {
console.log("watch error from server:", err)
}
if (update.set) {
this[field] = update.set
}
if (update.p) { // patch
new jsonpatch.JSONPatch(update.p, true).apply(this[field])
}
}
evtSrc.onerror = (e) => {
// console.log("event source " + path + " error:", e)
if (evtSrc) evtSrc.close()
this[field] = null
window.setTimeout(() => { this.watchStream(field, path, withToken) }, 1000)
}
},
}
}).mount('#app')

36
html/ui/js/jsonpatch.min.js vendored Normal file

File diff suppressed because one or more lines are too long

16172
html/ui/js/vue.esm-browser.js Normal file

File diff suppressed because it is too large Load Diff

147
html/ui/style.css Normal file
View File

@ -0,0 +1,147 @@
body {
background: white;
}
button[disabled] {
opacity: 0.5;
}
a[href], a[href]:visited, button.link {
border: none;
color: blue;
background: none;
cursor: pointer;
text-decoration: none;
}
table {
border-collapse: collapse;
}
th, td {
border-left: dotted 1pt;
border-right: dotted 1pt;
border-bottom: dotted 1pt;
padding: 2pt 4pt;
}
tr:first-child > th {
border-top: dotted 1pt;
}
th, tr:last-child > td {
border-bottom: solid 1pt;
}
.flat > * { margin-left: 1ex; }
.flat > *:first-child { margin-left: 0; }
.green { color: green; }
.red { color: red; }
@media (prefers-color-scheme: dark) {
body {
background: black;
color: orange;
}
button, input[type=submit] {
background: #333;
color: #eee;
}
a[href], a[href]:visited, button.link {
border: none;
color: #31b0fa;
}
.red { color: #c00; }
}
header {
display: flex;
align-items: center;
border-bottom: 2pt solid;
margin: 0 0 1em 0;
padding: 1ex;
justify-content: space-between;
}
#logo > img {
vertical-align: middle;
}
header .utils > * {
margin-left: 1ex;
}
.error {
display: flex;
position: relative;
background: rgba(255,0,0,0.2);
border: 1pt solid red;
justify-content: space-between;
align-items: center;
}
.error .btn-close,
.error .code {
background: #600;
color: white;
font-weight: bold;
border: none;
align-self: stretch;
padding: 1ex 1em;
}
.error .code {
order: 1;
display: flex;
align-items: center;
text-align: center;
}
.error .message {
order: 2;
padding: 1ex 2em;
}
.error .btn-close {
order: 3;
}
.sheets {
display: flex;
align-items: stretch;
}
.sheets > div {
margin: 0 1ex;
border: 1pt solid;
border-radius: 6pt;
}
.sheets .title {
text-align: center;
font-weight: bold;
font-size: large;
padding: 2pt 6pt;
background: rgba(127,127,127,0.5);
}
.sheets .section {
padding: 2pt 6pt 2pt 6pt;
font-weight: bold;
border-top: 1px dotted;
}
.sheets section {
margin: 2pt 6pt 6pt 6pt;
}
.notif {
display: inline-block;
position: relative;
}
.notif > div:first-child {
position: absolute;
min-width: 100%; height: 100%;
background: white;
opacity: 75%;
text-align: center;
}
.links > * { margin-left: 1ex; }
.links > *:first-child { margin-left: 0; }
@media (prefers-color-scheme: dark) {
.notif > div:first-child {
background: black;
}
}

2
install Executable file
View File

@ -0,0 +1,2 @@
#! /bin/sh
go install -trimpath ./cmd/...

View File

@ -1,6 +1,22 @@
-**/*.go Dockerfile {
+modd.conf {}
+
+**/*.go go.mod go.sum {
	prep: go test ./...
-	prep: go install ./cmd/...
-	prep: docker build -t dls .
+	prep: mkdir -p dist
+	prep: go build -o dist/ -trimpath ./...
+	#prep: docker build --build-arg GOPROXY=$GOPROXY -t dls .
	#daemon +sigterm: /var/lib/direktil/test-run
}
html/**/* {
prep: go build -o dist/ -trimpath ./cmd/dkl-local-server
}
dist/dkl-local-server {
prep: mkdir -p tmp
daemon +sigterm: dist/dkl-local-server -data tmp -auto-unlock test
}
**/*.proto !dist/**/* {
prep: for mod in @mods; do protoc -I ./ --go_out=plugins=grpc,paths=source_relative:. $mod; done
}

View File

@ -1,22 +1,35 @@
package clustersconfig package clustersconfig
import ( import (
"flag"
"fmt" "fmt"
"io" "io"
"io/ioutil" "io/ioutil"
"log"
"net" "net"
"os"
"path/filepath"
"strings" "strings"
"text/template" "text/template"
yaml "gopkg.in/yaml.v2" yaml "gopkg.in/yaml.v2"
) )
var (
templateDetailsDir = flag.String("template-details-dir",
filepath.Join(os.TempDir(), "dkl-dir2config"),
"write details of template execute in this dir")
templateID = 0
)
type Config struct { type Config struct {
Hosts []*Host Hosts []*Host
Groups []*Group Groups []*Group
Clusters []*Cluster Clusters []*Cluster
Configs []*Template Configs []*Template
StaticPods []*Template `yaml:"static_pods"` StaticPods []*Template `yaml:"static_pods"`
BootstrapPods map[string][]*Template `yaml:"bootstrap_pods"`
Addons map[string][]*Template Addons map[string][]*Template
SSLConfig string `yaml:"ssl_config"` SSLConfig string `yaml:"ssl_config"`
CertRequests []*CertRequest `yaml:"cert_requests"` CertRequests []*CertRequest `yaml:"cert_requests"`
@ -137,7 +150,7 @@ type Template struct {
parsedTemplate *template.Template parsedTemplate *template.Template
} }
func (t *Template) Execute(wr io.Writer, data interface{}, extraFuncs map[string]interface{}) error { func (t *Template) Execute(contextName, elementName string, wr io.Writer, data interface{}, extraFuncs map[string]interface{}) error {
if t.parsedTemplate == nil { if t.parsedTemplate == nil {
var templateFuncs = map[string]interface{}{ var templateFuncs = map[string]interface{}{
"indent": func(indent, s string) (indented string) { "indent": func(indent, s string) (indented string) {
@ -159,13 +172,49 @@ func (t *Template) Execute(wr io.Writer, data interface{}, extraFuncs map[string
t.parsedTemplate = tmpl t.parsedTemplate = tmpl
} }
if *templateDetailsDir != "" {
templateID++
base := filepath.Join(*templateDetailsDir, contextName, fmt.Sprintf("%s-%03d", elementName, templateID))
os.MkdirAll(base, 0700)
base += string(filepath.Separator)
log.Print("writing template details: ", base, "{in,data,out}")
if err := ioutil.WriteFile(base+"in", []byte(t.Template), 0600); err != nil {
return err
}
yamlBytes, err := yaml.Marshal(data)
if err != nil {
return err
}
if err := ioutil.WriteFile(base+"data", yamlBytes, 0600); err != nil {
return err
}
out, err := os.Create(base + "out")
if err != nil {
return err
}
defer out.Close()
wr = io.MultiWriter(wr, out)
}
return t.parsedTemplate.Execute(wr, data) return t.parsedTemplate.Execute(wr, data)
} }
// Host represents a host served by this server. // Host represents a host served by this server.
type Host struct { type Host struct {
WithRev WithRev
Name string Name string
Labels map[string]string
Annotations map[string]string
MAC string MAC string
IP string IP string
IPs []string IPs []string
@ -177,11 +226,16 @@ type Host struct {
// Group represents a group of hosts and provides their configuration. // Group represents a group of hosts and provides their configuration.
type Group struct { type Group struct {
WithRev WithRev
Name string Name string
Labels map[string]string
Annotations map[string]string
Master bool Master bool
IPXE string IPXE string
Kernel string Kernel string
Initrd string Initrd string
BootstrapConfig string `yaml:"bootstrap_config"`
Config string Config string
StaticPods string `yaml:"static_pods"` StaticPods string `yaml:"static_pods"`
Versions map[string]string Versions map[string]string
@ -194,9 +248,14 @@ type Vars map[string]interface{}
// Cluster represents a cluster of hosts, allowing for cluster-wide variables. // Cluster represents a cluster of hosts, allowing for cluster-wide variables.
type Cluster struct { type Cluster struct {
WithRev WithRev
Name string Name string
Labels map[string]string
Annotations map[string]string
Domain string Domain string
Addons string Addons string
BootstrapPods string `yaml:"bootstrap_pods"`
Subnets struct { Subnets struct {
Services string Services string
Pods string Pods string

View File

@ -70,6 +70,7 @@ func (d *Defaults) Load(dir, suffix string, value Rev, data []byte) (err error)
} }
func (d *Defaults) Open(rev, filePath string) (rd io.Reader, err error) { func (d *Defaults) Open(rev, filePath string) (rd io.Reader, err error) {
log.Printf("openning defaults at %s:%s", rev, filePath)
tree, err := d.treeAt(rev) tree, err := d.treeAt(rev)
if err != nil { if err != nil {
return return
@ -94,12 +95,17 @@ func (d *Defaults) ReadAll(rev, filePath string) (ba []byte, err error) {
} }
func (d *Defaults) List(rev, dir string) (names []string, err error) { func (d *Defaults) List(rev, dir string) (names []string, err error) {
log.Printf("listing defaults at %s:%s", rev, dir)
tree, err := d.treeAt(rev) tree, err := d.treeAt(rev)
if err != nil { if err != nil {
return return
} }
dirPrefix := dir + "/"
err = tree.Files().ForEach(func(f *object.File) (err error) { err = tree.Files().ForEach(func(f *object.File) (err error) {
if !strings.HasPrefix(f.Name, dirPrefix) {
return
}
if !strings.HasSuffix(f.Name, ".yaml") { if !strings.HasSuffix(f.Name, ".yaml") {
return return
} }
@ -126,6 +132,11 @@ func (d *Defaults) treeAt(rev string) (tree *object.Tree, err error) {
obj, err = o.Object() obj, err = o.Object()
case *object.Commit: // commit -> tree case *object.Commit: // commit -> tree
msg := o.Message
if len(msg) > 30 {
msg = msg[:27] + "..."
}
log.Printf("open defaults at commit %s: %s", o.Hash.String()[:7], msg)
return o.Tree() return o.Tree()
default: default:

View File

@ -37,7 +37,10 @@ func FromDir(dirPath, defaultsPath string) (*Config, error) {
return nil return nil
} }
config := &Config{Addons: make(map[string][]*Template)} config := &Config{
Addons: make(map[string][]*Template),
BootstrapPods: make(map[string][]*Template),
}
// load clusters // load clusters
names, err := store.List("clusters") names, err := store.List("clusters")
@ -121,13 +124,19 @@ func FromDir(dirPath, defaultsPath string) (*Config, error) {
return nil, err return nil, err
} }
group.BootstrapConfig, err = template(group.Rev(), "configs", group.BootstrapConfig, &config.Configs)
if err != nil {
return nil, fmt.Errorf("failed to load config for group %q: %v", name, err)
}
group.Config, err = template(group.Rev(), "configs", group.Config, &config.Configs) group.Config, err = template(group.Rev(), "configs", group.Config, &config.Configs)
if err != nil { if err != nil {
return nil, fmt.Errorf("failed to load config for group %q: %v", name, err) return nil, fmt.Errorf("failed to load config for group %q: %v", name, err)
} }
if Debug { if Debug {
log.Printf("group %q: config=%q static_pods=%q", group.Name, group.Config, group.StaticPods) log.Printf("group %q: config=%q static_pods=%q",
group.Name, group.Config, group.StaticPods)
} }
group.StaticPods, err = template(group.Rev(), "static-pods", group.StaticPods, &config.StaticPods) group.StaticPods, err = template(group.Rev(), "static-pods", group.StaticPods, &config.StaticPods)
@ -189,6 +198,7 @@ func FromDir(dirPath, defaultsPath string) (*Config, error) {
return nil return nil
} }
// cluster addons
for _, cluster := range config.Clusters { for _, cluster := range config.Clusters {
addonSet := cluster.Addons addonSet := cluster.Addons
if len(addonSet) == 0 { if len(addonSet) == 0 {
@ -207,6 +217,25 @@ func FromDir(dirPath, defaultsPath string) (*Config, error) {
config.Addons[addonSet] = templates config.Addons[addonSet] = templates
} }
// cluster bootstrap pods
for _, cluster := range config.Clusters {
bpSet := cluster.BootstrapPods
if bpSet == "" {
continue
}
if _, ok := config.BootstrapPods[bpSet]; ok {
continue
}
templates := make([]*Template, 0)
if err = loadTemplates(cluster.Rev(), path.Join("bootstrap-pods", bpSet), &templates); err != nil {
return nil, err
}
config.BootstrapPods[bpSet] = templates
}
// load SSL configuration
if ba, err := ioutil.ReadFile(filepath.Join(dirPath, "ssl-config.json")); err == nil {
config.SSLConfig = string(ba)


@ -0,0 +1,61 @@
package config
type Config struct {
AntiPhishingCode string `json:"anti_phishing_code"`
Keymap string
Modules string
Auths []Auth
Networks []struct {
Name string
Interfaces []struct {
Var string
N int
Regexps []string
}
Script string
}
LVM []LvmVG
Bootstrap Bootstrap
}
type Auth struct {
Name string
SSHKey string `yaml:"sshKey"`
Password string `yaml:"password"`
}
type LvmVG struct {
VG string
PVs struct {
N int
Regexps []string
}
Defaults struct {
FS string
Raid *RaidConfig
}
LVs []struct {
Name string
Crypt string
FS string
Raid *RaidConfig
Size string
Extents string
}
}
type RaidConfig struct {
Mirrors int
Stripes int
}
type Bootstrap struct {
Dev string
Seed string
}
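For orientation, here is a minimal, hypothetical sketch of how these types fit together; every value below is invented, and the variable name is made up for illustration only:

```go
package config

// exampleConfig is a hypothetical illustration only: all values are invented.
var exampleConfig = Config{
	AntiPhishingCode: "1234",
	Keymap:           "us",
	Modules:          "xfs",
	Auths: []Auth{
		{Name: "admin", SSHKey: "ssh-ed25519 AAAA..."},
	},
	LVM: []LvmVG{
		{VG: "vg0"}, // PVs, Defaults and LVs left at their zero values for brevity
	},
	Bootstrap: Bootstrap{Dev: "/dev/sda", Seed: "seed.tar"},
}
```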


@ -7,4 +7,6 @@ const (
ISO = "application/x-iso9660-image"
IPXE = "text/x-ipxe"
OCTET = "application/octet-stream"
CERT = "application/x-x509-user-cert"
CACERT = "application/x-x509-ca-cert"
)

pkg/utf16/utf16.go (Normal file, 29 lines)

@ -0,0 +1,29 @@
package utf16
import (
"encoding/binary"
"fmt"
"unicode/utf8"
)
func FromUTF8(data []byte) (res []byte) {
endian := binary.LittleEndian
res = make([]byte, (len(data)+1)*2)
res = res[:2]
endian.PutUint16(res, 0xfeff)
for len(data) > 0 {
r, size := utf8.DecodeRune(data)
if r > 65535 {
panic(fmt.Errorf("r=0x%x > 0xffff", r))
}
slen := len(res)
res = res[:slen+2]
endian.PutUint16(res[slen:], uint16(r))
data = data[size:]
}
return
}
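A quick, hypothetical usage sketch of the conversion above (the helper name is invented; written as if inside this package so no import path has to be assumed):

```go
package utf16

import "fmt"

// demoFromUTF8 is a hypothetical usage sketch, not part of the package.
func demoFromUTF8() {
	le := FromUTF8([]byte("EFI"))
	// First comes the little-endian BOM (ff fe), then one uint16 per rune.
	fmt.Printf("% x\n", le) // ff fe 45 00 46 00 49 00
}
```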

secretstore/io.go (Normal file, 30 lines)

@ -0,0 +1,30 @@
package secretstore
import (
"crypto/rand"
"encoding/binary"
"fmt"
"io"
)
func readFull(in io.Reader, ba []byte) (err error) {
_, err = io.ReadFull(in, ba)
return
}
func read[T any](in io.Reader) (v T, err error) {
err = binary.Read(in, binary.BigEndian, &v)
return
}
var readSize = read[uint16]
func randRead(ba []byte) (err error) {
err = readFull(rand.Reader, ba)
if err != nil {
err = fmt.Errorf("failed to read random bytes: %w", err)
return
}
return
}

secretstore/mem.go (Normal file, 7 lines)

@ -0,0 +1,7 @@
package secretstore
func memzero(ba []byte) {
for i := range ba {
ba[i] = 0
}
}

secretstore/reader.go (Normal file, 68 lines)

@ -0,0 +1,68 @@
package secretstore
import (
"crypto/aes"
"crypto/cipher"
"io"
)
func (s *Store) NewReader(reader io.Reader) (r io.Reader, err error) {
iv := [aes.BlockSize]byte{}
err = readFull(reader, iv[:])
if err != nil {
return
}
r = storeReader{reader, s.NewDecrypter(iv)}
return
}
type storeReader struct {
reader io.Reader
decrypter cipher.Stream
}
func (r storeReader) Read(ba []byte) (n int, err error) {
n, err = r.reader.Read(ba)
if n > 0 {
r.decrypter.XORKeyStream(ba[:n], ba[:n])
}
return
}
func (s *Store) NewWriter(writer io.Writer) (r io.Writer, err error) {
iv := [aes.BlockSize]byte{}
if err = randRead(iv[:]); err != nil {
return
}
_, err = writer.Write(iv[:])
if err != nil {
return
}
r = storeWriter{writer, s.NewEncrypter(iv)}
return
}
type storeWriter struct {
writer io.Writer
encrypter cipher.Stream
}
func (r storeWriter) Write(ba []byte) (n int, err error) {
if len(ba) == 0 {
return
}
encBA := make([]byte, len(ba))
r.encrypter.XORKeyStream(encBA, ba)
n, err = r.writer.Write(encBA)
return
}
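A minimal round-trip sketch of this stream format (random IV prefix, then CFB-encrypted payload), written as if inside the secretstore package; the function name and passphrase are invented, and Init is defined in secret-store.go below:

```go
package secretstore

import (
	"bytes"
	"fmt"
	"io"
)

// demoStreamRoundTrip is a hypothetical example, not part of the package.
func demoStreamRoundTrip() error {
	s := New()
	defer s.Close()
	// Init generates the master key and salt; note that it wipes the passphrase slice.
	if err := s.Init([]byte("passphrase")); err != nil {
		return err
	}

	var buf bytes.Buffer
	w, err := s.NewWriter(&buf) // writes a random IV, then encrypts everything written
	if err != nil {
		return err
	}
	if _, err := w.Write([]byte("hello")); err != nil {
		return err
	}

	r, err := s.NewReader(&buf) // reads the IV back, then decrypts on the fly
	if err != nil {
		return err
	}
	plain := make([]byte, 5)
	if _, err := io.ReadFull(r, plain); err != nil {
		return err
	}
	fmt.Println(string(plain)) // prints "hello"
	return nil
}
```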

secretstore/secret-store.go (Normal file, 263 lines)

@ -0,0 +1,263 @@
package secretstore
import (
"bufio"
"crypto/aes"
"crypto/cipher"
"crypto/sha512"
"errors"
"fmt"
"io"
"log"
"os"
"syscall"
"golang.org/x/crypto/argon2"
)
type Store struct {
unlocked bool
key [32]byte
salt [aes.BlockSize]byte
keys []keyEntry
}
type keyEntry struct {
hash [64]byte
encKey [32]byte
}
func New() (s *Store) {
s = &Store{}
syscall.Mlock(s.key[:])
return
}
func Open(path string) (s *Store, err error) {
f, err := os.Open(path)
if err != nil {
return
}
defer f.Close()
s = New()
_, err = s.ReadFrom(bufio.NewReader(f))
return
}
func (s *Store) SaveTo(path string) (err error) {
f, err := os.OpenFile(path, syscall.O_CREAT|syscall.O_TRUNC|syscall.O_WRONLY, 0600)
if err != nil {
return
}
defer f.Close()
out := bufio.NewWriter(f)
_, err = s.WriteTo(out)
if err != nil {
return
}
err = out.Flush()
if err != nil {
return
}
return
}
func (s *Store) Close() {
memzero(s.key[:])
syscall.Munlock(s.key[:])
s.unlocked = false
}
func (s *Store) IsNew() bool {
return len(s.keys) == 0
}
func (s *Store) Unlocked() bool {
return s.unlocked
}
func (s *Store) Init(passphrase []byte) (err error) {
err = randRead(s.key[:])
if err != nil {
return
}
err = randRead(s.salt[:])
if err != nil {
return
}
s.AddKey(passphrase)
s.unlocked = true
return
}
func (s *Store) ReadFrom(in io.Reader) (n int64, err error) {
memzero(s.key[:])
s.unlocked = false
defer func() {
if err != nil {
log.Output(2, fmt.Sprintf("failed after %d bytes", n))
}
}()
readFull := func(ba []byte) {
var nr int
nr, err = io.ReadFull(in, ba)
n += int64(nr)
}
// read the salt
readFull(s.salt[:])
if err != nil {
return
}
// read the (encrypted) keys
s.keys = make([]keyEntry, 0)
for {
k := keyEntry{}
readFull(k.hash[:])
if err != nil {
if err == io.EOF {
err = nil
}
return
}
readFull(k.encKey[:])
if err != nil {
return
}
s.keys = append(s.keys, k)
}
}
func (s *Store) WriteTo(out io.Writer) (n int64, err error) {
write := func(ba []byte) {
var nr int
nr, err = out.Write(ba)
n += int64(nr)
}
write(s.salt[:])
if err != nil {
return
}
for _, k := range s.keys {
write(k.hash[:])
if err != nil {
return
}
write(k.encKey[:])
if err != nil {
return
}
}
return
}
var ErrNoSuchKey = errors.New("no such key")
func (s *Store) Unlock(passphrase []byte) (ok bool) {
key, hash := s.keyPairFromPassword(passphrase)
memzero(passphrase)
defer memzero(key[:])
var idx = -1
for i := range s.keys {
if hash == s.keys[i].hash {
idx = i
break
}
}
if idx == -1 {
return
}
s.decryptTo(s.key[:], s.keys[idx].encKey[:], &key)
s.unlocked = true
return true
}
func (s *Store) AddKey(passphrase []byte) {
key, hash := s.keyPairFromPassword(passphrase)
memzero(passphrase)
defer memzero(key[:])
k := keyEntry{hash: hash}
encKey := s.encrypt(s.key[:], &key)
copy(k.encKey[:], encKey)
s.keys = append(s.keys, k)
}
func (s *Store) keyPairFromPassword(password []byte) (key [32]byte, hash [64]byte) {
keySlice := argon2.IDKey(password, s.salt[:], 1, 64*1024, 4, 32)
copy(key[:], keySlice)
memzero(keySlice)
hash = sha512.Sum512(key[:])
return
}
func (s *Store) NewEncrypter(iv [aes.BlockSize]byte) cipher.Stream {
if !s.unlocked {
panic("not unlocked")
}
return newEncrypter(iv, &s.key)
}
func (s *Store) NewDecrypter(iv [aes.BlockSize]byte) cipher.Stream {
if !s.unlocked {
panic("not unlocked")
}
return newDecrypter(iv, &s.key)
}
func (s *Store) encrypt(src []byte, key *[32]byte) (dst []byte) {
dst = make([]byte, len(src))
newEncrypter(s.salt, key).XORKeyStream(dst, src)
return
}
func (s *Store) decryptTo(dst []byte, src []byte, key *[32]byte) {
newDecrypter(s.salt, key).XORKeyStream(dst, src)
}
func newEncrypter(iv [aes.BlockSize]byte, key *[32]byte) cipher.Stream {
c, err := aes.NewCipher(key[:])
if err != nil {
panic(fmt.Errorf("failed to init AES: %w", err))
}
return cipher.NewCFBEncrypter(c, iv[:])
}
func newDecrypter(iv [aes.BlockSize]byte, key *[32]byte) cipher.Stream {
c, err := aes.NewCipher(key[:])
if err != nil {
panic(fmt.Errorf("failed to init AES: %w", err))
}
return cipher.NewCFBDecrypter(c, iv[:])
}
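And a hypothetical sketch of the store lifecycle (create, save, reopen, unlock), again written as if inside the package; the file path, passphrase and function name are invented:

```go
package secretstore

// demoStoreLifecycle is a hypothetical example, not part of the package.
func demoStoreLifecycle() error {
	s := New()
	defer s.Close()

	// Init generates the master key and salt and registers the first
	// passphrase; AddKey/Init wipe the passphrase slice they are given.
	if err := s.Init([]byte("first passphrase")); err != nil {
		return err
	}
	if err := s.SaveTo("/tmp/secrets.store"); err != nil { // path is invented
		return err
	}

	// Later: reopen the file and unlock it with the same passphrase.
	reopened, err := Open("/tmp/secrets.store")
	if err != nil {
		return err
	}
	defer reopened.Close()
	if !reopened.Unlock([]byte("first passphrase")) {
		return ErrNoSuchKey
	}
	return nil
}
```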

update-boot.sh (Executable file, 19 lines)

@ -0,0 +1,19 @@
#! /bin/bash
dls_url="$1"
set -ex
mount -o remount,rw /boot
if [ -e /boot/previous ]; then
rm -fr /boot/previous
fi
if [ -e /boot/current ]; then
mv /boot/current /boot/previous
fi
curl -f "$dls_url/me/boot.tar" | tar xv -C /boot
sync

upload-vmware.sh (Normal file, 27 lines)

@ -0,0 +1,27 @@
set -e
dir=/var/lib/direktil/
PATH=$PATH:$dir
cd $dir
if [ ! -f govc.env ]; then
echo ERROR: govc.env file not found in dir $dir ; exit 1
fi
source govc.env
if [ $# != 2 ]; then
echo "Usage: $0 <VM_NAME> <NOVIT_HOST>" ; exit 1
fi
if [[ -z $NOVIT_VM_FOLDER || -z $NOVIT_ISO_FOLDER ]]; then
echo "ERROR: All GOVC env vars (including NOVIT_VM_FOLDER and NOVIT_ISO_FOLDER) must be provided" ; exit 1
fi
VM=$1
HOST=$2
govc vm.power -off "$NOVIT_VM_FOLDER/$VM" || true
sleep 5
curl -f "localhost:7606/hosts/$HOST/boot.iso" | govc datastore.upload - "$NOVIT_ISO_FOLDER/$VM.iso"
sleep 5
govc vm.power -on "$NOVIT_VM_FOLDER/$VM"


@ -1,5 +0,0 @@
*.sublime-*
.DS_Store
*.swp
*.swo
tags


@ -1,7 +0,0 @@
language: go
go:
- 1.4
- 1.5
- 1.6
- tip


@ -1,12 +0,0 @@
Copyright (c) 2012, Martin Angers
All rights reserved.
Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
* Neither the name of the author nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.


@ -1,187 +0,0 @@
# Purell
Purell is a tiny Go library to normalize URLs. It returns a pure URL. Pure-ell. Sanitizer and all. Yeah, I know...
Based on the [wikipedia paper][wiki] and the [RFC 3986 document][rfc].
[![build status](https://secure.travis-ci.org/PuerkitoBio/purell.png)](http://travis-ci.org/PuerkitoBio/purell)
## Install
`go get github.com/PuerkitoBio/purell`
## Changelog
* **2016-11-14 (v1.1.0)** : IDN: Conform to RFC 5895: Fold character width (thanks to @beeker1121).
* **2016-07-27 (v1.0.0)** : Normalize IDN to ASCII (thanks to @zenovich).
* **2015-02-08** : Add fix for relative paths issue ([PR #5][pr5]) and add fix for unnecessary encoding of reserved characters ([see issue #7][iss7]).
* **v0.2.0** : Add benchmarks, Attempt IDN support.
* **v0.1.0** : Initial release.
## Examples
From `example_test.go` (note that in your code, you would import "github.com/PuerkitoBio/purell", and would prefix references to its methods and constants with "purell."):
```go
package purell
import (
"fmt"
"net/url"
)
func ExampleNormalizeURLString() {
if normalized, err := NormalizeURLString("hTTp://someWEBsite.com:80/Amazing%3f/url/",
FlagLowercaseScheme|FlagLowercaseHost|FlagUppercaseEscapes); err != nil {
panic(err)
} else {
fmt.Print(normalized)
}
// Output: http://somewebsite.com:80/Amazing%3F/url/
}
func ExampleMustNormalizeURLString() {
normalized := MustNormalizeURLString("hTTpS://someWEBsite.com:443/Amazing%fa/url/",
FlagsUnsafeGreedy)
fmt.Print(normalized)
// Output: http://somewebsite.com/Amazing%FA/url
}
func ExampleNormalizeURL() {
if u, err := url.Parse("Http://SomeUrl.com:8080/a/b/.././c///g?c=3&a=1&b=9&c=0#target"); err != nil {
panic(err)
} else {
normalized := NormalizeURL(u, FlagsUsuallySafeGreedy|FlagRemoveDuplicateSlashes|FlagRemoveFragment)
fmt.Print(normalized)
}
// Output: http://someurl.com:8080/a/c/g?c=3&a=1&b=9&c=0
}
```
## API
As seen in the examples above, purell offers three methods, `NormalizeURLString(string, NormalizationFlags) (string, error)`, `MustNormalizeURLString(string, NormalizationFlags) (string)` and `NormalizeURL(*url.URL, NormalizationFlags) (string)`. They all normalize the provided URL based on the specified flags. Here are the available flags:
```go
const (
// Safe normalizations
FlagLowercaseScheme NormalizationFlags = 1 << iota // HTTP://host -> http://host, applied by default in Go1.1
FlagLowercaseHost // http://HOST -> http://host
FlagUppercaseEscapes // http://host/t%ef -> http://host/t%EF
FlagDecodeUnnecessaryEscapes // http://host/t%41 -> http://host/tA
FlagEncodeNecessaryEscapes // http://host/!"#$ -> http://host/%21%22#$
FlagRemoveDefaultPort // http://host:80 -> http://host
FlagRemoveEmptyQuerySeparator // http://host/path? -> http://host/path
// Usually safe normalizations
FlagRemoveTrailingSlash // http://host/path/ -> http://host/path
FlagAddTrailingSlash // http://host/path -> http://host/path/ (should choose only one of these add/remove trailing slash flags)
FlagRemoveDotSegments // http://host/path/./a/b/../c -> http://host/path/a/c
// Unsafe normalizations
FlagRemoveDirectoryIndex // http://host/path/index.html -> http://host/path/
FlagRemoveFragment // http://host/path#fragment -> http://host/path
FlagForceHTTP // https://host -> http://host
FlagRemoveDuplicateSlashes // http://host/path//a///b -> http://host/path/a/b
FlagRemoveWWW // http://www.host/ -> http://host/
FlagAddWWW // http://host/ -> http://www.host/ (should choose only one of these add/remove WWW flags)
FlagSortQuery // http://host/path?c=3&b=2&a=1&b=1 -> http://host/path?a=1&b=1&b=2&c=3
// Normalizations not in the wikipedia article, required to cover tests cases
// submitted by jehiah
FlagDecodeDWORDHost // http://1113982867 -> http://66.102.7.147
FlagDecodeOctalHost // http://0102.0146.07.0223 -> http://66.102.7.147
FlagDecodeHexHost // http://0x42660793 -> http://66.102.7.147
FlagRemoveUnnecessaryHostDots // http://.host../path -> http://host/path
FlagRemoveEmptyPortSeparator // http://host:/path -> http://host/path
// Convenience set of safe normalizations
FlagsSafe NormalizationFlags = FlagLowercaseHost | FlagLowercaseScheme | FlagUppercaseEscapes | FlagDecodeUnnecessaryEscapes | FlagEncodeNecessaryEscapes | FlagRemoveDefaultPort | FlagRemoveEmptyQuerySeparator
// For convenience sets, "greedy" uses the "remove trailing slash" and "remove www. prefix" flags,
// while "non-greedy" uses the "add (or keep) the trailing slash" and "add www. prefix".
// Convenience set of usually safe normalizations (includes FlagsSafe)
FlagsUsuallySafeGreedy NormalizationFlags = FlagsSafe | FlagRemoveTrailingSlash | FlagRemoveDotSegments
FlagsUsuallySafeNonGreedy NormalizationFlags = FlagsSafe | FlagAddTrailingSlash | FlagRemoveDotSegments
// Convenience set of unsafe normalizations (includes FlagsUsuallySafe)
FlagsUnsafeGreedy NormalizationFlags = FlagsUsuallySafeGreedy | FlagRemoveDirectoryIndex | FlagRemoveFragment | FlagForceHTTP | FlagRemoveDuplicateSlashes | FlagRemoveWWW | FlagSortQuery
FlagsUnsafeNonGreedy NormalizationFlags = FlagsUsuallySafeNonGreedy | FlagRemoveDirectoryIndex | FlagRemoveFragment | FlagForceHTTP | FlagRemoveDuplicateSlashes | FlagAddWWW | FlagSortQuery
// Convenience set of all available flags
FlagsAllGreedy = FlagsUnsafeGreedy | FlagDecodeDWORDHost | FlagDecodeOctalHost | FlagDecodeHexHost | FlagRemoveUnnecessaryHostDots | FlagRemoveEmptyPortSeparator
FlagsAllNonGreedy = FlagsUnsafeNonGreedy | FlagDecodeDWORDHost | FlagDecodeOctalHost | FlagDecodeHexHost | FlagRemoveUnnecessaryHostDots | FlagRemoveEmptyPortSeparator
)
```
For convenience, the set of flags `FlagsSafe`, `FlagsUsuallySafe[Greedy|NonGreedy]`, `FlagsUnsafe[Greedy|NonGreedy]` and `FlagsAll[Greedy|NonGreedy]` are provided for the similarly grouped normalizations on [wikipedia's URL normalization page][wiki]. You can add (using the bitwise OR `|` operator) or remove (using the bitwise AND NOT `&^` operator) individual flags from the sets if required, to build your own custom set.
The [full godoc reference is available on gopkgdoc][godoc].
Some things to note:
* `FlagDecodeUnnecessaryEscapes`, `FlagEncodeNecessaryEscapes`, `FlagUppercaseEscapes` and `FlagRemoveEmptyQuerySeparator` are always implicitly set, because internally, the URL string is parsed as an URL object, which automatically decodes unnecessary escapes, uppercases and encodes necessary ones, and removes empty query separators (an unnecessary `?` at the end of the url). So this operation cannot **not** be done. For this reason, `FlagRemoveEmptyQuerySeparator` (as well as the other three) has been included in the `FlagsSafe` convenience set, instead of `FlagsUnsafe`, where Wikipedia puts it.
* The `FlagDecodeUnnecessaryEscapes` decodes the following escapes (*from -> to*):
- %24 -> $
- %26 -> &
- %2B-%3B -> +,-./0123456789:;
- %3D -> =
- %40-%5A -> @ABCDEFGHIJKLMNOPQRSTUVWXYZ
- %5F -> _
- %61-%7A -> abcdefghijklmnopqrstuvwxyz
- %7E -> ~
* When the `NormalizeURL` function is used (passing an URL object), this source URL object is modified (that is, after the call, the URL object will be modified to reflect the normalization).
* The *replace IP with domain name* normalization (`http://208.77.188.166/ → http://www.example.com/`) is obviously not possible for a library without making some network requests. This is not implemented in purell.
* The *remove unused query string parameters* and *remove default query parameters* are also not implemented, since this is a very case-specific normalization, and it is quite trivial to do with an URL object.
### Safe vs Usually Safe vs Unsafe
Purell allows you to control the level of risk you take while normalizing an URL. You can aggressively normalize, play it totally safe, or anything in between.
Consider the following URL:
`HTTPS://www.RooT.com/toto/t%45%1f///a/./b/../c/?z=3&w=2&a=4&w=1#invalid`
Normalizing with the `FlagsSafe` gives:
`https://www.root.com/toto/tE%1F///a/./b/../c/?z=3&w=2&a=4&w=1#invalid`
With the `FlagsUsuallySafeGreedy`:
`https://www.root.com/toto/tE%1F///a/c?z=3&w=2&a=4&w=1#invalid`
And with `FlagsUnsafeGreedy`:
`http://root.com/toto/tE%1F/a/c?a=4&w=1&w=2&z=3`
## TODOs
* Add a class/default instance to allow specifying custom directory index names? At the moment, removing directory index removes `(^|/)((?:default|index)\.\w{1,4})$`.
## Thanks / Contributions
@rogpeppe
@jehiah
@opennota
@pchristopher1275
@zenovich
@beeker1121
## License
The [BSD 3-Clause license][bsd].
[bsd]: http://opensource.org/licenses/BSD-3-Clause
[wiki]: http://en.wikipedia.org/wiki/URL_normalization
[rfc]: http://tools.ietf.org/html/rfc3986#section-6
[godoc]: http://go.pkgdoc.org/github.com/PuerkitoBio/purell
[pr5]: https://github.com/PuerkitoBio/purell/pull/5
[iss7]: https://github.com/PuerkitoBio/purell/issues/7


@ -1,379 +0,0 @@
/*
Package purell offers URL normalization as described on the wikipedia page:
http://en.wikipedia.org/wiki/URL_normalization
*/
package purell
import (
"bytes"
"fmt"
"net/url"
"regexp"
"sort"
"strconv"
"strings"
"github.com/PuerkitoBio/urlesc"
"golang.org/x/net/idna"
"golang.org/x/text/unicode/norm"
"golang.org/x/text/width"
)
// A set of normalization flags determines how a URL will
// be normalized.
type NormalizationFlags uint
const (
// Safe normalizations
FlagLowercaseScheme NormalizationFlags = 1 << iota // HTTP://host -> http://host, applied by default in Go1.1
FlagLowercaseHost // http://HOST -> http://host
FlagUppercaseEscapes // http://host/t%ef -> http://host/t%EF
FlagDecodeUnnecessaryEscapes // http://host/t%41 -> http://host/tA
FlagEncodeNecessaryEscapes // http://host/!"#$ -> http://host/%21%22#$
FlagRemoveDefaultPort // http://host:80 -> http://host
FlagRemoveEmptyQuerySeparator // http://host/path? -> http://host/path
// Usually safe normalizations
FlagRemoveTrailingSlash // http://host/path/ -> http://host/path
FlagAddTrailingSlash // http://host/path -> http://host/path/ (should choose only one of these add/remove trailing slash flags)
FlagRemoveDotSegments // http://host/path/./a/b/../c -> http://host/path/a/c
// Unsafe normalizations
FlagRemoveDirectoryIndex // http://host/path/index.html -> http://host/path/
FlagRemoveFragment // http://host/path#fragment -> http://host/path
FlagForceHTTP // https://host -> http://host
FlagRemoveDuplicateSlashes // http://host/path//a///b -> http://host/path/a/b
FlagRemoveWWW // http://www.host/ -> http://host/
FlagAddWWW // http://host/ -> http://www.host/ (should choose only one of these add/remove WWW flags)
FlagSortQuery // http://host/path?c=3&b=2&a=1&b=1 -> http://host/path?a=1&b=1&b=2&c=3
// Normalizations not in the wikipedia article, required to cover tests cases
// submitted by jehiah
FlagDecodeDWORDHost // http://1113982867 -> http://66.102.7.147
FlagDecodeOctalHost // http://0102.0146.07.0223 -> http://66.102.7.147
FlagDecodeHexHost // http://0x42660793 -> http://66.102.7.147
FlagRemoveUnnecessaryHostDots // http://.host../path -> http://host/path
FlagRemoveEmptyPortSeparator // http://host:/path -> http://host/path
// Convenience set of safe normalizations
FlagsSafe NormalizationFlags = FlagLowercaseHost | FlagLowercaseScheme | FlagUppercaseEscapes | FlagDecodeUnnecessaryEscapes | FlagEncodeNecessaryEscapes | FlagRemoveDefaultPort | FlagRemoveEmptyQuerySeparator
// For convenience sets, "greedy" uses the "remove trailing slash" and "remove www. prefix" flags,
// while "non-greedy" uses the "add (or keep) the trailing slash" and "add www. prefix".
// Convenience set of usually safe normalizations (includes FlagsSafe)
FlagsUsuallySafeGreedy NormalizationFlags = FlagsSafe | FlagRemoveTrailingSlash | FlagRemoveDotSegments
FlagsUsuallySafeNonGreedy NormalizationFlags = FlagsSafe | FlagAddTrailingSlash | FlagRemoveDotSegments
// Convenience set of unsafe normalizations (includes FlagsUsuallySafe)
FlagsUnsafeGreedy NormalizationFlags = FlagsUsuallySafeGreedy | FlagRemoveDirectoryIndex | FlagRemoveFragment | FlagForceHTTP | FlagRemoveDuplicateSlashes | FlagRemoveWWW | FlagSortQuery
FlagsUnsafeNonGreedy NormalizationFlags = FlagsUsuallySafeNonGreedy | FlagRemoveDirectoryIndex | FlagRemoveFragment | FlagForceHTTP | FlagRemoveDuplicateSlashes | FlagAddWWW | FlagSortQuery
// Convenience set of all available flags
FlagsAllGreedy = FlagsUnsafeGreedy | FlagDecodeDWORDHost | FlagDecodeOctalHost | FlagDecodeHexHost | FlagRemoveUnnecessaryHostDots | FlagRemoveEmptyPortSeparator
FlagsAllNonGreedy = FlagsUnsafeNonGreedy | FlagDecodeDWORDHost | FlagDecodeOctalHost | FlagDecodeHexHost | FlagRemoveUnnecessaryHostDots | FlagRemoveEmptyPortSeparator
)
const (
defaultHttpPort = ":80"
defaultHttpsPort = ":443"
)
// Regular expressions used by the normalizations
var rxPort = regexp.MustCompile(`(:\d+)/?$`)
var rxDirIndex = regexp.MustCompile(`(^|/)((?:default|index)\.\w{1,4})$`)
var rxDupSlashes = regexp.MustCompile(`/{2,}`)
var rxDWORDHost = regexp.MustCompile(`^(\d+)((?:\.+)?(?:\:\d*)?)$`)
var rxOctalHost = regexp.MustCompile(`^(0\d*)\.(0\d*)\.(0\d*)\.(0\d*)((?:\.+)?(?:\:\d*)?)$`)
var rxHexHost = regexp.MustCompile(`^0x([0-9A-Fa-f]+)((?:\.+)?(?:\:\d*)?)$`)
var rxHostDots = regexp.MustCompile(`^(.+?)(:\d+)?$`)
var rxEmptyPort = regexp.MustCompile(`:+$`)
// Map of flags to implementation function.
// FlagDecodeUnnecessaryEscapes has no action, since it is done automatically
// by parsing the string as an URL. Same for FlagUppercaseEscapes and FlagRemoveEmptyQuerySeparator.
// Since maps have undefined traversing order, make a slice of ordered keys
var flagsOrder = []NormalizationFlags{
FlagLowercaseScheme,
FlagLowercaseHost,
FlagRemoveDefaultPort,
FlagRemoveDirectoryIndex,
FlagRemoveDotSegments,
FlagRemoveFragment,
FlagForceHTTP, // Must be after remove default port (because https=443/http=80)
FlagRemoveDuplicateSlashes,
FlagRemoveWWW,
FlagAddWWW,
FlagSortQuery,
FlagDecodeDWORDHost,
FlagDecodeOctalHost,
FlagDecodeHexHost,
FlagRemoveUnnecessaryHostDots,
FlagRemoveEmptyPortSeparator,
FlagRemoveTrailingSlash, // These two (add/remove trailing slash) must be last
FlagAddTrailingSlash,
}
// ... and then the map, where order is unimportant
var flags = map[NormalizationFlags]func(*url.URL){
FlagLowercaseScheme: lowercaseScheme,
FlagLowercaseHost: lowercaseHost,
FlagRemoveDefaultPort: removeDefaultPort,
FlagRemoveDirectoryIndex: removeDirectoryIndex,
FlagRemoveDotSegments: removeDotSegments,
FlagRemoveFragment: removeFragment,
FlagForceHTTP: forceHTTP,
FlagRemoveDuplicateSlashes: removeDuplicateSlashes,
FlagRemoveWWW: removeWWW,
FlagAddWWW: addWWW,
FlagSortQuery: sortQuery,
FlagDecodeDWORDHost: decodeDWORDHost,
FlagDecodeOctalHost: decodeOctalHost,
FlagDecodeHexHost: decodeHexHost,
FlagRemoveUnnecessaryHostDots: removeUnncessaryHostDots,
FlagRemoveEmptyPortSeparator: removeEmptyPortSeparator,
FlagRemoveTrailingSlash: removeTrailingSlash,
FlagAddTrailingSlash: addTrailingSlash,
}
// MustNormalizeURLString returns the normalized string, and panics if an error occurs.
// It takes an URL string as input, as well as the normalization flags.
func MustNormalizeURLString(u string, f NormalizationFlags) string {
result, e := NormalizeURLString(u, f)
if e != nil {
panic(e)
}
return result
}
// NormalizeURLString returns the normalized string, or an error if it can't be parsed into an URL object.
// It takes an URL string as input, as well as the normalization flags.
func NormalizeURLString(u string, f NormalizationFlags) (string, error) {
parsed, err := url.Parse(u)
if err != nil {
return "", err
}
if f&FlagLowercaseHost == FlagLowercaseHost {
parsed.Host = strings.ToLower(parsed.Host)
}
// The idna package doesn't fully conform to RFC 5895
// (https://tools.ietf.org/html/rfc5895), so we do it here.
// Taken from Go 1.8 cycle source, courtesy of bradfitz.
// TODO: Remove when (if?) idna package conforms to RFC 5895.
parsed.Host = width.Fold.String(parsed.Host)
parsed.Host = norm.NFC.String(parsed.Host)
if parsed.Host, err = idna.ToASCII(parsed.Host); err != nil {
return "", err
}
return NormalizeURL(parsed, f), nil
}
// NormalizeURL returns the normalized string.
// It takes a parsed URL object as input, as well as the normalization flags.
func NormalizeURL(u *url.URL, f NormalizationFlags) string {
for _, k := range flagsOrder {
if f&k == k {
flags[k](u)
}
}
return urlesc.Escape(u)
}
func lowercaseScheme(u *url.URL) {
if len(u.Scheme) > 0 {
u.Scheme = strings.ToLower(u.Scheme)
}
}
func lowercaseHost(u *url.URL) {
if len(u.Host) > 0 {
u.Host = strings.ToLower(u.Host)
}
}
func removeDefaultPort(u *url.URL) {
if len(u.Host) > 0 {
scheme := strings.ToLower(u.Scheme)
u.Host = rxPort.ReplaceAllStringFunc(u.Host, func(val string) string {
if (scheme == "http" && val == defaultHttpPort) || (scheme == "https" && val == defaultHttpsPort) {
return ""
}
return val
})
}
}
func removeTrailingSlash(u *url.URL) {
if l := len(u.Path); l > 0 {
if strings.HasSuffix(u.Path, "/") {
u.Path = u.Path[:l-1]
}
} else if l = len(u.Host); l > 0 {
if strings.HasSuffix(u.Host, "/") {
u.Host = u.Host[:l-1]
}
}
}
func addTrailingSlash(u *url.URL) {
if l := len(u.Path); l > 0 {
if !strings.HasSuffix(u.Path, "/") {
u.Path += "/"
}
} else if l = len(u.Host); l > 0 {
if !strings.HasSuffix(u.Host, "/") {
u.Host += "/"
}
}
}
func removeDotSegments(u *url.URL) {
if len(u.Path) > 0 {
var dotFree []string
var lastIsDot bool
sections := strings.Split(u.Path, "/")
for _, s := range sections {
if s == ".." {
if len(dotFree) > 0 {
dotFree = dotFree[:len(dotFree)-1]
}
} else if s != "." {
dotFree = append(dotFree, s)
}
lastIsDot = (s == "." || s == "..")
}
// Special case if host does not end with / and new path does not begin with /
u.Path = strings.Join(dotFree, "/")
if u.Host != "" && !strings.HasSuffix(u.Host, "/") && !strings.HasPrefix(u.Path, "/") {
u.Path = "/" + u.Path
}
// Special case if the last segment was a dot, make sure the path ends with a slash
if lastIsDot && !strings.HasSuffix(u.Path, "/") {
u.Path += "/"
}
}
}
func removeDirectoryIndex(u *url.URL) {
if len(u.Path) > 0 {
u.Path = rxDirIndex.ReplaceAllString(u.Path, "$1")
}
}
func removeFragment(u *url.URL) {
u.Fragment = ""
}
func forceHTTP(u *url.URL) {
if strings.ToLower(u.Scheme) == "https" {
u.Scheme = "http"
}
}
func removeDuplicateSlashes(u *url.URL) {
if len(u.Path) > 0 {
u.Path = rxDupSlashes.ReplaceAllString(u.Path, "/")
}
}
func removeWWW(u *url.URL) {
if len(u.Host) > 0 && strings.HasPrefix(strings.ToLower(u.Host), "www.") {
u.Host = u.Host[4:]
}
}
func addWWW(u *url.URL) {
if len(u.Host) > 0 && !strings.HasPrefix(strings.ToLower(u.Host), "www.") {
u.Host = "www." + u.Host
}
}
func sortQuery(u *url.URL) {
q := u.Query()
if len(q) > 0 {
arKeys := make([]string, len(q))
i := 0
for k, _ := range q {
arKeys[i] = k
i++
}
sort.Strings(arKeys)
buf := new(bytes.Buffer)
for _, k := range arKeys {
sort.Strings(q[k])
for _, v := range q[k] {
if buf.Len() > 0 {
buf.WriteRune('&')
}
buf.WriteString(fmt.Sprintf("%s=%s", k, urlesc.QueryEscape(v)))
}
}
// Rebuild the raw query string
u.RawQuery = buf.String()
}
}
func decodeDWORDHost(u *url.URL) {
if len(u.Host) > 0 {
if matches := rxDWORDHost.FindStringSubmatch(u.Host); len(matches) > 2 {
var parts [4]int64
dword, _ := strconv.ParseInt(matches[1], 10, 0)
for i, shift := range []uint{24, 16, 8, 0} {
parts[i] = dword >> shift & 0xFF
}
u.Host = fmt.Sprintf("%d.%d.%d.%d%s", parts[0], parts[1], parts[2], parts[3], matches[2])
}
}
}
func decodeOctalHost(u *url.URL) {
if len(u.Host) > 0 {
if matches := rxOctalHost.FindStringSubmatch(u.Host); len(matches) > 5 {
var parts [4]int64
for i := 1; i <= 4; i++ {
parts[i-1], _ = strconv.ParseInt(matches[i], 8, 0)
}
u.Host = fmt.Sprintf("%d.%d.%d.%d%s", parts[0], parts[1], parts[2], parts[3], matches[5])
}
}
}
func decodeHexHost(u *url.URL) {
if len(u.Host) > 0 {
if matches := rxHexHost.FindStringSubmatch(u.Host); len(matches) > 2 {
// Conversion is safe because of regex validation
parsed, _ := strconv.ParseInt(matches[1], 16, 0)
// Set host as DWORD (base 10) encoded host
u.Host = fmt.Sprintf("%d%s", parsed, matches[2])
// The rest is the same as decoding a DWORD host
decodeDWORDHost(u)
}
}
}
func removeUnncessaryHostDots(u *url.URL) {
if len(u.Host) > 0 {
if matches := rxHostDots.FindStringSubmatch(u.Host); len(matches) > 1 {
// Trim the leading and trailing dots
u.Host = strings.Trim(matches[1], ".")
if len(matches) > 2 {
u.Host += matches[2]
}
}
}
}
func removeEmptyPortSeparator(u *url.URL) {
if len(u.Host) > 0 {
u.Host = rxEmptyPort.ReplaceAllString(u.Host, "")
}
}


@ -1,15 +0,0 @@
language: go
go:
- 1.4.x
- 1.5.x
- 1.6.x
- 1.7.x
- 1.8.x
- tip
install:
- go build .
script:
- go test -v


@ -1,27 +0,0 @@
Copyright (c) 2012 The Go Authors. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
* Neither the name of Google Inc. nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.


@ -1,16 +0,0 @@
urlesc [![Build Status](https://travis-ci.org/PuerkitoBio/urlesc.svg?branch=master)](https://travis-ci.org/PuerkitoBio/urlesc) [![GoDoc](http://godoc.org/github.com/PuerkitoBio/urlesc?status.svg)](http://godoc.org/github.com/PuerkitoBio/urlesc)
======
Package urlesc implements query escaping as per RFC 3986.
It contains some parts of the net/url package, modified so as to allow
some reserved characters incorrectly escaped by net/url (see [issue 5684](https://github.com/golang/go/issues/5684)).
## Install
go get github.com/PuerkitoBio/urlesc
## License
Go license (BSD-3-Clause)


@ -1,180 +0,0 @@
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package urlesc implements query escaping as per RFC 3986.
// It contains some parts of the net/url package, modified so as to allow
// some reserved characters incorrectly escaped by net/url.
// See https://github.com/golang/go/issues/5684
package urlesc
import (
"bytes"
"net/url"
"strings"
)
type encoding int
const (
encodePath encoding = 1 + iota
encodeUserPassword
encodeQueryComponent
encodeFragment
)
// Return true if the specified character should be escaped when
// appearing in a URL string, according to RFC 3986.
func shouldEscape(c byte, mode encoding) bool {
// §2.3 Unreserved characters (alphanum)
if 'A' <= c && c <= 'Z' || 'a' <= c && c <= 'z' || '0' <= c && c <= '9' {
return false
}
switch c {
case '-', '.', '_', '~': // §2.3 Unreserved characters (mark)
return false
// §2.2 Reserved characters (reserved)
case ':', '/', '?', '#', '[', ']', '@', // gen-delims
'!', '$', '&', '\'', '(', ')', '*', '+', ',', ';', '=': // sub-delims
// Different sections of the URL allow a few of
// the reserved characters to appear unescaped.
switch mode {
case encodePath: // §3.3
// The RFC allows sub-delims and : @.
// '/', '[' and ']' can be used to assign meaning to individual path
// segments. This package only manipulates the path as a whole,
// so we allow those as well. That leaves only ? and # to escape.
return c == '?' || c == '#'
case encodeUserPassword: // §3.2.1
// The RFC allows : and sub-delims in
// userinfo. The parsing of userinfo treats ':' as special so we must escape
// all the gen-delims.
return c == ':' || c == '/' || c == '?' || c == '#' || c == '[' || c == ']' || c == '@'
case encodeQueryComponent: // §3.4
// The RFC allows / and ?.
return c != '/' && c != '?'
case encodeFragment: // §4.1
// The RFC text is silent but the grammar allows
// everything, so escape nothing but #
return c == '#'
}
}
// Everything else must be escaped.
return true
}
// QueryEscape escapes the string so it can be safely placed
// inside a URL query.
func QueryEscape(s string) string {
return escape(s, encodeQueryComponent)
}
func escape(s string, mode encoding) string {
spaceCount, hexCount := 0, 0
for i := 0; i < len(s); i++ {
c := s[i]
if shouldEscape(c, mode) {
if c == ' ' && mode == encodeQueryComponent {
spaceCount++
} else {
hexCount++
}
}
}
if spaceCount == 0 && hexCount == 0 {
return s
}
t := make([]byte, len(s)+2*hexCount)
j := 0
for i := 0; i < len(s); i++ {
switch c := s[i]; {
case c == ' ' && mode == encodeQueryComponent:
t[j] = '+'
j++
case shouldEscape(c, mode):
t[j] = '%'
t[j+1] = "0123456789ABCDEF"[c>>4]
t[j+2] = "0123456789ABCDEF"[c&15]
j += 3
default:
t[j] = s[i]
j++
}
}
return string(t)
}
var uiReplacer = strings.NewReplacer(
"%21", "!",
"%27", "'",
"%28", "(",
"%29", ")",
"%2A", "*",
)
// unescapeUserinfo unescapes some characters that need not to be escaped as per RFC3986.
func unescapeUserinfo(s string) string {
return uiReplacer.Replace(s)
}
// Escape reassembles the URL into a valid URL string.
// The general form of the result is one of:
//
// scheme:opaque
// scheme://userinfo@host/path?query#fragment
//
// If u.Opaque is non-empty, String uses the first form;
// otherwise it uses the second form.
//
// In the second form, the following rules apply:
// - if u.Scheme is empty, scheme: is omitted.
// - if u.User is nil, userinfo@ is omitted.
// - if u.Host is empty, host/ is omitted.
// - if u.Scheme and u.Host are empty and u.User is nil,
// the entire scheme://userinfo@host/ is omitted.
// - if u.Host is non-empty and u.Path begins with a /,
// the form host/path does not add its own /.
// - if u.RawQuery is empty, ?query is omitted.
// - if u.Fragment is empty, #fragment is omitted.
func Escape(u *url.URL) string {
var buf bytes.Buffer
if u.Scheme != "" {
buf.WriteString(u.Scheme)
buf.WriteByte(':')
}
if u.Opaque != "" {
buf.WriteString(u.Opaque)
} else {
if u.Scheme != "" || u.Host != "" || u.User != nil {
buf.WriteString("//")
if ui := u.User; ui != nil {
buf.WriteString(unescapeUserinfo(ui.String()))
buf.WriteByte('@')
}
if h := u.Host; h != "" {
buf.WriteString(h)
}
}
if u.Path != "" && u.Path[0] != '/' && u.Host != "" {
buf.WriteByte('/')
}
buf.WriteString(escape(u.Path, encodePath))
}
if u.RawQuery != "" {
buf.WriteByte('?')
buf.WriteString(u.RawQuery)
}
if u.Fragment != "" {
buf.WriteByte('#')
buf.WriteString(escape(u.Fragment, encodeFragment))
}
return buf.String()
}


@ -1,3 +0,0 @@
.fuzz/
*.zip


@ -1,10 +0,0 @@
language: go
go:
- 1.4.3
- 1.5.4
- 1.6.4
- 1.7.6
- 1.8.3
script: make check


@ -1,26 +0,0 @@
Copyright (c) 2017 Ryan Armstrong. All rights reserved.
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
3. Neither the name of the copyright holder nor the names of its contributors
may be used to endorse or promote products derived from this software without
specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.


@ -1,18 +0,0 @@
PACKAGE = github.com/cavaliercoder/go-cpio
all: check
check:
go test -v
cpio-fuzz.zip: *.go
go-fuzz-build $(PACKAGE)
fuzz: cpio-fuzz.zip
go-fuzz -bin=./cpio-fuzz.zip -workdir=.fuzz/
clean-fuzz:
rm -rf cpio-fuzz.zip .fuzz/crashers/* .fuzz/suppressions/*
.PHONY: all check


@ -1,62 +0,0 @@
# go-cpio [![GoDoc](https://godoc.org/github.com/cavaliercoder/go-cpio?status.svg)](https://godoc.org/github.com/cavaliercoder/go-cpio) [![Build Status](https://travis-ci.org/cavaliercoder/go-cpio.svg?branch=master)](https://travis-ci.org/cavaliercoder/go-cpio) [![Go Report Card](https://goreportcard.com/badge/github.com/cavaliercoder/go-cpio)](https://goreportcard.com/report/github.com/cavaliercoder/go-cpio)
This package provides a Go native implementation of the CPIO archive file
format.
Currently, only the SVR4 (New ASCII) format is supported, both with and without
checksums.
```go
// Create a buffer to write our archive to.
buf := new(bytes.Buffer)
// Create a new cpio archive.
w := cpio.NewWriter(buf)
// Add some files to the archive.
var files = []struct {
Name, Body string
}{
{"readme.txt", "This archive contains some text files."},
{"gopher.txt", "Gopher names:\nGeorge\nGeoffrey\nGonzo"},
{"todo.txt", "Get animal handling license."},
}
for _, file := range files {
hdr := &cpio.Header{
Name: file.Name,
Mode: 0600,
Size: int64(len(file.Body)),
}
if err := w.WriteHeader(hdr); err != nil {
log.Fatalln(err)
}
if _, err := w.Write([]byte(file.Body)); err != nil {
log.Fatalln(err)
}
}
// Make sure to check the error on Close.
if err := w.Close(); err != nil {
log.Fatalln(err)
}
// Open the cpio archive for reading.
b := bytes.NewReader(buf.Bytes())
r := cpio.NewReader(b)
// Iterate through the files in the archive.
for {
hdr, err := r.Next()
if err == io.EOF {
// end of cpio archive
break
}
if err != nil {
log.Fatalln(err)
}
fmt.Printf("Contents of %s:\n", hdr.Name)
if _, err := io.Copy(os.Stdout, r); err != nil {
log.Fatalln(err)
}
fmt.Println()
}
```


@ -1,8 +0,0 @@
/*
Package cpio implements access to CPIO archives. Currently, only the SVR4 (New
ASCII) format is supported, both with and without checksums.
References:
https://www.freebsd.org/cgi/man.cgi?query=cpio&sektion=5
*/
package cpio


@ -1,75 +0,0 @@
package cpio
import (
"os"
"path"
"time"
)
// headerFileInfo implements os.FileInfo.
type headerFileInfo struct {
h *Header
}
// Name returns the base name of the file.
func (fi headerFileInfo) Name() string {
if fi.IsDir() {
return path.Base(path.Clean(fi.h.Name))
}
return path.Base(fi.h.Name)
}
func (fi headerFileInfo) Size() int64 { return fi.h.Size }
func (fi headerFileInfo) IsDir() bool { return fi.Mode().IsDir() }
func (fi headerFileInfo) ModTime() time.Time { return fi.h.ModTime }
func (fi headerFileInfo) Sys() interface{} { return fi.h }
func (fi headerFileInfo) Mode() (mode os.FileMode) {
// Set file permission bits.
mode = os.FileMode(fi.h.Mode).Perm()
// Set setuid, setgid and sticky bits.
if fi.h.Mode&ModeSetuid != 0 {
// setuid
mode |= os.ModeSetuid
}
if fi.h.Mode&ModeSetgid != 0 {
// setgid
mode |= os.ModeSetgid
}
if fi.h.Mode&ModeSticky != 0 {
// sticky
mode |= os.ModeSticky
}
// Set file mode bits.
// clear perm, setuid, setgid and sticky bits.
m := os.FileMode(fi.h.Mode) & 0170000
if m == ModeDir {
// directory
mode |= os.ModeDir
}
if m == ModeNamedPipe {
// named pipe (FIFO)
mode |= os.ModeNamedPipe
}
if m == ModeSymlink {
// symbolic link
mode |= os.ModeSymlink
}
if m == ModeDevice {
// device file
mode |= os.ModeDevice
}
if m == ModeCharDevice {
// Unix character device
mode |= os.ModeDevice
mode |= os.ModeCharDevice
}
if m == ModeSocket {
// Unix domain socket
mode |= os.ModeSocket
}
return mode
}


@ -1,35 +0,0 @@
// +build gofuzz
package cpio
import "bytes"
import "io"
// Fuzz tests the parsing and error handling of random byte arrays using
// https://github.com/dvyukov/go-fuzz.
func Fuzz(data []byte) int {
r := NewReader(bytes.NewReader(data))
h := NewHash()
for {
hdr, err := r.Next()
if err != nil {
if hdr != nil {
panic("hdr != nil on error")
}
if err == io.EOF {
// everything worked with random input... interesting
return 1
}
// error returned for random input. Good!
return -1
}
// hash file
h.Reset()
io.CopyN(h, r, hdr.Size)
h.Sum32()
// convert file header
FileInfoHeader(hdr.FileInfo())
}
}


@ -1,45 +0,0 @@
package cpio
import (
"encoding/binary"
"hash"
)
type digest struct {
sum uint32
}
// NewHash returns a new hash.Hash32 computing the SVR4 checksum.
func NewHash() hash.Hash32 {
return &digest{}
}
func (d *digest) Write(p []byte) (n int, err error) {
for _, b := range p {
d.sum += uint32(b & 0xFF)
}
return len(p), nil
}
func (d *digest) Sum(b []byte) []byte {
out := [4]byte{}
binary.LittleEndian.PutUint32(out[:], d.sum)
return append(b, out[:]...)
}
func (d *digest) Sum32() uint32 {
return d.sum
}
func (d *digest) Reset() {
d.sum = 0
}
func (d *digest) Size() int {
return 4
}
func (d *digest) BlockSize() int {
return 1
}


@ -1,153 +0,0 @@
package cpio
import (
"errors"
"fmt"
"os"
"time"
)
// Mode constants from the cpio spec.
const (
ModeSetuid = 04000 // Set uid
ModeSetgid = 02000 // Set gid
ModeSticky = 01000 // Save text (sticky bit)
ModeDir = 040000 // Directory
ModeNamedPipe = 010000 // FIFO
ModeRegular = 0100000 // Regular file
ModeSymlink = 0120000 // Symbolic link
ModeDevice = 060000 // Block special file
ModeCharDevice = 020000 // Character special file
ModeSocket = 0140000 // Socket
ModeType = 0170000 // Mask for the type bits
ModePerm = 0777 // Unix permission bits
)
const (
// headerEOF is the value of the filename of the last header in a CPIO archive.
headerEOF = "TRAILER!!!"
)
var (
ErrHeader = errors.New("cpio: invalid cpio header")
)
// A FileMode represents a file's mode and permission bits.
type FileMode int64
func (m FileMode) String() string {
return fmt.Sprintf("%#o", m)
}
// IsDir reports whether m describes a directory. That is, it tests for the
// ModeDir bit being set in m.
func (m FileMode) IsDir() bool {
return m&ModeDir != 0
}
// IsRegular reports whether m describes a regular file. That is, it tests for
// the ModeRegular bit being set in m.
func (m FileMode) IsRegular() bool {
return m&^ModePerm == ModeRegular
}
// Perm returns the Unix permission bits in m.
func (m FileMode) Perm() FileMode {
return m & ModePerm
}
// Checksum is the sum of all bytes in the file data. This sum is computed
// treating all bytes as unsigned values and using unsigned arithmetic. Only
// the least-significant 32 bits of the sum are stored. Use NewHash to compute
// the actual checksum of an archived file.
type Checksum uint32
func (c Checksum) String() string {
return fmt.Sprintf("%08X", uint32(c))
}
// A Header represents a single header in a CPIO archive.
type Header struct {
DeviceID int
Inode int64 // inode number
Mode FileMode // permission and mode bits
UID int // user id of the owner
GID int // group id of the owner
Links int // number of inbound links
ModTime time.Time // modified time
Size int64 // size in bytes
Name string // filename
Linkname string // target name of link
Checksum Checksum // computed checksum
pad int64 // bytes to pad before next header
}
// FileInfo returns an os.FileInfo for the Header.
func (h *Header) FileInfo() os.FileInfo {
return headerFileInfo{h}
}
// FileInfoHeader creates a partially-populated Header from fi.
// If fi describes a symlink, FileInfoHeader records link as the link target.
// If fi describes a directory, a slash is appended to the name.
// Because os.FileInfo's Name method returns only the base name of
// the file it describes, it may be necessary to modify the Name field
// of the returned header to provide the full path name of the file.
func FileInfoHeader(fi os.FileInfo, link string) (*Header, error) {
if fi == nil {
return nil, errors.New("cpio: FileInfo is nil")
}
if sys, ok := fi.Sys().(*Header); ok {
// This FileInfo came from a Header (not the OS). Return a copy of the
// original Header.
h := &Header{}
*h = *sys
return h, nil
}
fm := fi.Mode()
h := &Header{
Name: fi.Name(),
Mode: FileMode(fi.Mode().Perm()), // or'd with Mode* constants later
ModTime: fi.ModTime(),
Size: fi.Size(),
}
switch {
case fm.IsRegular():
h.Mode |= ModeRegular
case fi.IsDir():
h.Mode |= ModeDir
h.Name += "/"
h.Size = 0
case fm&os.ModeSymlink != 0:
h.Mode |= ModeSymlink
h.Linkname = link
case fm&os.ModeDevice != 0:
if fm&os.ModeCharDevice != 0 {
h.Mode |= ModeCharDevice
} else {
h.Mode |= ModeDevice
}
case fm&os.ModeNamedPipe != 0:
h.Mode |= ModeNamedPipe
case fm&os.ModeSocket != 0:
h.Mode |= ModeSocket
default:
return nil, fmt.Errorf("cpio: unknown file mode %v", fm)
}
if fm&os.ModeSetuid != 0 {
h.Mode |= ModeSetuid
}
if fm&os.ModeSetgid != 0 {
h.Mode |= ModeSetgid
}
if fm&os.ModeSticky != 0 {
h.Mode |= ModeSticky
}
return h, nil
}


@ -1,72 +0,0 @@
package cpio
import (
"io"
"io/ioutil"
)
// A Reader provides sequential access to the contents of a CPIO archive. A CPIO
// archive consists of a sequence of files. The Next method advances to the next
// file in the archive (including the first), and then it can be treated as an
// io.Reader to access the file's data.
type Reader struct {
r io.Reader // underlying file reader
hdr *Header // current Header
eof int64 // bytes until the end of the current file
}
// NewReader creates a new Reader reading from r.
func NewReader(r io.Reader) *Reader {
return &Reader{
r: r,
}
}
// Read reads from the current entry in the CPIO archive. It returns 0, io.EOF
// when it reaches the end of that entry, until Next is called to advance to the
// next entry.
func (r *Reader) Read(p []byte) (n int, err error) {
if r.hdr == nil || r.eof == 0 {
return 0, io.EOF
}
rn := len(p)
if r.eof < int64(rn) {
rn = int(r.eof)
}
n, err = r.r.Read(p[0:rn])
r.eof -= int64(n)
return
}
// Next advances to the next entry in the CPIO archive.
// io.EOF is returned at the end of the input.
func (r *Reader) Next() (*Header, error) {
if r.hdr == nil {
return r.next()
}
skp := r.eof + r.hdr.pad
if skp > 0 {
_, err := io.CopyN(ioutil.Discard, r.r, skp)
if err != nil {
return nil, err
}
}
return r.next()
}
func (r *Reader) next() (*Header, error) {
r.eof = 0
hdr, err := readHeader(r.r)
if err != nil {
return nil, err
}
r.hdr = hdr
r.eof = hdr.Size
return hdr, nil
}
// ReadHeader creates a new Header, reading from r.
func readHeader(r io.Reader) (*Header, error) {
// currently only SVR4 format is supported
return readSVR4Header(r)
}


@ -1,152 +0,0 @@
package cpio
import (
"bytes"
"fmt"
"io"
"strconv"
"time"
)
const (
svr4MaxNameSize = 4096 // MAX_PATH
svr4MaxFileSize = 4294967295
)
var svr4Magic = []byte{0x30, 0x37, 0x30, 0x37, 0x30, 0x31} // 070701
func readHex(s string) int64 {
// errors are ignored and 0 returned
i, _ := strconv.ParseInt(s, 16, 64)
return i
}
func writeHex(b []byte, i int64) {
// i needs to be in range of uint32
copy(b, fmt.Sprintf("%08X", i))
}
func readSVR4Header(r io.Reader) (*Header, error) {
var buf [110]byte
if _, err := io.ReadFull(r, buf[:]); err != nil {
return nil, err
}
// TODO: check endianness
// check magic
hasCRC := false
if !bytes.HasPrefix(buf[:], svr4Magic[:5]) {
return nil, ErrHeader
}
if buf[5] == 0x32 { // '2'
hasCRC = true
} else if buf[5] != 0x31 { // '1'
return nil, ErrHeader
}
asc := string(buf[:])
hdr := &Header{}
hdr.Inode = readHex(asc[6:14])
hdr.Mode = FileMode(readHex(asc[14:22]))
hdr.UID = int(readHex(asc[22:30]))
hdr.GID = int(readHex(asc[30:38]))
hdr.Links = int(readHex(asc[38:46]))
hdr.ModTime = time.Unix(readHex(asc[46:54]), 0)
hdr.Size = readHex(asc[54:62])
if hdr.Size > svr4MaxFileSize {
return nil, ErrHeader
}
nameSize := readHex(asc[94:102])
if nameSize < 1 || nameSize > svr4MaxNameSize {
return nil, ErrHeader
}
hdr.Checksum = Checksum(readHex(asc[102:110]))
if !hasCRC && hdr.Checksum != 0 {
return nil, ErrHeader
}
name := make([]byte, nameSize)
if _, err := io.ReadFull(r, name); err != nil {
return nil, err
}
hdr.Name = string(name[:nameSize-1])
if hdr.Name == headerEOF {
return nil, io.EOF
}
// store padding between end of file and next header
hdr.pad = (4 - (hdr.Size % 4)) % 4
// skip to end of header/start of file
pad := (4 - (len(buf)+len(name))%4) % 4
if pad > 0 {
if _, err := io.ReadFull(r, buf[:pad]); err != nil {
return nil, err
}
}
// read link name
if hdr.Mode&^ModePerm == ModeSymlink {
if hdr.Size < 1 || hdr.Size > svr4MaxNameSize {
return nil, ErrHeader
}
b := make([]byte, hdr.Size)
if _, err := io.ReadFull(r, b); err != nil {
return nil, err
}
hdr.Linkname = string(b)
hdr.Size = 0
}
return hdr, nil
}
func writeSVR4Header(w io.Writer, hdr *Header) (pad int64, err error) {
var hdrBuf [110]byte
for i := 0; i < len(hdrBuf); i++ {
hdrBuf[i] = '0'
}
magic := svr4Magic
if hdr.Checksum != 0 {
magic[5] = 0x32
}
copy(hdrBuf[:], magic)
writeHex(hdrBuf[6:14], hdr.Inode)
writeHex(hdrBuf[14:22], int64(hdr.Mode))
writeHex(hdrBuf[22:30], int64(hdr.UID))
writeHex(hdrBuf[30:38], int64(hdr.GID))
writeHex(hdrBuf[38:46], int64(hdr.Links))
if !hdr.ModTime.IsZero() {
writeHex(hdrBuf[46:54], hdr.ModTime.Unix())
}
writeHex(hdrBuf[54:62], hdr.Size)
writeHex(hdrBuf[94:102], int64(len(hdr.Name)+1))
if hdr.Checksum != 0 {
writeHex(hdrBuf[102:110], int64(hdr.Checksum))
}
// write header
_, err = w.Write(hdrBuf[:])
if err != nil {
return
}
// write filename
_, err = io.WriteString(w, hdr.Name+"\x00")
if err != nil {
return
}
// pad to end of filename
npad := (4 - ((len(hdrBuf) + len(hdr.Name) + 1) % 4)) % 4
_, err = w.Write(zeroBlock[:npad])
if err != nil {
return
}
// compute padding to end of file
pad = (4 - (hdr.Size % 4)) % 4
return
}


@ -1,128 +0,0 @@
package cpio
import (
"errors"
"fmt"
"io"
)
var (
ErrWriteTooLong = errors.New("cpio: write too long")
ErrWriteAfterClose = errors.New("cpio: write after close")
)
var trailer = &Header{
Name: string(headerEOF),
Links: 1,
}
var zeroBlock [4]byte
// A Writer provides sequential writing of a CPIO archive. A CPIO archive
// consists of a sequence of files. Call WriteHeader to begin a new file, and
// then call Write to supply that file's data, writing at most hdr.Size bytes in
// total.
type Writer struct {
w io.Writer
nb int64 // number of unwritten bytes for current file entry
pad int64 // amount of padding to write after current file entry
inode int64
err error
closed bool
}
// NewWriter creates a new Writer writing to w.
func NewWriter(w io.Writer) *Writer {
return &Writer{w: w}
}
// Flush finishes writing the current file (optional).
func (w *Writer) Flush() error {
if w.nb > 0 {
w.err = fmt.Errorf("cpio: missed writing %d bytes", w.nb)
return w.err
}
_, w.err = w.w.Write(zeroBlock[:w.pad])
if w.err != nil {
return w.err
}
w.nb = 0
w.pad = 0
return w.err
}
// WriteHeader writes hdr and prepares to accept the file's contents.
// WriteHeader calls Flush if it is not the first header. Calling after a Close
// will return ErrWriteAfterClose.
func (w *Writer) WriteHeader(hdr *Header) (err error) {
if w.closed {
return ErrWriteAfterClose
}
if w.err == nil {
w.Flush()
}
if w.err != nil {
return w.err
}
if hdr.Name != headerEOF {
// TODO: should we be mutating hdr here?
// ensure all inodes are unique
w.inode++
if hdr.Inode == 0 {
hdr.Inode = w.inode
}
// ensure file type is set
if hdr.Mode&^ModePerm == 0 {
hdr.Mode |= ModeRegular
}
// ensure regular files have at least 1 inbound link
if hdr.Links < 1 && hdr.Mode.IsRegular() {
hdr.Links = 1
}
}
w.nb = hdr.Size
w.pad, w.err = writeSVR4Header(w.w, hdr)
return
}
// Write writes to the current entry in the CPIO archive. Write returns the
// error ErrWriteTooLong if more than hdr.Size bytes are written after
// WriteHeader.
func (w *Writer) Write(p []byte) (n int, err error) {
if w.closed {
err = ErrWriteAfterClose
return
}
overwrite := false
if int64(len(p)) > w.nb {
p = p[0:w.nb]
overwrite = true
}
n, err = w.w.Write(p)
w.nb -= int64(n)
if err == nil && overwrite {
err = ErrWriteTooLong
return
}
w.err = err
return
}
// Close closes the CPIO archive, flushing any unwritten data to the underlying
// writer.
func (w *Writer) Close() error {
if w.err != nil || w.closed {
return w.err
}
w.err = w.WriteHeader(trailer)
if w.err != nil {
return w.err
}
w.Flush()
w.closed = true
return w.err
}


@ -1,24 +0,0 @@
Copyright (c) 2014 CloudFlare Inc.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:
Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.


@ -1,94 +0,0 @@
// Package auth implements an interface for providing CFSSL
// authentication. This is meant to authenticate a client CFSSL to a
// remote CFSSL in order to prevent unauthorised use of the signature
// capabilities. This package provides both the interface and a
// standard HMAC-based implementation.
package auth
import (
"crypto/hmac"
"crypto/sha256"
"encoding/hex"
"fmt"
"io/ioutil"
"os"
"strings"
)
// An AuthenticatedRequest contains a request and authentication
// token. The Provider may determine whether to validate the timestamp
// and remote address.
type AuthenticatedRequest struct {
// An Authenticator decides whether to use this field.
Timestamp int64 `json:"timestamp,omitempty"`
RemoteAddress []byte `json:"remote_address,omitempty"`
Token []byte `json:"token"`
Request []byte `json:"request"`
}
// A Provider can generate tokens from a request and verify a
// request. The handling of additional authentication data (such as
// the IP address) is handled by the concrete type, as is any
// serialisation and state-keeping.
type Provider interface {
Token(req []byte) (token []byte, err error)
Verify(aReq *AuthenticatedRequest) bool
}
// Standard implements an HMAC-SHA-256 authentication provider. It may
// be supplied additional data at creation time that will be used as
// request || additional-data with the HMAC.
type Standard struct {
key []byte
ad []byte
}
// New generates a new standard authentication provider from the key
// and additional data. The additional data will be used when
// generating a new token.
func New(key string, ad []byte) (*Standard, error) {
if splitKey := strings.SplitN(key, ":", 2); len(splitKey) == 2 {
switch splitKey[0] {
case "env":
key = os.Getenv(splitKey[1])
case "file":
data, err := ioutil.ReadFile(splitKey[1])
if err != nil {
return nil, err
}
key = strings.TrimSpace(string(data))
default:
return nil, fmt.Errorf("unknown key prefix: %s", splitKey[0])
}
}
keyBytes, err := hex.DecodeString(key)
if err != nil {
return nil, err
}
return &Standard{keyBytes, ad}, nil
}
// Token generates a new authentication token from the request.
func (p Standard) Token(req []byte) (token []byte, err error) {
h := hmac.New(sha256.New, p.key)
h.Write(req)
h.Write(p.ad)
return h.Sum(nil), nil
}
// Verify determines whether an authenticated request is valid.
func (p Standard) Verify(ad *AuthenticatedRequest) bool {
if ad == nil {
return false
}
// Standard token generation returns no error.
token, _ := p.Token(ad.Request)
if len(ad.Token) != len(token) {
return false
}
return hmac.Equal(token, ad.Token)
}
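
For reference, a minimal sketch of using this provider from client code (not part of the diff); the hex key and request payload are placeholder values.

package main

import (
	"fmt"
	"log"

	"github.com/cloudflare/cfssl/auth"
)

func main() {
	// In practice the key often comes from "env:VAR" or "file:/path", as
	// handled by New above; a raw hex key is used here for brevity.
	provider, err := auth.New("000102030405060708090a0b0c0d0e0f", nil)
	if err != nil {
		log.Fatal(err)
	}

	request := []byte(`{"hosts":["node1.example"]}`) // placeholder payload
	token, err := provider.Token(request)
	if err != nil {
		log.Fatal(err)
	}

	ok := provider.Verify(&auth.AuthenticatedRequest{Token: token, Request: request})
	fmt.Println("token valid:", ok)
}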

View File

@ -1,75 +0,0 @@
# certdb usage
Using a database enables additional functionality for existing commands when a
db config is provided:
- `sign` and `gencert` add a certificate to the certdb after signing it
- `serve` enables database functionality for the sign and revoke endpoints
A database is required for the following:
- `revoke` marks certificates revoked in the database with an optional reason
- `ocsprefresh` refreshes the table of cached OCSP responses
- `ocspdump` outputs cached OCSP responses in a concatenated base64-encoded format
## Setup/Migration
This directory stores [goose](https://bitbucket.org/liamstask/goose/) db migration scripts for various DB backends.
Currently supported:
- MySQL in mysql
- PostgreSQL in pg
- SQLite in sqlite
### Get goose
go get bitbucket.org/liamstask/goose/cmd/goose
### Use goose to start and terminate a MySQL DB
To start a MySQL DB using goose:
goose -path $GOPATH/src/github.com/cloudflare/cfssl/certdb/mysql up
To tear down a MySQL DB using goose:
goose -path $GOPATH/src/github.com/cloudflare/cfssl/certdb/mysql down
Note: the administration of MySQL DB is not included. We assume
the databases being connected to are already created and access control
is properly handled.
### Use goose to start and terminate a PostgreSQL DB
To start a PostgreSQL DB using goose:
goose -path $GOPATH/src/github.com/cloudflare/cfssl/certdb/pg up
To tear down a PostgreSQL DB using goose:
goose -path $GOPATH/src/github.com/cloudflare/cfssl/certdb/pg down
Note: the administration of PostgreSQL DB is not included. We assume
the databases being connected to are already created and access control
is properly handled.
### Use goose to start and terminate a SQLite DB
To start a SQLite DB using goose:
goose -path $GOPATH/src/github.com/cloudflare/cfssl/certdb/sqlite up
To tear down a SQLite DB using goose:
goose -path $GOPATH/src/github.com/cloudflare/cfssl/certdb/sqlite down
## CFSSL Configuration
Several cfssl commands take a -db-config flag. Create a file with a
JSON dictionary:
{"driver":"sqlite3","data_source":"certs.db"}
or
{"driver":"postgres","data_source":"postgres://user:password@host/db"}
or
{"driver":"mysql","data_source":"user:password@tcp(hostname:3306)/db?parseTime=true"}

View File

@ -1,42 +0,0 @@
package certdb
import (
"time"
)
// CertificateRecord encodes a certificate and its metadata
// that will be recorded in a database.
type CertificateRecord struct {
Serial string `db:"serial_number"`
AKI string `db:"authority_key_identifier"`
CALabel string `db:"ca_label"`
Status string `db:"status"`
Reason int `db:"reason"`
Expiry time.Time `db:"expiry"`
RevokedAt time.Time `db:"revoked_at"`
PEM string `db:"pem"`
}
// OCSPRecord encodes an OCSP response body and its metadata
// that will be recorded in a database.
type OCSPRecord struct {
Serial string `db:"serial_number"`
AKI string `db:"authority_key_identifier"`
Body string `db:"body"`
Expiry time.Time `db:"expiry"`
}
// Accessor abstracts the CRUD of certdb objects from a DB.
type Accessor interface {
InsertCertificate(cr CertificateRecord) error
GetCertificate(serial, aki string) ([]CertificateRecord, error)
GetUnexpiredCertificates() ([]CertificateRecord, error)
GetRevokedAndUnexpiredCertificates() ([]CertificateRecord, error)
GetRevokedAndUnexpiredCertificatesByLabel(label string) ([]CertificateRecord, error)
RevokeCertificate(serial, aki string, reasonCode int) error
InsertOCSP(rr OCSPRecord) error
GetOCSP(serial, aki string) ([]OCSPRecord, error)
GetUnexpiredOCSPs() ([]OCSPRecord, error)
UpdateOCSP(serial, aki, body string, expiry time.Time) error
UpsertOCSP(serial, aki, body string, expiry time.Time) error
}
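
For reference, a hedged sketch of recording a signed certificate through any Accessor implementation (the concrete DB-backed accessor is not shown in this diff; the "good" status value and the hex AKI encoding are assumptions):

package localserver // hypothetical package name for this sketch

import (
	"crypto/x509"
	"encoding/pem"
	"fmt"

	"github.com/cloudflare/cfssl/certdb"
)

// recordCertificate inserts a freshly signed certificate into the certdb.
func recordCertificate(acc certdb.Accessor, cert *x509.Certificate) error {
	rec := certdb.CertificateRecord{
		Serial: cert.SerialNumber.String(),
		AKI:    fmt.Sprintf("%x", cert.AuthorityKeyId), // assumed hex encoding
		Status: "good",                                 // assumed status value
		Expiry: cert.NotAfter,
		PEM: string(pem.EncodeToMemory(&pem.Block{
			Type:  "CERTIFICATE",
			Bytes: cert.Raw,
		})),
	}
	return acc.InsertCertificate(rec)
}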

View File

@ -1,659 +0,0 @@
// Package config contains the configuration logic for CFSSL.
package config
import (
"crypto/tls"
"crypto/x509"
"encoding/asn1"
"encoding/json"
"errors"
"fmt"
"io/ioutil"
"regexp"
"strconv"
"strings"
"time"
"github.com/cloudflare/cfssl/auth"
cferr "github.com/cloudflare/cfssl/errors"
"github.com/cloudflare/cfssl/helpers"
"github.com/cloudflare/cfssl/log"
ocspConfig "github.com/cloudflare/cfssl/ocsp/config"
)
// A CSRWhitelist stores booleans for fields in the CSR. If a CSRWhitelist is
// not present in a SigningProfile, all of these fields may be copied from the
// CSR into the signed certificate. If a CSRWhitelist *is* present in a
// SigningProfile, only those fields with a `true` value in the CSRWhitelist may
// be copied from the CSR to the signed certificate. Note that some of these
// fields, like Subject, can be provided or partially provided through the API.
// Since API clients are expected to be trusted, but CSRs are not, fields
// provided through the API are not subject to whitelisting through this
// mechanism.
type CSRWhitelist struct {
Subject, PublicKeyAlgorithm, PublicKey, SignatureAlgorithm bool
DNSNames, IPAddresses, EmailAddresses, URIs bool
}
// OID is our own version of asn1's ObjectIdentifier, so we can define a custom
// JSON marshal / unmarshal.
type OID asn1.ObjectIdentifier
// CertificatePolicy represents the ASN.1 PolicyInformation structure from
// https://tools.ietf.org/html/rfc3280.html#page-106.
// Valid values of Type are "id-qt-unotice" and "id-qt-cps"
type CertificatePolicy struct {
ID OID
Qualifiers []CertificatePolicyQualifier
}
// CertificatePolicyQualifier represents a single qualifier from an ASN.1
// PolicyInformation structure.
type CertificatePolicyQualifier struct {
Type string
Value string
}
// AuthRemote is an authenticated remote signer.
type AuthRemote struct {
RemoteName string `json:"remote"`
AuthKeyName string `json:"auth_key"`
}
// CAConstraint specifies various CA constraints on the signed certificate.
// CAConstraint would verify against (and override) the CA
// extensions in the given CSR.
type CAConstraint struct {
IsCA bool `json:"is_ca"`
MaxPathLen int `json:"max_path_len"`
MaxPathLenZero bool `json:"max_path_len_zero"`
}
// A SigningProfile stores the information that the CA needs to enforce
// its signature policy.
type SigningProfile struct {
Usage []string `json:"usages"`
IssuerURL []string `json:"issuer_urls"`
OCSP string `json:"ocsp_url"`
CRL string `json:"crl_url"`
CAConstraint CAConstraint `json:"ca_constraint"`
OCSPNoCheck bool `json:"ocsp_no_check"`
ExpiryString string `json:"expiry"`
BackdateString string `json:"backdate"`
AuthKeyName string `json:"auth_key"`
RemoteName string `json:"remote"`
NotBefore time.Time `json:"not_before"`
NotAfter time.Time `json:"not_after"`
NameWhitelistString string `json:"name_whitelist"`
AuthRemote AuthRemote `json:"auth_remote"`
CTLogServers []string `json:"ct_log_servers"`
AllowedExtensions []OID `json:"allowed_extensions"`
CertStore string `json:"cert_store"`
Policies []CertificatePolicy
Expiry time.Duration
Backdate time.Duration
Provider auth.Provider
RemoteProvider auth.Provider
RemoteServer string
RemoteCAs *x509.CertPool
ClientCert *tls.Certificate
CSRWhitelist *CSRWhitelist
NameWhitelist *regexp.Regexp
ExtensionWhitelist map[string]bool
ClientProvidesSerialNumbers bool
}
// UnmarshalJSON unmarshals a JSON string into an OID.
func (oid *OID) UnmarshalJSON(data []byte) (err error) {
if data[0] != '"' || data[len(data)-1] != '"' {
return errors.New("OID JSON string not wrapped in quotes." + string(data))
}
data = data[1 : len(data)-1]
parsedOid, err := parseObjectIdentifier(string(data))
if err != nil {
return err
}
*oid = OID(parsedOid)
return
}
// MarshalJSON marshals an oid into a JSON string.
func (oid OID) MarshalJSON() ([]byte, error) {
return []byte(fmt.Sprintf(`"%v"`, asn1.ObjectIdentifier(oid))), nil
}
func parseObjectIdentifier(oidString string) (oid asn1.ObjectIdentifier, err error) {
validOID, err := regexp.MatchString("\\d(\\.\\d+)*", oidString)
if err != nil {
return
}
if !validOID {
err = errors.New("Invalid OID")
return
}
segments := strings.Split(oidString, ".")
oid = make(asn1.ObjectIdentifier, len(segments))
for i, intString := range segments {
oid[i], err = strconv.Atoi(intString)
if err != nil {
return
}
}
return
}
const timeFormat = "2006-01-02T15:04:05"
// populate is used to fill in the fields that are not in JSON
//
// First, the ExpiryString parameter is needed to parse
// expiration timestamps from JSON. The JSON decoder is not able to
// decode a string time duration to a time.Duration, so this is called
// when loading the configuration to properly parse and fill out the
// Expiry parameter.
// This function is also used to create references to the auth key
// and default remote for the profile.
// It returns nil if ExpiryString is a valid representation of a
// time.Duration and the AuthKeyName and RemoteName point to
// valid objects, and an error otherwise.
func (p *SigningProfile) populate(cfg *Config) error {
if p == nil {
return cferr.Wrap(cferr.PolicyError, cferr.InvalidPolicy, errors.New("can't parse nil profile"))
}
var err error
if p.RemoteName == "" && p.AuthRemote.RemoteName == "" {
log.Debugf("parse expiry in profile")
if p.ExpiryString == "" {
return cferr.Wrap(cferr.PolicyError, cferr.InvalidPolicy, errors.New("empty expiry string"))
}
dur, err := time.ParseDuration(p.ExpiryString)
if err != nil {
return cferr.Wrap(cferr.PolicyError, cferr.InvalidPolicy, err)
}
log.Debugf("expiry is valid")
p.Expiry = dur
if p.BackdateString != "" {
dur, err = time.ParseDuration(p.BackdateString)
if err != nil {
return cferr.Wrap(cferr.PolicyError, cferr.InvalidPolicy, err)
}
p.Backdate = dur
}
if !p.NotBefore.IsZero() && !p.NotAfter.IsZero() && p.NotAfter.Before(p.NotBefore) {
return cferr.Wrap(cferr.PolicyError, cferr.InvalidPolicy, err)
}
if len(p.Policies) > 0 {
for _, policy := range p.Policies {
for _, qualifier := range policy.Qualifiers {
if qualifier.Type != "" && qualifier.Type != "id-qt-unotice" && qualifier.Type != "id-qt-cps" {
return cferr.Wrap(cferr.PolicyError, cferr.InvalidPolicy,
errors.New("invalid policy qualifier type"))
}
}
}
}
} else if p.RemoteName != "" {
log.Debug("match remote in profile to remotes section")
if p.AuthRemote.RemoteName != "" {
log.Error("profile has both a remote and an auth remote specified")
return cferr.New(cferr.PolicyError, cferr.InvalidPolicy)
}
if remote := cfg.Remotes[p.RemoteName]; remote != "" {
if err := p.updateRemote(remote); err != nil {
return err
}
} else {
return cferr.Wrap(cferr.PolicyError, cferr.InvalidPolicy,
errors.New("failed to find remote in remotes section"))
}
} else {
log.Debug("match auth remote in profile to remotes section")
if remote := cfg.Remotes[p.AuthRemote.RemoteName]; remote != "" {
if err := p.updateRemote(remote); err != nil {
return err
}
} else {
return cferr.Wrap(cferr.PolicyError, cferr.InvalidPolicy,
errors.New("failed to find remote in remotes section"))
}
}
if p.AuthKeyName != "" {
log.Debug("match auth key in profile to auth_keys section")
if key, ok := cfg.AuthKeys[p.AuthKeyName]; ok {
if key.Type == "standard" {
p.Provider, err = auth.New(key.Key, nil)
if err != nil {
log.Debugf("failed to create new standard auth provider: %v", err)
return cferr.Wrap(cferr.PolicyError, cferr.InvalidPolicy,
errors.New("failed to create new standard auth provider"))
}
} else {
log.Debugf("unknown authentication type %v", key.Type)
return cferr.Wrap(cferr.PolicyError, cferr.InvalidPolicy,
errors.New("unknown authentication type"))
}
} else {
return cferr.Wrap(cferr.PolicyError, cferr.InvalidPolicy,
errors.New("failed to find auth_key in auth_keys section"))
}
}
if p.AuthRemote.AuthKeyName != "" {
log.Debug("match auth remote key in profile to auth_keys section")
if key, ok := cfg.AuthKeys[p.AuthRemote.AuthKeyName]; ok {
if key.Type == "standard" {
p.RemoteProvider, err = auth.New(key.Key, nil)
if err != nil {
log.Debugf("failed to create new standard auth provider: %v", err)
return cferr.Wrap(cferr.PolicyError, cferr.InvalidPolicy,
errors.New("failed to create new standard auth provider"))
}
} else {
log.Debugf("unknown authentication type %v", key.Type)
return cferr.Wrap(cferr.PolicyError, cferr.InvalidPolicy,
errors.New("unknown authentication type"))
}
} else {
return cferr.Wrap(cferr.PolicyError, cferr.InvalidPolicy,
errors.New("failed to find auth_remote's auth_key in auth_keys section"))
}
}
if p.NameWhitelistString != "" {
log.Debug("compiling whitelist regular expression")
rule, err := regexp.Compile(p.NameWhitelistString)
if err != nil {
return cferr.Wrap(cferr.PolicyError, cferr.InvalidPolicy,
errors.New("failed to compile name whitelist section"))
}
p.NameWhitelist = rule
}
p.ExtensionWhitelist = map[string]bool{}
for _, oid := range p.AllowedExtensions {
p.ExtensionWhitelist[asn1.ObjectIdentifier(oid).String()] = true
}
return nil
}
// updateRemote takes a signing profile and initializes the remote server object
// to the hostname:port combination sent by remote.
func (p *SigningProfile) updateRemote(remote string) error {
if remote != "" {
p.RemoteServer = remote
}
return nil
}
// OverrideRemotes takes a signing configuration and updates the remote server object
// to the hostname:port combination sent by remote
func (p *Signing) OverrideRemotes(remote string) error {
if remote != "" {
var err error
for _, profile := range p.Profiles {
err = profile.updateRemote(remote)
if err != nil {
return err
}
}
err = p.Default.updateRemote(remote)
if err != nil {
return err
}
}
return nil
}
// SetClientCertKeyPairFromFile updates the properties to set client certificates for mutual
// authenticated TLS remote requests
func (p *Signing) SetClientCertKeyPairFromFile(certFile string, keyFile string) error {
if certFile != "" && keyFile != "" {
cert, err := helpers.LoadClientCertificate(certFile, keyFile)
if err != nil {
return err
}
for _, profile := range p.Profiles {
profile.ClientCert = cert
}
p.Default.ClientCert = cert
}
return nil
}
// SetRemoteCAsFromFile reads root CAs from file and updates the properties to set remote CAs for TLS
// remote requests
func (p *Signing) SetRemoteCAsFromFile(caFile string) error {
if caFile != "" {
remoteCAs, err := helpers.LoadPEMCertPool(caFile)
if err != nil {
return err
}
p.SetRemoteCAs(remoteCAs)
}
return nil
}
// SetRemoteCAs updates the properties to set remote CAs for TLS
// remote requests
func (p *Signing) SetRemoteCAs(remoteCAs *x509.CertPool) {
for _, profile := range p.Profiles {
profile.RemoteCAs = remoteCAs
}
p.Default.RemoteCAs = remoteCAs
}
// NeedsRemoteSigner returns true if one of the profiles has a remote set
func (p *Signing) NeedsRemoteSigner() bool {
for _, profile := range p.Profiles {
if profile.RemoteServer != "" {
return true
}
}
if p.Default.RemoteServer != "" {
return true
}
return false
}
// NeedsLocalSigner returns true if one of the profiles does not have a remote set
func (p *Signing) NeedsLocalSigner() bool {
for _, profile := range p.Profiles {
if profile.RemoteServer == "" {
return true
}
}
if p.Default.RemoteServer == "" {
return true
}
return false
}
// Usages parses the list of key uses in the profile, translating them
// to a list of X.509 key usages and extended key usages. The unknown
// uses are collected into a slice that is also returned.
func (p *SigningProfile) Usages() (ku x509.KeyUsage, eku []x509.ExtKeyUsage, unk []string) {
for _, keyUse := range p.Usage {
if kuse, ok := KeyUsage[keyUse]; ok {
ku |= kuse
} else if ekuse, ok := ExtKeyUsage[keyUse]; ok {
eku = append(eku, ekuse)
} else {
unk = append(unk, keyUse)
}
}
return
}
// A valid profile must be a valid local profile or a valid remote profile.
// A valid local profile has defined at least key usages to be used, and a
// valid local default profile has defined at least a default expiration.
// A valid remote profile (default or not) has a remote signer initialized.
// In addition, a remote profile must have a valid auth provider if an auth
// key is defined.
func (p *SigningProfile) validProfile(isDefault bool) bool {
if p == nil {
return false
}
if p.AuthRemote.RemoteName == "" && p.AuthRemote.AuthKeyName != "" {
log.Debugf("invalid auth remote profile: no remote signer specified")
return false
}
if p.RemoteName != "" {
log.Debugf("validate remote profile")
if p.RemoteServer == "" {
log.Debugf("invalid remote profile: no remote signer specified")
return false
}
if p.AuthKeyName != "" && p.Provider == nil {
log.Debugf("invalid remote profile: auth key name is defined but no auth provider is set")
return false
}
if p.AuthRemote.RemoteName != "" {
log.Debugf("invalid remote profile: auth remote is also specified")
return false
}
} else if p.AuthRemote.RemoteName != "" {
log.Debugf("validate auth remote profile")
if p.RemoteServer == "" {
log.Debugf("invalid auth remote profile: no remote signer specified")
return false
}
if p.AuthRemote.AuthKeyName == "" || p.RemoteProvider == nil {
log.Debugf("invalid auth remote profile: no auth key is defined")
return false
}
} else {
log.Debugf("validate local profile")
if !isDefault {
if len(p.Usage) == 0 {
log.Debugf("invalid local profile: no usages specified")
return false
} else if _, _, unk := p.Usages(); len(unk) == len(p.Usage) {
log.Debugf("invalid local profile: no valid usages")
return false
}
} else {
if p.Expiry == 0 {
log.Debugf("invalid local profile: no expiry set")
return false
}
}
}
log.Debugf("profile is valid")
return true
}
// hasLocalConfig checks whether the SigningProfile contains configuration that is only
// effective with a local signer, i.e. one that has access to the CA private key.
func (p *SigningProfile) hasLocalConfig() bool {
if p.Usage != nil ||
p.IssuerURL != nil ||
p.OCSP != "" ||
p.ExpiryString != "" ||
p.BackdateString != "" ||
p.CAConstraint.IsCA != false ||
!p.NotBefore.IsZero() ||
!p.NotAfter.IsZero() ||
p.NameWhitelistString != "" ||
len(p.CTLogServers) != 0 {
return true
}
return false
}
// warnSkippedSettings prints a log warning message about skipped settings
// in a SigningProfile, usually due to remote signer.
func (p *Signing) warnSkippedSettings() {
const warningMessage = `The configuration values of "usages", "issuer_urls", "ocsp_url", "crl_url", "ca_constraint", "expiry", "backdate", "not_before", "not_after", "cert_store" and "ct_log_servers" are skipped`
if p == nil {
return
}
if (p.Default.RemoteName != "" || p.Default.AuthRemote.RemoteName != "") && p.Default.hasLocalConfig() {
log.Warning("default profile points to a remote signer: ", warningMessage)
}
for name, profile := range p.Profiles {
if (profile.RemoteName != "" || profile.AuthRemote.RemoteName != "") && profile.hasLocalConfig() {
log.Warningf("Profiles[%s] points to a remote signer: %s", name, warningMessage)
}
}
}
// Signing codifies the signature configuration policy for a CA.
type Signing struct {
Profiles map[string]*SigningProfile `json:"profiles"`
Default *SigningProfile `json:"default"`
}
// Config stores configuration information for the CA.
type Config struct {
Signing *Signing `json:"signing"`
OCSP *ocspConfig.Config `json:"ocsp"`
AuthKeys map[string]AuthKey `json:"auth_keys,omitempty"`
Remotes map[string]string `json:"remotes,omitempty"`
}
// Valid ensures that Config is a valid configuration. It should be
// called immediately after parsing a configuration file.
func (c *Config) Valid() bool {
return c.Signing.Valid()
}
// Valid checks the signature policies, ensuring they are valid
// policies. A policy is valid if it has defined at least key usages
// to be used, and a valid default profile has defined at least a
// default expiration.
func (p *Signing) Valid() bool {
if p == nil {
return false
}
log.Debugf("validating configuration")
if !p.Default.validProfile(true) {
log.Debugf("default profile is invalid")
return false
}
for _, sp := range p.Profiles {
if !sp.validProfile(false) {
log.Debugf("invalid profile")
return false
}
}
p.warnSkippedSettings()
return true
}
// KeyUsage contains a mapping of string names to key usages.
var KeyUsage = map[string]x509.KeyUsage{
"signing": x509.KeyUsageDigitalSignature,
"digital signature": x509.KeyUsageDigitalSignature,
"content commitment": x509.KeyUsageContentCommitment,
"key encipherment": x509.KeyUsageKeyEncipherment,
"key agreement": x509.KeyUsageKeyAgreement,
"data encipherment": x509.KeyUsageDataEncipherment,
"cert sign": x509.KeyUsageCertSign,
"crl sign": x509.KeyUsageCRLSign,
"encipher only": x509.KeyUsageEncipherOnly,
"decipher only": x509.KeyUsageDecipherOnly,
}
// ExtKeyUsage contains a mapping of string names to extended key
// usages.
var ExtKeyUsage = map[string]x509.ExtKeyUsage{
"any": x509.ExtKeyUsageAny,
"server auth": x509.ExtKeyUsageServerAuth,
"client auth": x509.ExtKeyUsageClientAuth,
"code signing": x509.ExtKeyUsageCodeSigning,
"email protection": x509.ExtKeyUsageEmailProtection,
"s/mime": x509.ExtKeyUsageEmailProtection,
"ipsec end system": x509.ExtKeyUsageIPSECEndSystem,
"ipsec tunnel": x509.ExtKeyUsageIPSECTunnel,
"ipsec user": x509.ExtKeyUsageIPSECUser,
"timestamping": x509.ExtKeyUsageTimeStamping,
"ocsp signing": x509.ExtKeyUsageOCSPSigning,
"microsoft sgc": x509.ExtKeyUsageMicrosoftServerGatedCrypto,
"netscape sgc": x509.ExtKeyUsageNetscapeServerGatedCrypto,
}
// An AuthKey contains an entry for a key used for authentication.
type AuthKey struct {
// Type contains information needed to select the appropriate
// constructor. For example, "standard" for HMAC-SHA-256,
// "standard-ip" for HMAC-SHA-256 incorporating the client's
// IP.
Type string `json:"type"`
// Key contains the key information, such as a hex-encoded
// HMAC key.
Key string `json:"key"`
}
// DefaultConfig returns a default configuration specifying basic key
// usage and a 1 year expiration time. The key usages chosen are
// signing, key encipherment, client auth and server auth.
func DefaultConfig() *SigningProfile {
d := helpers.OneYear
return &SigningProfile{
Usage: []string{"signing", "key encipherment", "server auth", "client auth"},
Expiry: d,
ExpiryString: "8760h",
}
}
// LoadFile attempts to load the configuration file stored at the path
// and returns the configuration. On error, it returns nil.
func LoadFile(path string) (*Config, error) {
log.Debugf("loading configuration file from %s", path)
if path == "" {
return nil, cferr.Wrap(cferr.PolicyError, cferr.InvalidPolicy, errors.New("invalid path"))
}
body, err := ioutil.ReadFile(path)
if err != nil {
return nil, cferr.Wrap(cferr.PolicyError, cferr.InvalidPolicy, errors.New("could not read configuration file"))
}
return LoadConfig(body)
}
// LoadConfig attempts to load the configuration from a byte slice.
// On error, it returns nil.
func LoadConfig(config []byte) (*Config, error) {
var cfg = &Config{}
err := json.Unmarshal(config, &cfg)
if err != nil {
return nil, cferr.Wrap(cferr.PolicyError, cferr.InvalidPolicy,
errors.New("failed to unmarshal configuration: "+err.Error()))
}
if cfg.Signing == nil {
return nil, errors.New("No \"signing\" field present")
}
if cfg.Signing.Default == nil {
log.Debugf("no default given: using default config")
cfg.Signing.Default = DefaultConfig()
} else {
if err := cfg.Signing.Default.populate(cfg); err != nil {
return nil, err
}
}
for k := range cfg.Signing.Profiles {
if err := cfg.Signing.Profiles[k].populate(cfg); err != nil {
return nil, err
}
}
if !cfg.Valid() {
return nil, cferr.Wrap(cferr.PolicyError, cferr.InvalidPolicy, errors.New("invalid configuration"))
}
log.Debugf("configuration ok")
return cfg, nil
}
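
For reference, a minimal sketch of loading a signing configuration through LoadConfig (the JSON keys follow the struct tags above; the values are placeholders):

package main

import (
	"log"

	"github.com/cloudflare/cfssl/config"
)

func main() {
	// "expiry" feeds ExpiryString and "usages" feeds Usage, per the json tags.
	raw := []byte(`{
  "signing": {
    "default": {
      "expiry": "8760h",
      "usages": ["signing", "key encipherment", "server auth", "client auth"]
    }
  }
}`)

	cfg, err := config.LoadConfig(raw)
	if err != nil {
		log.Fatal(err)
	}
	// ExpiryString has been parsed into a time.Duration by populate.
	log.Printf("default expiry: %s", cfg.Signing.Default.Expiry)
}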

View File

@ -1,188 +0,0 @@
// Package pkcs7 implements the subset of the CMS PKCS #7 datatype that is typically
// used to package certificates and CRLs. Using openssl, every certificate converted
// to PKCS #7 format from another encoding such as PEM conforms to this implementation.
// reference: https://www.openssl.org/docs/man1.1.0/apps/crl2pkcs7.html
//
// PKCS #7 Data type, reference: https://tools.ietf.org/html/rfc2315
//
// The full pkcs#7 cryptographic message syntax allows for cryptographic enhancements,
// for example data can be encrypted and signed and then packaged through pkcs#7 to be
// sent over a network and then verified and decrypted. It is asn1, and the type of
// PKCS #7 ContentInfo, which comprises the PKCS #7 structure, is:
//
// ContentInfo ::= SEQUENCE {
// contentType ContentType,
// content [0] EXPLICIT ANY DEFINED BY contentType OPTIONAL
// }
//
// There are 6 possible ContentTypes, data, signedData, envelopedData,
// signedAndEnvelopedData, digestedData, and encryptedData. Here signedData, Data, and
// encryptedData are implemented, as the degenerate case of signedData without a signature is the
// typical format for transferring certificates and CRLs, and Data and encryptedData are used in PKCS #12
// formats.
// The ContentType signedData has the form:
//
//
// signedData ::= SEQUENCE {
// version Version,
// digestAlgorithms DigestAlgorithmIdentifiers,
// contentInfo ContentInfo,
// certificates [0] IMPLICIT ExtendedCertificatesAndCertificates OPTIONAL
// crls [1] IMPLICIT CertificateRevocationLists OPTIONAL,
// signerInfos SignerInfos
// }
//
// As of yet signerInfos and digestAlgorithms are not parsed, as they are not relevant to
// this system's use of PKCS #7 data. Version is an integer type; note that PKCS #7 is
// recursive, and this second layer of ContentInfo is similarly ignored for our degenerate
// usage. The ExtendedCertificatesAndCertificates type consists of a sequence of choices
// between PKCS #6 extended certificates and x509 certificates. Any sequence consisting
// of any number of extended certificates is not yet supported in this implementation.
//
// The ContentType Data is simply a raw octet string and is parsed directly into a Go []byte slice.
//
// The ContentType encryptedData is the most complicated and its form can be gathered by
// the go type below. It essentially contains a raw octet string of encrypted data and an
// algorithm identifier for use in decrypting this data.
package pkcs7
import (
"crypto/x509"
"crypto/x509/pkix"
"encoding/asn1"
"errors"
cferr "github.com/cloudflare/cfssl/errors"
)
// Types used for asn1 Unmarshaling.
type signedData struct {
Version int
DigestAlgorithms asn1.RawValue
ContentInfo asn1.RawValue
Certificates asn1.RawValue `asn1:"optional" asn1:"tag:0"`
Crls asn1.RawValue `asn1:"optional"`
SignerInfos asn1.RawValue
}
type initPKCS7 struct {
Raw asn1.RawContent
ContentType asn1.ObjectIdentifier
Content asn1.RawValue `asn1:"tag:0,explicit,optional"`
}
// Object identifier strings of the three implemented PKCS7 types.
const (
ObjIDData = "1.2.840.113549.1.7.1"
ObjIDSignedData = "1.2.840.113549.1.7.2"
ObjIDEncryptedData = "1.2.840.113549.1.7.6"
)
// PKCS7 represents the ASN1 PKCS #7 Content type. It contains one of three
// possible types of Content objects, as denoted by the object identifier in
// the ContentInfo field, the other two being nil. SignedData
// is the degenerate SignedData Content info without signature used
// to hold certificates and crls. Data is raw bytes, and EncryptedData
// is as defined in PKCS #7 standard.
type PKCS7 struct {
Raw asn1.RawContent
ContentInfo string
Content Content
}
// Content implements three of the six possible PKCS7 data types. Only one is non-nil.
type Content struct {
Data []byte
SignedData SignedData
EncryptedData EncryptedData
}
// SignedData defines the typical carrier of certificates and crls.
type SignedData struct {
Raw asn1.RawContent
Version int
Certificates []*x509.Certificate
Crl *pkix.CertificateList
}
// Data contains raw bytes. Used as a subtype in PKCS12.
type Data struct {
Bytes []byte
}
// EncryptedData contains encrypted data. Used as a subtype in PKCS12.
type EncryptedData struct {
Raw asn1.RawContent
Version int
EncryptedContentInfo EncryptedContentInfo
}
// EncryptedContentInfo is a subtype of PKCS7EncryptedData.
type EncryptedContentInfo struct {
Raw asn1.RawContent
ContentType asn1.ObjectIdentifier
ContentEncryptionAlgorithm pkix.AlgorithmIdentifier
EncryptedContent []byte `asn1:"tag:0,optional"`
}
// ParsePKCS7 attempts to parse the DER encoded bytes of a
// PKCS7 structure.
func ParsePKCS7(raw []byte) (msg *PKCS7, err error) {
var pkcs7 initPKCS7
_, err = asn1.Unmarshal(raw, &pkcs7)
if err != nil {
return nil, cferr.Wrap(cferr.CertificateError, cferr.ParseFailed, err)
}
msg = new(PKCS7)
msg.Raw = pkcs7.Raw
msg.ContentInfo = pkcs7.ContentType.String()
switch {
case msg.ContentInfo == ObjIDData:
msg.ContentInfo = "Data"
_, err = asn1.Unmarshal(pkcs7.Content.Bytes, &msg.Content.Data)
if err != nil {
return nil, cferr.Wrap(cferr.CertificateError, cferr.ParseFailed, err)
}
case msg.ContentInfo == ObjIDSignedData:
msg.ContentInfo = "SignedData"
var signedData signedData
_, err = asn1.Unmarshal(pkcs7.Content.Bytes, &signedData)
if err != nil {
return nil, cferr.Wrap(cferr.CertificateError, cferr.ParseFailed, err)
}
if len(signedData.Certificates.Bytes) != 0 {
msg.Content.SignedData.Certificates, err = x509.ParseCertificates(signedData.Certificates.Bytes)
if err != nil {
return nil, cferr.Wrap(cferr.CertificateError, cferr.ParseFailed, err)
}
}
if len(signedData.Crls.Bytes) != 0 {
msg.Content.SignedData.Crl, err = x509.ParseDERCRL(signedData.Crls.Bytes)
if err != nil {
return nil, cferr.Wrap(cferr.CertificateError, cferr.ParseFailed, err)
}
}
msg.Content.SignedData.Version = signedData.Version
msg.Content.SignedData.Raw = pkcs7.Content.Bytes
case msg.ContentInfo == ObjIDEncryptedData:
msg.ContentInfo = "EncryptedData"
var encryptedData EncryptedData
_, err = asn1.Unmarshal(pkcs7.Content.Bytes, &encryptedData)
if err != nil {
return nil, cferr.Wrap(cferr.CertificateError, cferr.ParseFailed, err)
}
if encryptedData.Version != 0 {
return nil, cferr.Wrap(cferr.CertificateError, cferr.ParseFailed, errors.New("Only support for PKCS #7 encryptedData version 0"))
}
msg.Content.EncryptedData = encryptedData
default:
return nil, cferr.Wrap(cferr.CertificateError, cferr.ParseFailed, errors.New("Attempt to parse PKCS# 7 Content not of type data, signed data or encrypted data"))
}
return msg, nil
}
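
For reference, a minimal sketch of calling ParsePKCS7 on a DER-encoded blob and switching on the decoded content type (the file name is a placeholder, and the import path assumes the upstream cfssl crypto/pkcs7 layout):

package main

import (
	"log"
	"os"

	"github.com/cloudflare/cfssl/crypto/pkcs7"
)

func main() {
	der, err := os.ReadFile("bundle.p7b") // DER bytes, not PEM
	if err != nil {
		log.Fatal(err)
	}
	msg, err := pkcs7.ParsePKCS7(der)
	if err != nil {
		log.Fatal(err)
	}
	switch msg.ContentInfo {
	case "SignedData":
		log.Printf("%d certificate(s) in degenerate SignedData",
			len(msg.Content.SignedData.Certificates))
	case "Data":
		log.Printf("raw data, %d bytes", len(msg.Content.Data))
	case "EncryptedData":
		log.Printf("encrypted data, version %d", msg.Content.EncryptedData.Version)
	}
}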

View File

@ -1,438 +0,0 @@
// Package csr implements certificate requests for CFSSL.
package csr
import (
"crypto"
"crypto/ecdsa"
"crypto/elliptic"
"crypto/rand"
"crypto/rsa"
"crypto/x509"
"crypto/x509/pkix"
"encoding/asn1"
"encoding/pem"
"errors"
"net"
"net/mail"
"net/url"
"strings"
cferr "github.com/cloudflare/cfssl/errors"
"github.com/cloudflare/cfssl/helpers"
"github.com/cloudflare/cfssl/log"
)
const (
curveP256 = 256
curveP384 = 384
curveP521 = 521
)
// A Name contains the SubjectInfo fields.
type Name struct {
C string // Country
ST string // State
L string // Locality
O string // OrganisationName
OU string // OrganisationalUnitName
SerialNumber string
}
// A KeyRequest is a generic request for a new key.
type KeyRequest interface {
Algo() string
Size() int
Generate() (crypto.PrivateKey, error)
SigAlgo() x509.SignatureAlgorithm
}
// A BasicKeyRequest contains the algorithm and key size for a new private key.
type BasicKeyRequest struct {
A string `json:"algo" yaml:"algo"`
S int `json:"size" yaml:"size"`
}
// NewBasicKeyRequest returns a default BasicKeyRequest.
func NewBasicKeyRequest() *BasicKeyRequest {
return &BasicKeyRequest{"ecdsa", curveP256}
}
// Algo returns the requested key algorithm represented as a string.
func (kr *BasicKeyRequest) Algo() string {
return kr.A
}
// Size returns the requested key size.
func (kr *BasicKeyRequest) Size() int {
return kr.S
}
// Generate generates a key as specified in the request. Currently,
// only ECDSA and RSA are supported.
func (kr *BasicKeyRequest) Generate() (crypto.PrivateKey, error) {
log.Debugf("generate key from request: algo=%s, size=%d", kr.Algo(), kr.Size())
switch kr.Algo() {
case "rsa":
if kr.Size() < 2048 {
return nil, errors.New("RSA key is too weak")
}
if kr.Size() > 8192 {
return nil, errors.New("RSA key size too large")
}
return rsa.GenerateKey(rand.Reader, kr.Size())
case "ecdsa":
var curve elliptic.Curve
switch kr.Size() {
case curveP256:
curve = elliptic.P256()
case curveP384:
curve = elliptic.P384()
case curveP521:
curve = elliptic.P521()
default:
return nil, errors.New("invalid curve")
}
return ecdsa.GenerateKey(curve, rand.Reader)
default:
return nil, errors.New("invalid algorithm")
}
}
// SigAlgo returns an appropriate X.509 signature algorithm given the
// key request's type and size.
func (kr *BasicKeyRequest) SigAlgo() x509.SignatureAlgorithm {
switch kr.Algo() {
case "rsa":
switch {
case kr.Size() >= 4096:
return x509.SHA512WithRSA
case kr.Size() >= 3072:
return x509.SHA384WithRSA
case kr.Size() >= 2048:
return x509.SHA256WithRSA
default:
return x509.SHA1WithRSA
}
case "ecdsa":
switch kr.Size() {
case curveP521:
return x509.ECDSAWithSHA512
case curveP384:
return x509.ECDSAWithSHA384
case curveP256:
return x509.ECDSAWithSHA256
default:
return x509.ECDSAWithSHA1
}
default:
return x509.UnknownSignatureAlgorithm
}
}
// CAConfig is a section used in the requests initialising a new CA.
type CAConfig struct {
PathLength int `json:"pathlen" yaml:"pathlen"`
PathLenZero bool `json:"pathlenzero" yaml:"pathlenzero"`
Expiry string `json:"expiry" yaml:"expiry"`
Backdate string `json:"backdate" yaml:"backdate"`
}
// A CertificateRequest encapsulates the API interface to the
// certificate request functionality.
type CertificateRequest struct {
CN string
Names []Name `json:"names" yaml:"names"`
Hosts []string `json:"hosts" yaml:"hosts"`
KeyRequest KeyRequest `json:"key,omitempty" yaml:"key,omitempty"`
CA *CAConfig `json:"ca,omitempty" yaml:"ca,omitempty"`
SerialNumber string `json:"serialnumber,omitempty" yaml:"serialnumber,omitempty"`
}
// New returns a new, empty CertificateRequest with a
// BasicKeyRequest.
func New() *CertificateRequest {
return &CertificateRequest{
KeyRequest: NewBasicKeyRequest(),
}
}
// appendIf appends s to *a if s is not an empty string.
func appendIf(s string, a *[]string) {
if s != "" {
*a = append(*a, s)
}
}
// Name returns the PKIX name for the request.
func (cr *CertificateRequest) Name() pkix.Name {
var name pkix.Name
name.CommonName = cr.CN
for _, n := range cr.Names {
appendIf(n.C, &name.Country)
appendIf(n.ST, &name.Province)
appendIf(n.L, &name.Locality)
appendIf(n.O, &name.Organization)
appendIf(n.OU, &name.OrganizationalUnit)
}
name.SerialNumber = cr.SerialNumber
return name
}
// BasicConstraints CSR information RFC 5280, 4.2.1.9
type BasicConstraints struct {
IsCA bool `asn1:"optional"`
MaxPathLen int `asn1:"optional,default:-1"`
}
// ParseRequest takes a certificate request and generates a key and
// CSR from it. It does no validation -- caveat emptor. It will,
// however, fail if the key request is not valid (i.e., an unsupported
// curve or RSA key size). The lack of validation was specifically
// chosen to allow the end user to define a policy and validate the
// request appropriately before calling this function.
func ParseRequest(req *CertificateRequest) (csr, key []byte, err error) {
log.Info("received CSR")
if req.KeyRequest == nil {
req.KeyRequest = NewBasicKeyRequest()
}
log.Infof("generating key: %s-%d", req.KeyRequest.Algo(), req.KeyRequest.Size())
priv, err := req.KeyRequest.Generate()
if err != nil {
err = cferr.Wrap(cferr.PrivateKeyError, cferr.GenerationFailed, err)
return
}
switch priv := priv.(type) {
case *rsa.PrivateKey:
key = x509.MarshalPKCS1PrivateKey(priv)
block := pem.Block{
Type: "RSA PRIVATE KEY",
Bytes: key,
}
key = pem.EncodeToMemory(&block)
case *ecdsa.PrivateKey:
key, err = x509.MarshalECPrivateKey(priv)
if err != nil {
err = cferr.Wrap(cferr.PrivateKeyError, cferr.Unknown, err)
return
}
block := pem.Block{
Type: "EC PRIVATE KEY",
Bytes: key,
}
key = pem.EncodeToMemory(&block)
default:
panic("Generate should have failed to produce a valid key.")
}
csr, err = Generate(priv.(crypto.Signer), req)
if err != nil {
log.Errorf("failed to generate a CSR: %v", err)
err = cferr.Wrap(cferr.CSRError, cferr.BadRequest, err)
}
return
}
// ExtractCertificateRequest extracts a CertificateRequest from
// x509.Certificate. It is intended to be used for generating a new certificate
// from an existing certificate. For a root certificate, the CA expiry
// length is calculated as the duration between cert.NotAfter and cert.NotBefore.
func ExtractCertificateRequest(cert *x509.Certificate) *CertificateRequest {
req := New()
req.CN = cert.Subject.CommonName
req.Names = getNames(cert.Subject)
req.Hosts = getHosts(cert)
req.SerialNumber = cert.Subject.SerialNumber
if cert.IsCA {
req.CA = new(CAConfig)
// CA expiry length is calculated based on the input cert
// issue date and expiry date.
req.CA.Expiry = cert.NotAfter.Sub(cert.NotBefore).String()
req.CA.PathLength = cert.MaxPathLen
req.CA.PathLenZero = cert.MaxPathLenZero
}
return req
}
func getHosts(cert *x509.Certificate) []string {
var hosts []string
for _, ip := range cert.IPAddresses {
hosts = append(hosts, ip.String())
}
for _, dns := range cert.DNSNames {
hosts = append(hosts, dns)
}
for _, email := range cert.EmailAddresses {
hosts = append(hosts, email)
}
for _, uri := range cert.URIs {
hosts = append(hosts, uri.String())
}
return hosts
}
// getNames returns an array of Names from the certificate.
// It only cares about Country, Organization, OrganizationalUnit, Locality and Province.
func getNames(sub pkix.Name) []Name {
// anonymous func for finding the max of a list of integers
max := func(v1 int, vn ...int) (max int) {
max = v1
for i := 0; i < len(vn); i++ {
if vn[i] > max {
max = vn[i]
}
}
return max
}
nc := len(sub.Country)
norg := len(sub.Organization)
nou := len(sub.OrganizationalUnit)
nl := len(sub.Locality)
np := len(sub.Province)
n := max(nc, norg, nou, nl, np)
names := make([]Name, n)
for i := range names {
if i < nc {
names[i].C = sub.Country[i]
}
if i < norg {
names[i].O = sub.Organization[i]
}
if i < nou {
names[i].OU = sub.OrganizationalUnit[i]
}
if i < nl {
names[i].L = sub.Locality[i]
}
if i < np {
names[i].ST = sub.Province[i]
}
}
return names
}
// A Generator is responsible for validating certificate requests.
type Generator struct {
Validator func(*CertificateRequest) error
}
// ProcessRequest validates and processes the incoming request. It is
// a wrapper around a validator and the ParseRequest function.
func (g *Generator) ProcessRequest(req *CertificateRequest) (csr, key []byte, err error) {
log.Info("generate received request")
err = g.Validator(req)
if err != nil {
log.Warningf("invalid request: %v", err)
return nil, nil, err
}
csr, key, err = ParseRequest(req)
if err != nil {
return nil, nil, err
}
return
}
// IsNameEmpty returns true if the name has no identifying information in it.
func IsNameEmpty(n Name) bool {
empty := func(s string) bool { return strings.TrimSpace(s) == "" }
if empty(n.C) && empty(n.ST) && empty(n.L) && empty(n.O) && empty(n.OU) {
return true
}
return false
}
// Regenerate uses the provided CSR as a template for signing a new
// CSR using priv.
func Regenerate(priv crypto.Signer, csr []byte) ([]byte, error) {
req, extra, err := helpers.ParseCSR(csr)
if err != nil {
return nil, err
} else if len(extra) > 0 {
return nil, errors.New("csr: trailing data in certificate request")
}
return x509.CreateCertificateRequest(rand.Reader, req, priv)
}
// Generate creates a new CSR from a CertificateRequest structure and
// an existing key. The KeyRequest field is ignored.
func Generate(priv crypto.Signer, req *CertificateRequest) (csr []byte, err error) {
sigAlgo := helpers.SignerAlgo(priv)
if sigAlgo == x509.UnknownSignatureAlgorithm {
return nil, cferr.New(cferr.PrivateKeyError, cferr.Unavailable)
}
var tpl = x509.CertificateRequest{
Subject: req.Name(),
SignatureAlgorithm: sigAlgo,
}
for i := range req.Hosts {
if ip := net.ParseIP(req.Hosts[i]); ip != nil {
tpl.IPAddresses = append(tpl.IPAddresses, ip)
} else if email, err := mail.ParseAddress(req.Hosts[i]); err == nil && email != nil {
tpl.EmailAddresses = append(tpl.EmailAddresses, email.Address)
} else if uri, err := url.ParseRequestURI(req.Hosts[i]); err == nil && uri != nil {
tpl.URIs = append(tpl.URIs, uri)
} else {
tpl.DNSNames = append(tpl.DNSNames, req.Hosts[i])
}
}
if req.CA != nil {
err = appendCAInfoToCSR(req.CA, &tpl)
if err != nil {
err = cferr.Wrap(cferr.CSRError, cferr.GenerationFailed, err)
return
}
}
csr, err = x509.CreateCertificateRequest(rand.Reader, &tpl, priv)
if err != nil {
log.Errorf("failed to generate a CSR: %v", err)
err = cferr.Wrap(cferr.CSRError, cferr.BadRequest, err)
return
}
block := pem.Block{
Type: "CERTIFICATE REQUEST",
Bytes: csr,
}
log.Info("encoded CSR")
csr = pem.EncodeToMemory(&block)
return
}
// appendCAInfoToCSR appends CAConfig BasicConstraint extension to a CSR
func appendCAInfoToCSR(reqConf *CAConfig, csr *x509.CertificateRequest) error {
pathlen := reqConf.PathLength
if pathlen == 0 && !reqConf.PathLenZero {
pathlen = -1
}
val, err := asn1.Marshal(BasicConstraints{true, pathlen})
if err != nil {
return err
}
csr.ExtraExtensions = []pkix.Extension{
{
Id: asn1.ObjectIdentifier{2, 5, 29, 19},
Value: val,
Critical: true,
},
}
return nil
}
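
For reference, a minimal sketch of building a CertificateRequest and turning it into a CSR and key with ParseRequest (the names and hosts are placeholders):

package main

import (
	"fmt"
	"log"

	"github.com/cloudflare/cfssl/csr"
)

func main() {
	req := &csr.CertificateRequest{
		CN:         "node1.example.net",
		Hosts:      []string{"node1.example.net", "10.0.0.1"},
		Names:      []csr.Name{{C: "FR", O: "Example"}},
		KeyRequest: csr.NewBasicKeyRequest(), // ECDSA P-256 by default
	}

	csrPEM, keyPEM, err := csr.ParseRequest(req)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("%s", csrPEM)
	_ = keyPEM // the PEM-encoded private key; keep it somewhere safe
}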

View File

@ -1,46 +0,0 @@
/*
Package errors provides error types returned in CF SSL.
1. Type Error is intended for errors produced by CF SSL packages.
It formats to a json object that consists of an error message and a 4-digit code for error reasoning.
Example: {"code":1002, "message": "Failed to decode certificate"}
The index of codes is listed below:
1XXX: CertificateError
1000: Unknown
1001: ReadFailed
1002: DecodeFailed
1003: ParseFailed
1100: SelfSigned
12XX: VerifyFailed
121X: CertificateInvalid
1210: NotAuthorizedToSign
1211: Expired
1212: CANotAuthorizedForThisName
1213: TooManyIntermediates
1214: IncompatibleUsage
1220: UnknownAuthority
2XXX: PrivatekeyError
2000: Unknown
2001: ReadFailed
2002: DecodeFailed
2003: ParseFailed
2100: Encrypted
2200: NotRSA
2300: KeyMismatch
2400: GenerationFailed
2500: Unavailable
3XXX: IntermediatesError
4XXX: RootError
5XXX: PolicyError
5100: NoKeyUsages
5200: InvalidPolicy
5300: InvalidRequest
5400: UnknownProfile
6XXX: DialError
2. Type HttpError is intended for the CF SSL API to consume. It contains an HTTP status code that will be read and returned
by the API server.
*/
package errors

View File

@ -1,438 +0,0 @@
package errors
import (
"crypto/x509"
"encoding/json"
"fmt"
)
// Error is the error type usually returned by functions in CF SSL package.
// It contains a 4-digit error code where the most significant digit
// describes the category in which the error occurred and the remaining 3 digits
// describe the specific error reason.
type Error struct {
ErrorCode int `json:"code"`
Message string `json:"message"`
}
// Category is the most significant digit of the error code.
type Category int
// Reason is the last 3 digits of the error code.
type Reason int
const (
// Success indicates no error occurred.
Success Category = 1000 * iota // 0XXX
// CertificateError indicates a fault in a certificate.
CertificateError // 1XXX
// PrivateKeyError indicates a fault in a private key.
PrivateKeyError // 2XXX
// IntermediatesError indicates a fault in an intermediate.
IntermediatesError // 3XXX
// RootError indicates a fault in a root.
RootError // 4XXX
// PolicyError indicates an error arising from a malformed or
// non-existent policy, or a breach of policy.
PolicyError // 5XXX
// DialError indicates a network fault.
DialError // 6XXX
// APIClientError indicates a problem with the API client.
APIClientError // 7XXX
// OCSPError indicates a problem with OCSP signing
OCSPError // 8XXX
// CSRError indicates a problem with CSR parsing
CSRError // 9XXX
// CTError indicates a problem with the certificate transparency process
CTError // 10XXX
// CertStoreError indicates a problem with the certificate store
CertStoreError // 11XXX
)
// None is a non-specified error.
const (
None Reason = iota
)
// Warning code for a success
const (
BundleExpiringBit int = 1 << iota // 0x01
BundleNotUbiquitousBit // 0x02
)
// Parsing errors
const (
Unknown Reason = iota // X000
ReadFailed // X001
DecodeFailed // X002
ParseFailed // X003
)
// The following represent certificate non-parsing errors, and must be
// specified along with CertificateError.
const (
// SelfSigned indicates that a certificate is self-signed and
// cannot be used in the manner being attempted.
SelfSigned Reason = 100 * (iota + 1) // Code 11XX
// VerifyFailed is an X.509 verification failure. The least two
// significant digits of 12XX is determined as the actual x509
// error is examined.
VerifyFailed // Code 12XX
// BadRequest indicates that the certificate request is invalid.
BadRequest // Code 13XX
// MissingSerial indicates that the profile specified
// 'ClientProvidesSerialNumbers', but the SignRequest did not include a serial
// number.
MissingSerial // Code 14XX
)
const (
certificateInvalid = 10 * (iota + 1) //121X
unknownAuthority //122x
)
// The following represent private-key non-parsing errors, and must be
// specified with PrivateKeyError.
const (
// Encrypted indicates that the private key is a PKCS #8 encrypted
// private key. At this time, CFSSL does not support decrypting
// these keys.
Encrypted Reason = 100 * (iota + 1) //21XX
// NotRSAOrECC indicates that the key is not an RSA or ECC
// private key; these are the only two private key types supported
// at this time by CFSSL.
NotRSAOrECC //22XX
// KeyMismatch indicates that the private key does not match
// the public key or certificate being presented with the key.
KeyMismatch //23XX
// GenerationFailed indicates that a private key could not
// be generated.
GenerationFailed //24XX
// Unavailable indicates that a private key mechanism (such as
// PKCS #11) was requested but support for that mechanism is
// not available.
Unavailable
)
// The following are policy-related non-parsing errors, and must be
// specified along with PolicyError.
const (
// NoKeyUsages indicates that the profile does not permit any
// key usages for the certificate.
NoKeyUsages Reason = 100 * (iota + 1) // 51XX
// InvalidPolicy indicates that policy being requested is not
// a valid policy or does not exist.
InvalidPolicy // 52XX
// InvalidRequest indicates a certificate request violated the
// constraints of the policy being applied to the request.
InvalidRequest // 53XX
// UnknownProfile indicates that the profile does not exist.
UnknownProfile // 54XX
UnmatchedWhitelist // 55xx
)
// The following are API client related errors, and should be
// specified with APIClientError.
const (
// AuthenticationFailure occurs when the client is unable
// to obtain an authentication token for the request.
AuthenticationFailure Reason = 100 * (iota + 1)
// JSONError wraps an encoding/json error.
JSONError
// IOError wraps an io/ioutil error.
IOError
// ClientHTTPError wraps a net/http error.
ClientHTTPError
// ServerRequestFailed covers any other failures from the API
// client.
ServerRequestFailed
)
// The following are OCSP related errors, and should be
// specified with OCSPError
const (
// IssuerMismatch occurs when the certificate in the OCSP signing
// request was not issued by the CA that this responder responds for.
IssuerMismatch Reason = 100 * (iota + 1) // 81XX
// InvalidStatus occurs when the OCSP signing requests includes an
// invalid value for the certificate status.
InvalidStatus
)
// Certificate transparency related errors specified with CTError
const (
// PrecertSubmissionFailed occurs when submitting a precertificate to
// a log server fails
PrecertSubmissionFailed = 100 * (iota + 1)
// CTClientConstructionFailed occurs when the construction of a new
// github.com/google/certificate-transparency client fails.
CTClientConstructionFailed
// PrecertMissingPoison occurs when a precert is passed to SignFromPrecert
// and is missing the CT poison extension.
PrecertMissingPoison
// PrecertInvalidPoison occurs when a precert is passed to SignFromPrecert
// and has a invalid CT poison extension value or the extension is not
// critical.
PrecertInvalidPoison
)
// Certificate persistence related errors specified with CertStoreError
const (
// InsertionFailed occurs when a SQL insert query fails to complete.
InsertionFailed = 100 * (iota + 1)
// RecordNotFound occurs when a SQL query targeting one unique
// record fails to update the specified row in the table.
RecordNotFound
)
// The error interface implementation, which formats to a JSON object string.
func (e *Error) Error() string {
marshaled, err := json.Marshal(e)
if err != nil {
panic(err)
}
return string(marshaled)
}
// New returns an error that contains an error code and message derived from
// the given category, reason. Currently, to avoid confusion, it is not
// allowed to create an error of category Success
func New(category Category, reason Reason) *Error {
errorCode := int(category) + int(reason)
var msg string
switch category {
case OCSPError:
switch reason {
case ReadFailed:
msg = "No certificate provided"
case IssuerMismatch:
msg = "Certificate not issued by this issuer"
case InvalidStatus:
msg = "Invalid revocation status"
}
case CertificateError:
switch reason {
case Unknown:
msg = "Unknown certificate error"
case ReadFailed:
msg = "Failed to read certificate"
case DecodeFailed:
msg = "Failed to decode certificate"
case ParseFailed:
msg = "Failed to parse certificate"
case SelfSigned:
msg = "Certificate is self signed"
case VerifyFailed:
msg = "Unable to verify certificate"
case BadRequest:
msg = "Invalid certificate request"
case MissingSerial:
msg = "Missing serial number in request"
default:
panic(fmt.Sprintf("Unsupported CFSSL error reason %d under category CertificateError.",
reason))
}
case PrivateKeyError:
switch reason {
case Unknown:
msg = "Unknown private key error"
case ReadFailed:
msg = "Failed to read private key"
case DecodeFailed:
msg = "Failed to decode private key"
case ParseFailed:
msg = "Failed to parse private key"
case Encrypted:
msg = "Private key is encrypted."
case NotRSAOrECC:
msg = "Private key algorithm is not RSA or ECC"
case KeyMismatch:
msg = "Private key does not match public key"
case GenerationFailed:
msg = "Failed to new private key"
case Unavailable:
msg = "Private key is unavailable"
default:
panic(fmt.Sprintf("Unsupported CFSSL error reason %d under category PrivateKeyError.",
reason))
}
case IntermediatesError:
switch reason {
case Unknown:
msg = "Unknown intermediate certificate error"
case ReadFailed:
msg = "Failed to read intermediate certificate"
case DecodeFailed:
msg = "Failed to decode intermediate certificate"
case ParseFailed:
msg = "Failed to parse intermediate certificate"
default:
panic(fmt.Sprintf("Unsupported CFSSL error reason %d under category IntermediatesError.",
reason))
}
case RootError:
switch reason {
case Unknown:
msg = "Unknown root certificate error"
case ReadFailed:
msg = "Failed to read root certificate"
case DecodeFailed:
msg = "Failed to decode root certificate"
case ParseFailed:
msg = "Failed to parse root certificate"
default:
panic(fmt.Sprintf("Unsupported CFSSL error reason %d under category RootError.",
reason))
}
case PolicyError:
switch reason {
case Unknown:
msg = "Unknown policy error"
case NoKeyUsages:
msg = "Invalid policy: no key usage available"
case InvalidPolicy:
msg = "Invalid or unknown policy"
case InvalidRequest:
msg = "Policy violation request"
case UnknownProfile:
msg = "Unknown policy profile"
case UnmatchedWhitelist:
msg = "Request does not match policy whitelist"
default:
panic(fmt.Sprintf("Unsupported CFSSL error reason %d under category PolicyError.",
reason))
}
case DialError:
switch reason {
case Unknown:
msg = "Failed to dial remote server"
default:
panic(fmt.Sprintf("Unsupported CFSSL error reason %d under category DialError.",
reason))
}
case APIClientError:
switch reason {
case AuthenticationFailure:
msg = "API client authentication failure"
case JSONError:
msg = "API client JSON config error"
case ClientHTTPError:
msg = "API client HTTP error"
case IOError:
msg = "API client IO error"
case ServerRequestFailed:
msg = "API client error: Server request failed"
default:
panic(fmt.Sprintf("Unsupported CFSSL error reason %d under category APIClientError.",
reason))
}
case CSRError:
switch reason {
case Unknown:
msg = "CSR parsing failed due to unknown error"
case ReadFailed:
msg = "CSR file read failed"
case ParseFailed:
msg = "CSR Parsing failed"
case DecodeFailed:
msg = "CSR Decode failed"
case BadRequest:
msg = "CSR Bad request"
default:
panic(fmt.Sprintf("Unsupported CF-SSL error reason %d under category APIClientError.", reason))
}
case CTError:
switch reason {
case Unknown:
msg = "Certificate transparency parsing failed due to unknown error"
case PrecertSubmissionFailed:
msg = "Certificate transparency precertificate submission failed"
case PrecertMissingPoison:
msg = "Precertificate is missing CT poison extension"
case PrecertInvalidPoison:
msg = "Precertificate contains an invalid CT poison extension"
default:
panic(fmt.Sprintf("Unsupported CF-SSL error reason %d under category CTError.", reason))
}
case CertStoreError:
switch reason {
case Unknown:
msg = "Certificate store action failed due to unknown error"
default:
panic(fmt.Sprintf("Unsupported CF-SSL error reason %d under category CertStoreError.", reason))
}
default:
panic(fmt.Sprintf("Unsupported CFSSL error type: %d.",
category))
}
return &Error{ErrorCode: errorCode, Message: msg}
}
// Wrap returns an error that contains the given error and an error code derived from
// the given category, reason and the error. Currently, to avoid confusion, it is not
// allowed to create an error of category Success
func Wrap(category Category, reason Reason, err error) *Error {
errorCode := int(category) + int(reason)
if err == nil {
panic("Wrap needs a supplied error to initialize.")
}
// do not double-wrap an error
switch err.(type) {
case *Error:
panic("Unable to wrap a wrapped error.")
}
switch category {
case CertificateError:
// given VerifyFailed, report the status with a more detailed status code
// for some certificate errors we care about.
if reason == VerifyFailed {
switch errorType := err.(type) {
case x509.CertificateInvalidError:
errorCode += certificateInvalid + int(errorType.Reason)
case x509.UnknownAuthorityError:
errorCode += unknownAuthority
}
}
case PrivateKeyError, IntermediatesError, RootError, PolicyError, DialError,
APIClientError, CSRError, CTError, CertStoreError, OCSPError:
// no-op, just use the error
default:
panic(fmt.Sprintf("Unsupported CFSSL error type: %d.",
category))
}
return &Error{ErrorCode: errorCode, Message: err.Error()}
}
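
For reference, a minimal sketch of the two constructors (the wrapped error is a placeholder):

package main

import (
	"errors"
	"fmt"

	cferr "github.com/cloudflare/cfssl/errors"
)

func main() {
	// New: category + reason, JSON-formatted message; 1000 + 3 gives code 1003.
	e := cferr.New(cferr.CertificateError, cferr.ParseFailed)
	fmt.Println(e.Error()) // {"code":1003,"message":"Failed to parse certificate"}

	// Wrap: keeps the underlying error's message and adds the numeric code (2001).
	w := cferr.Wrap(cferr.PrivateKeyError, cferr.ReadFailed, errors.New("no such file"))
	fmt.Println(w.ErrorCode, w.Message)
}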

Some files were not shown because too many files have changed in this diff.