refactor to go-restful
27 vendor/github.com/rogpeppe/go-internal/LICENSE (generated, vendored, Normal file)
@@ -0,0 +1,27 @@
Copyright (c) 2018 The Go Authors. All rights reserved.

Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:

   * Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
   * Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
   * Neither the name of Google Inc. nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
47 vendor/github.com/rogpeppe/go-internal/modfile/gopkgin.go (generated, vendored, Normal file)
@@ -0,0 +1,47 @@
// Copyright 2018 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// TODO: Figure out what gopkg.in should do.

package modfile

import "strings"

// ParseGopkgIn splits gopkg.in import paths into their constituent parts
func ParseGopkgIn(path string) (root, repo, major, subdir string, ok bool) {
	if !strings.HasPrefix(path, "gopkg.in/") {
		return
	}
	f := strings.Split(path, "/")
	if len(f) >= 2 {
		if elem, v, ok := dotV(f[1]); ok {
			root = strings.Join(f[:2], "/")
			repo = "github.com/go-" + elem + "/" + elem
			major = v
			subdir = strings.Join(f[2:], "/")
			return root, repo, major, subdir, true
		}
	}
	if len(f) >= 3 {
		if elem, v, ok := dotV(f[2]); ok {
			root = strings.Join(f[:3], "/")
			repo = "github.com/" + f[1] + "/" + elem
			major = v
			subdir = strings.Join(f[3:], "/")
			return root, repo, major, subdir, true
		}
	}
	return
}

func dotV(name string) (elem, v string, ok bool) {
	i := len(name) - 1
	for i >= 0 && '0' <= name[i] && name[i] <= '9' {
		i--
	}
	if i <= 2 || i+1 >= len(name) || name[i-1] != '.' || name[i] != 'v' || name[i+1] == '0' && len(name) != i+2 {
		return "", "", false
	}
	return name[:i-1], name[i:], true
}
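A minimal usage sketch of ParseGopkgIn above, for reviewers; the gopkg.in path is illustrative and not taken from this commit:

package main

import (
	"fmt"

	"github.com/rogpeppe/go-internal/modfile"
)

func main() {
	// "gopkg.in/yaml.v2/subpkg" is an illustrative path.
	root, repo, major, subdir, ok := modfile.ParseGopkgIn("gopkg.in/yaml.v2/subpkg")
	if ok {
		// root   = "gopkg.in/yaml.v2"
		// repo   = "github.com/go-yaml/yaml"
		// major  = "v2"
		// subdir = "subpkg"
		fmt.Println(root, repo, major, subdir)
	}
}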
164 vendor/github.com/rogpeppe/go-internal/modfile/print.go (generated, vendored, Normal file)
@@ -0,0 +1,164 @@
// Copyright 2018 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Package modfile implements parsing and formatting for
// go.mod files.
package modfile

import (
	"bytes"
	"fmt"
	"strings"
)

func Format(f *FileSyntax) []byte {
	pr := &printer{}
	pr.file(f)
	return pr.Bytes()
}

// A printer collects the state during printing of a file or expression.
type printer struct {
	bytes.Buffer           // output buffer
	comment      []Comment // pending end-of-line comments
	margin       int       // left margin (indent), a number of tabs
}

// printf prints to the buffer.
func (p *printer) printf(format string, args ...interface{}) {
	fmt.Fprintf(p, format, args...)
}

// indent returns the position on the current line, in bytes, 0-indexed.
func (p *printer) indent() int {
	b := p.Bytes()
	n := 0
	for n < len(b) && b[len(b)-1-n] != '\n' {
		n++
	}
	return n
}

// newline ends the current line, flushing end-of-line comments.
func (p *printer) newline() {
	if len(p.comment) > 0 {
		p.printf(" ")
		for i, com := range p.comment {
			if i > 0 {
				p.trim()
				p.printf("\n")
				for i := 0; i < p.margin; i++ {
					p.printf("\t")
				}
			}
			p.printf("%s", strings.TrimSpace(com.Token))
		}
		p.comment = p.comment[:0]
	}

	p.trim()
	p.printf("\n")
	for i := 0; i < p.margin; i++ {
		p.printf("\t")
	}
}

// trim removes trailing spaces and tabs from the current line.
func (p *printer) trim() {
	// Remove trailing spaces and tabs from line we're about to end.
	b := p.Bytes()
	n := len(b)
	for n > 0 && (b[n-1] == '\t' || b[n-1] == ' ') {
		n--
	}
	p.Truncate(n)
}

// file formats the given file into the print buffer.
func (p *printer) file(f *FileSyntax) {
	for _, com := range f.Before {
		p.printf("%s", strings.TrimSpace(com.Token))
		p.newline()
	}

	for i, stmt := range f.Stmt {
		switch x := stmt.(type) {
		case *CommentBlock:
			// comments already handled
			p.expr(x)

		default:
			p.expr(x)
			p.newline()
		}

		for _, com := range stmt.Comment().After {
			p.printf("%s", strings.TrimSpace(com.Token))
			p.newline()
		}

		if i+1 < len(f.Stmt) {
			p.newline()
		}
	}
}

func (p *printer) expr(x Expr) {
	// Emit line-comments preceding this expression.
	if before := x.Comment().Before; len(before) > 0 {
		// Want to print a line comment.
		// Line comments must be at the current margin.
		p.trim()
		if p.indent() > 0 {
			// There's other text on the line. Start a new line.
			p.printf("\n")
		}
		// Re-indent to margin.
		for i := 0; i < p.margin; i++ {
			p.printf("\t")
		}
		for _, com := range before {
			p.printf("%s", strings.TrimSpace(com.Token))
			p.newline()
		}
	}

	switch x := x.(type) {
	default:
		panic(fmt.Errorf("printer: unexpected type %T", x))

	case *CommentBlock:
		// done

	case *LParen:
		p.printf("(")
	case *RParen:
		p.printf(")")

	case *Line:
		sep := ""
		for _, tok := range x.Token {
			p.printf("%s%s", sep, tok)
			sep = " "
		}

	case *LineBlock:
		for _, tok := range x.Token {
			p.printf("%s ", tok)
		}
		p.expr(&x.LParen)
		p.margin++
		for _, l := range x.Line {
			p.newline()
			p.expr(l)
		}
		p.margin--
		p.newline()
		p.expr(&x.RParen)
	}

	// Queue end-of-line comments for printing when we
	// reach the end of the line.
	p.comment = append(p.comment, x.Comment().Suffix...)
}
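A minimal sketch of Format above applied to a hand-built FileSyntax (the FileSyntax, Expr, and Line types are defined in read.go below); the module path is illustrative:

package main

import (
	"fmt"

	"github.com/rogpeppe/go-internal/modfile"
)

func main() {
	// Build a tiny syntax tree by hand and print it with Format.
	fs := &modfile.FileSyntax{
		Stmt: []modfile.Expr{
			&modfile.Line{Token: []string{"module", "example.com/m"}},
		},
	}
	fmt.Printf("%s", modfile.Format(fs)) // prints: module example.com/m
}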
869 vendor/github.com/rogpeppe/go-internal/modfile/read.go (generated, vendored, Normal file)
@@ -0,0 +1,869 @@
|
||||
// Copyright 2018 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// Module file parser.
|
||||
// This is a simplified copy of Google's buildifier parser.
|
||||
|
||||
package modfile
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"os"
|
||||
"strconv"
|
||||
"strings"
|
||||
"unicode"
|
||||
"unicode/utf8"
|
||||
)
|
||||
|
||||
// A Position describes the position between two bytes of input.
|
||||
type Position struct {
|
||||
Line int // line in input (starting at 1)
|
||||
LineRune int // rune in line (starting at 1)
|
||||
Byte int // byte in input (starting at 0)
|
||||
}
|
||||
|
||||
// add returns the position at the end of s, assuming it starts at p.
|
||||
func (p Position) add(s string) Position {
|
||||
p.Byte += len(s)
|
||||
if n := strings.Count(s, "\n"); n > 0 {
|
||||
p.Line += n
|
||||
s = s[strings.LastIndex(s, "\n")+1:]
|
||||
p.LineRune = 1
|
||||
}
|
||||
p.LineRune += utf8.RuneCountInString(s)
|
||||
return p
|
||||
}
|
||||
|
||||
// An Expr represents an input element.
|
||||
type Expr interface {
|
||||
// Span returns the start and end position of the expression,
|
||||
// excluding leading or trailing comments.
|
||||
Span() (start, end Position)
|
||||
|
||||
// Comment returns the comments attached to the expression.
|
||||
// This method would normally be named 'Comments' but that
|
||||
// would interfere with embedding a type of the same name.
|
||||
Comment() *Comments
|
||||
}
|
||||
|
||||
// A Comment represents a single // comment.
|
||||
type Comment struct {
|
||||
Start Position
|
||||
Token string // without trailing newline
|
||||
Suffix bool // an end of line (not whole line) comment
|
||||
}
|
||||
|
||||
// Comments collects the comments associated with an expression.
|
||||
type Comments struct {
|
||||
Before []Comment // whole-line comments before this expression
|
||||
Suffix []Comment // end-of-line comments after this expression
|
||||
|
||||
// For top-level expressions only, After lists whole-line
|
||||
// comments following the expression.
|
||||
After []Comment
|
||||
}
|
||||
|
||||
// Comment returns the receiver. This isn't useful by itself, but
|
||||
// a Comments struct is embedded into all the expression
|
||||
// implementation types, and this gives each of those a Comment
|
||||
// method to satisfy the Expr interface.
|
||||
func (c *Comments) Comment() *Comments {
|
||||
return c
|
||||
}
|
||||
|
||||
// A FileSyntax represents an entire go.mod file.
|
||||
type FileSyntax struct {
|
||||
Name string // file path
|
||||
Comments
|
||||
Stmt []Expr
|
||||
}
|
||||
|
||||
func (x *FileSyntax) Span() (start, end Position) {
|
||||
if len(x.Stmt) == 0 {
|
||||
return
|
||||
}
|
||||
start, _ = x.Stmt[0].Span()
|
||||
_, end = x.Stmt[len(x.Stmt)-1].Span()
|
||||
return start, end
|
||||
}
|
||||
|
||||
func (x *FileSyntax) addLine(hint Expr, tokens ...string) *Line {
|
||||
if hint == nil {
|
||||
// If no hint given, add to the last statement of the given type.
|
||||
Loop:
|
||||
for i := len(x.Stmt) - 1; i >= 0; i-- {
|
||||
stmt := x.Stmt[i]
|
||||
switch stmt := stmt.(type) {
|
||||
case *Line:
|
||||
if stmt.Token != nil && stmt.Token[0] == tokens[0] {
|
||||
hint = stmt
|
||||
break Loop
|
||||
}
|
||||
case *LineBlock:
|
||||
if stmt.Token[0] == tokens[0] {
|
||||
hint = stmt
|
||||
break Loop
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if hint != nil {
|
||||
for i, stmt := range x.Stmt {
|
||||
switch stmt := stmt.(type) {
|
||||
case *Line:
|
||||
if stmt == hint {
|
||||
// Convert line to line block.
|
||||
stmt.InBlock = true
|
||||
block := &LineBlock{Token: stmt.Token[:1], Line: []*Line{stmt}}
|
||||
stmt.Token = stmt.Token[1:]
|
||||
x.Stmt[i] = block
|
||||
new := &Line{Token: tokens[1:], InBlock: true}
|
||||
block.Line = append(block.Line, new)
|
||||
return new
|
||||
}
|
||||
case *LineBlock:
|
||||
if stmt == hint {
|
||||
new := &Line{Token: tokens[1:], InBlock: true}
|
||||
stmt.Line = append(stmt.Line, new)
|
||||
return new
|
||||
}
|
||||
for j, line := range stmt.Line {
|
||||
if line == hint {
|
||||
// Add new line after hint.
|
||||
stmt.Line = append(stmt.Line, nil)
|
||||
copy(stmt.Line[j+2:], stmt.Line[j+1:])
|
||||
new := &Line{Token: tokens[1:], InBlock: true}
|
||||
stmt.Line[j+1] = new
|
||||
return new
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
new := &Line{Token: tokens}
|
||||
x.Stmt = append(x.Stmt, new)
|
||||
return new
|
||||
}
|
||||
|
||||
func (x *FileSyntax) updateLine(line *Line, tokens ...string) {
|
||||
if line.InBlock {
|
||||
tokens = tokens[1:]
|
||||
}
|
||||
line.Token = tokens
|
||||
}
|
||||
|
||||
func (x *FileSyntax) removeLine(line *Line) {
|
||||
line.Token = nil
|
||||
}
|
||||
|
||||
// Cleanup cleans up the file syntax x after any edit operations.
|
||||
// To avoid quadratic behavior, removeLine marks the line as dead
|
||||
// by setting line.Token = nil but does not remove it from the slice
|
||||
// in which it appears. After edits have all been indicated,
|
||||
// calling Cleanup cleans out the dead lines.
|
||||
func (x *FileSyntax) Cleanup() {
|
||||
w := 0
|
||||
for _, stmt := range x.Stmt {
|
||||
switch stmt := stmt.(type) {
|
||||
case *Line:
|
||||
if stmt.Token == nil {
|
||||
continue
|
||||
}
|
||||
case *LineBlock:
|
||||
ww := 0
|
||||
for _, line := range stmt.Line {
|
||||
if line.Token != nil {
|
||||
stmt.Line[ww] = line
|
||||
ww++
|
||||
}
|
||||
}
|
||||
if ww == 0 {
|
||||
continue
|
||||
}
|
||||
if ww == 1 {
|
||||
// Collapse block into single line.
|
||||
line := &Line{
|
||||
Comments: Comments{
|
||||
Before: commentsAdd(stmt.Before, stmt.Line[0].Before),
|
||||
Suffix: commentsAdd(stmt.Line[0].Suffix, stmt.Suffix),
|
||||
After: commentsAdd(stmt.Line[0].After, stmt.After),
|
||||
},
|
||||
Token: stringsAdd(stmt.Token, stmt.Line[0].Token),
|
||||
}
|
||||
x.Stmt[w] = line
|
||||
w++
|
||||
continue
|
||||
}
|
||||
stmt.Line = stmt.Line[:ww]
|
||||
}
|
||||
x.Stmt[w] = stmt
|
||||
w++
|
||||
}
|
||||
x.Stmt = x.Stmt[:w]
|
||||
}
|
||||
|
||||
func commentsAdd(x, y []Comment) []Comment {
|
||||
return append(x[:len(x):len(x)], y...)
|
||||
}
|
||||
|
||||
func stringsAdd(x, y []string) []string {
|
||||
return append(x[:len(x):len(x)], y...)
|
||||
}
|
||||
|
||||
// A CommentBlock represents a top-level block of comments separate
|
||||
// from any rule.
|
||||
type CommentBlock struct {
|
||||
Comments
|
||||
Start Position
|
||||
}
|
||||
|
||||
func (x *CommentBlock) Span() (start, end Position) {
|
||||
return x.Start, x.Start
|
||||
}
|
||||
|
||||
// A Line is a single line of tokens.
|
||||
type Line struct {
|
||||
Comments
|
||||
Start Position
|
||||
Token []string
|
||||
InBlock bool
|
||||
End Position
|
||||
}
|
||||
|
||||
func (x *Line) Span() (start, end Position) {
|
||||
return x.Start, x.End
|
||||
}
|
||||
|
||||
// A LineBlock is a factored block of lines, like
|
||||
//
|
||||
// require (
|
||||
// "x"
|
||||
// "y"
|
||||
// )
|
||||
//
|
||||
type LineBlock struct {
|
||||
Comments
|
||||
Start Position
|
||||
LParen LParen
|
||||
Token []string
|
||||
Line []*Line
|
||||
RParen RParen
|
||||
}
|
||||
|
||||
func (x *LineBlock) Span() (start, end Position) {
|
||||
return x.Start, x.RParen.Pos.add(")")
|
||||
}
|
||||
|
||||
// An LParen represents the beginning of a parenthesized line block.
|
||||
// It is a place to store suffix comments.
|
||||
type LParen struct {
|
||||
Comments
|
||||
Pos Position
|
||||
}
|
||||
|
||||
func (x *LParen) Span() (start, end Position) {
|
||||
return x.Pos, x.Pos.add(")")
|
||||
}
|
||||
|
||||
// An RParen represents the end of a parenthesized line block.
|
||||
// It is a place to store whole-line (before) comments.
|
||||
type RParen struct {
|
||||
Comments
|
||||
Pos Position
|
||||
}
|
||||
|
||||
func (x *RParen) Span() (start, end Position) {
|
||||
return x.Pos, x.Pos.add(")")
|
||||
}
|
||||
|
||||
// An input represents a single input file being parsed.
|
||||
type input struct {
|
||||
// Lexing state.
|
||||
filename string // name of input file, for errors
|
||||
complete []byte // entire input
|
||||
remaining []byte // remaining input
|
||||
token []byte // token being scanned
|
||||
lastToken string // most recently returned token, for error messages
|
||||
pos Position // current input position
|
||||
comments []Comment // accumulated comments
|
||||
endRule int // position of end of current rule
|
||||
|
||||
// Parser state.
|
||||
file *FileSyntax // returned top-level syntax tree
|
||||
parseError error // error encountered during parsing
|
||||
|
||||
// Comment assignment state.
|
||||
pre []Expr // all expressions, in preorder traversal
|
||||
post []Expr // all expressions, in postorder traversal
|
||||
}
|
||||
|
||||
func newInput(filename string, data []byte) *input {
|
||||
return &input{
|
||||
filename: filename,
|
||||
complete: data,
|
||||
remaining: data,
|
||||
pos: Position{Line: 1, LineRune: 1, Byte: 0},
|
||||
}
|
||||
}
|
||||
|
||||
// parse parses the input file.
|
||||
func parse(file string, data []byte) (f *FileSyntax, err error) {
|
||||
in := newInput(file, data)
|
||||
// The parser panics for both routine errors like syntax errors
|
||||
// and for programmer bugs like array index errors.
|
||||
// Turn both into error returns. Catching bug panics is
|
||||
// especially important when processing many files.
|
||||
defer func() {
|
||||
if e := recover(); e != nil {
|
||||
if e == in.parseError {
|
||||
err = in.parseError
|
||||
} else {
|
||||
err = fmt.Errorf("%s:%d:%d: internal error: %v", in.filename, in.pos.Line, in.pos.LineRune, e)
|
||||
}
|
||||
}
|
||||
}()
|
||||
|
||||
// Invoke the parser.
|
||||
in.parseFile()
|
||||
if in.parseError != nil {
|
||||
return nil, in.parseError
|
||||
}
|
||||
in.file.Name = in.filename
|
||||
|
||||
// Assign comments to nearby syntax.
|
||||
in.assignComments()
|
||||
|
||||
return in.file, nil
|
||||
}
|
||||
|
||||
// Error is called to report an error.
|
||||
// The reason s is often "syntax error".
|
||||
// Error does not return: it panics.
|
||||
func (in *input) Error(s string) {
|
||||
if s == "syntax error" && in.lastToken != "" {
|
||||
s += " near " + in.lastToken
|
||||
}
|
||||
in.parseError = fmt.Errorf("%s:%d:%d: %v", in.filename, in.pos.Line, in.pos.LineRune, s)
|
||||
panic(in.parseError)
|
||||
}
|
||||
|
||||
// eof reports whether the input has reached end of file.
|
||||
func (in *input) eof() bool {
|
||||
return len(in.remaining) == 0
|
||||
}
|
||||
|
||||
// peekRune returns the next rune in the input without consuming it.
|
||||
func (in *input) peekRune() int {
|
||||
if len(in.remaining) == 0 {
|
||||
return 0
|
||||
}
|
||||
r, _ := utf8.DecodeRune(in.remaining)
|
||||
return int(r)
|
||||
}
|
||||
|
||||
// peekPrefix reports whether the remaining input begins with the given prefix.
|
||||
func (in *input) peekPrefix(prefix string) bool {
|
||||
// This is like bytes.HasPrefix(in.remaining, []byte(prefix))
|
||||
// but without the allocation of the []byte copy of prefix.
|
||||
for i := 0; i < len(prefix); i++ {
|
||||
if i >= len(in.remaining) || in.remaining[i] != prefix[i] {
|
||||
return false
|
||||
}
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
// readRune consumes and returns the next rune in the input.
|
||||
func (in *input) readRune() int {
|
||||
if len(in.remaining) == 0 {
|
||||
in.Error("internal lexer error: readRune at EOF")
|
||||
}
|
||||
r, size := utf8.DecodeRune(in.remaining)
|
||||
in.remaining = in.remaining[size:]
|
||||
if r == '\n' {
|
||||
in.pos.Line++
|
||||
in.pos.LineRune = 1
|
||||
} else {
|
||||
in.pos.LineRune++
|
||||
}
|
||||
in.pos.Byte += size
|
||||
return int(r)
|
||||
}
|
||||
|
||||
type symType struct {
|
||||
pos Position
|
||||
endPos Position
|
||||
text string
|
||||
}
|
||||
|
||||
// startToken marks the beginning of the next input token.
|
||||
// It must be followed by a call to endToken, once the token has
|
||||
// been consumed using readRune.
|
||||
func (in *input) startToken(sym *symType) {
|
||||
in.token = in.remaining
|
||||
sym.text = ""
|
||||
sym.pos = in.pos
|
||||
}
|
||||
|
||||
// endToken marks the end of an input token.
|
||||
// It records the actual token string in sym.text if the caller
|
||||
// has not done that already.
|
||||
func (in *input) endToken(sym *symType) {
|
||||
if sym.text == "" {
|
||||
tok := string(in.token[:len(in.token)-len(in.remaining)])
|
||||
sym.text = tok
|
||||
in.lastToken = sym.text
|
||||
}
|
||||
sym.endPos = in.pos
|
||||
}
|
||||
|
||||
// lex is called from the parser to obtain the next input token.
|
||||
// It returns the token value (either a rune like '+' or a symbolic token _FOR)
|
||||
// and sets val to the data associated with the token.
|
||||
// For all our input tokens, the associated data is
|
||||
// val.Pos (the position where the token begins)
|
||||
// and val.Token (the input string corresponding to the token).
|
||||
func (in *input) lex(sym *symType) int {
|
||||
// Skip past spaces, stopping at non-space or EOF.
|
||||
countNL := 0 // number of newlines we've skipped past
|
||||
for !in.eof() {
|
||||
// Skip over spaces. Count newlines so we can give the parser
|
||||
// information about where top-level blank lines are,
|
||||
// for top-level comment assignment.
|
||||
c := in.peekRune()
|
||||
if c == ' ' || c == '\t' || c == '\r' {
|
||||
in.readRune()
|
||||
continue
|
||||
}
|
||||
|
||||
// Comment runs to end of line.
|
||||
if in.peekPrefix("//") {
|
||||
in.startToken(sym)
|
||||
|
||||
// Is this comment the only thing on its line?
|
||||
// Find the last \n before this // and see if it's all
|
||||
// spaces from there to here.
|
||||
i := bytes.LastIndex(in.complete[:in.pos.Byte], []byte("\n"))
|
||||
suffix := len(bytes.TrimSpace(in.complete[i+1:in.pos.Byte])) > 0
|
||||
in.readRune()
|
||||
in.readRune()
|
||||
|
||||
// Consume comment.
|
||||
for len(in.remaining) > 0 && in.readRune() != '\n' {
|
||||
}
|
||||
in.endToken(sym)
|
||||
|
||||
sym.text = strings.TrimRight(sym.text, "\n")
|
||||
in.lastToken = "comment"
|
||||
|
||||
// If we are at top level (not in a statement), hand the comment to
|
||||
// the parser as a _COMMENT token. The grammar is written
|
||||
// to handle top-level comments itself.
|
||||
if !suffix {
|
||||
// Not in a statement. Tell parser about top-level comment.
|
||||
return _COMMENT
|
||||
}
|
||||
|
||||
// Otherwise, save comment for later attachment to syntax tree.
|
||||
if countNL > 1 {
|
||||
in.comments = append(in.comments, Comment{sym.pos, "", false})
|
||||
}
|
||||
in.comments = append(in.comments, Comment{sym.pos, sym.text, suffix})
|
||||
countNL = 1
|
||||
return _EOL
|
||||
}
|
||||
|
||||
if in.peekPrefix("/*") {
|
||||
in.Error(fmt.Sprintf("mod files must use // comments (not /* */ comments)"))
|
||||
}
|
||||
|
||||
// Found non-space non-comment.
|
||||
break
|
||||
}
|
||||
|
||||
// Found the beginning of the next token.
|
||||
in.startToken(sym)
|
||||
defer in.endToken(sym)
|
||||
|
||||
// End of file.
|
||||
if in.eof() {
|
||||
in.lastToken = "EOF"
|
||||
return _EOF
|
||||
}
|
||||
|
||||
// Punctuation tokens.
|
||||
switch c := in.peekRune(); c {
|
||||
case '\n':
|
||||
in.readRune()
|
||||
return c
|
||||
|
||||
case '(':
|
||||
in.readRune()
|
||||
return c
|
||||
|
||||
case ')':
|
||||
in.readRune()
|
||||
return c
|
||||
|
||||
case '"', '`': // quoted string
|
||||
quote := c
|
||||
in.readRune()
|
||||
for {
|
||||
if in.eof() {
|
||||
in.pos = sym.pos
|
||||
in.Error("unexpected EOF in string")
|
||||
}
|
||||
if in.peekRune() == '\n' {
|
||||
in.Error("unexpected newline in string")
|
||||
}
|
||||
c := in.readRune()
|
||||
if c == quote {
|
||||
break
|
||||
}
|
||||
if c == '\\' && quote != '`' {
|
||||
if in.eof() {
|
||||
in.pos = sym.pos
|
||||
in.Error("unexpected EOF in string")
|
||||
}
|
||||
in.readRune()
|
||||
}
|
||||
}
|
||||
in.endToken(sym)
|
||||
return _STRING
|
||||
}
|
||||
|
||||
// Checked all punctuation. Must be identifier token.
|
||||
if c := in.peekRune(); !isIdent(c) {
|
||||
in.Error(fmt.Sprintf("unexpected input character %#q", c))
|
||||
}
|
||||
|
||||
// Scan over identifier.
|
||||
for isIdent(in.peekRune()) {
|
||||
if in.peekPrefix("//") {
|
||||
break
|
||||
}
|
||||
if in.peekPrefix("/*") {
|
||||
in.Error(fmt.Sprintf("mod files must use // comments (not /* */ comments)"))
|
||||
}
|
||||
in.readRune()
|
||||
}
|
||||
return _IDENT
|
||||
}
|
||||
|
||||
// isIdent reports whether c is an identifier rune.
|
||||
// We treat nearly all runes as identifier runes.
|
||||
func isIdent(c int) bool {
|
||||
return c != 0 && !unicode.IsSpace(rune(c))
|
||||
}
|
||||
|
||||
// Comment assignment.
|
||||
// We build two lists of all subexpressions, preorder and postorder.
|
||||
// The preorder list is ordered by start location, with outer expressions first.
|
||||
// The postorder list is ordered by end location, with outer expressions last.
|
||||
// We use the preorder list to assign each whole-line comment to the syntax
|
||||
// immediately following it, and we use the postorder list to assign each
|
||||
// end-of-line comment to the syntax immediately preceding it.
|
||||
|
||||
// order walks the expression adding it and its subexpressions to the
|
||||
// preorder and postorder lists.
|
||||
func (in *input) order(x Expr) {
|
||||
if x != nil {
|
||||
in.pre = append(in.pre, x)
|
||||
}
|
||||
switch x := x.(type) {
|
||||
default:
|
||||
panic(fmt.Errorf("order: unexpected type %T", x))
|
||||
case nil:
|
||||
// nothing
|
||||
case *LParen, *RParen:
|
||||
// nothing
|
||||
case *CommentBlock:
|
||||
// nothing
|
||||
case *Line:
|
||||
// nothing
|
||||
case *FileSyntax:
|
||||
for _, stmt := range x.Stmt {
|
||||
in.order(stmt)
|
||||
}
|
||||
case *LineBlock:
|
||||
in.order(&x.LParen)
|
||||
for _, l := range x.Line {
|
||||
in.order(l)
|
||||
}
|
||||
in.order(&x.RParen)
|
||||
}
|
||||
if x != nil {
|
||||
in.post = append(in.post, x)
|
||||
}
|
||||
}
|
||||
|
||||
// assignComments attaches comments to nearby syntax.
|
||||
func (in *input) assignComments() {
|
||||
const debug = false
|
||||
|
||||
// Generate preorder and postorder lists.
|
||||
in.order(in.file)
|
||||
|
||||
// Split into whole-line comments and suffix comments.
|
||||
var line, suffix []Comment
|
||||
for _, com := range in.comments {
|
||||
if com.Suffix {
|
||||
suffix = append(suffix, com)
|
||||
} else {
|
||||
line = append(line, com)
|
||||
}
|
||||
}
|
||||
|
||||
if debug {
|
||||
for _, c := range line {
|
||||
fmt.Fprintf(os.Stderr, "LINE %q :%d:%d #%d\n", c.Token, c.Start.Line, c.Start.LineRune, c.Start.Byte)
|
||||
}
|
||||
}
|
||||
|
||||
// Assign line comments to syntax immediately following.
|
||||
for _, x := range in.pre {
|
||||
start, _ := x.Span()
|
||||
if debug {
|
||||
fmt.Printf("pre %T :%d:%d #%d\n", x, start.Line, start.LineRune, start.Byte)
|
||||
}
|
||||
xcom := x.Comment()
|
||||
for len(line) > 0 && start.Byte >= line[0].Start.Byte {
|
||||
if debug {
|
||||
fmt.Fprintf(os.Stderr, "ASSIGN LINE %q #%d\n", line[0].Token, line[0].Start.Byte)
|
||||
}
|
||||
xcom.Before = append(xcom.Before, line[0])
|
||||
line = line[1:]
|
||||
}
|
||||
}
|
||||
|
||||
// Remaining line comments go at end of file.
|
||||
in.file.After = append(in.file.After, line...)
|
||||
|
||||
if debug {
|
||||
for _, c := range suffix {
|
||||
fmt.Fprintf(os.Stderr, "SUFFIX %q :%d:%d #%d\n", c.Token, c.Start.Line, c.Start.LineRune, c.Start.Byte)
|
||||
}
|
||||
}
|
||||
|
||||
// Assign suffix comments to syntax immediately before.
|
||||
for i := len(in.post) - 1; i >= 0; i-- {
|
||||
x := in.post[i]
|
||||
|
||||
start, end := x.Span()
|
||||
if debug {
|
||||
fmt.Printf("post %T :%d:%d #%d :%d:%d #%d\n", x, start.Line, start.LineRune, start.Byte, end.Line, end.LineRune, end.Byte)
|
||||
}
|
||||
|
||||
// Do not assign suffix comments to end of line block or whole file.
|
||||
// Instead assign them to the last element inside.
|
||||
switch x.(type) {
|
||||
case *FileSyntax:
|
||||
continue
|
||||
}
|
||||
|
||||
// Do not assign suffix comments to something that starts
|
||||
// on an earlier line, so that in
|
||||
//
|
||||
// x ( y
|
||||
// z ) // comment
|
||||
//
|
||||
// we assign the comment to z and not to x ( ... ).
|
||||
if start.Line != end.Line {
|
||||
continue
|
||||
}
|
||||
xcom := x.Comment()
|
||||
for len(suffix) > 0 && end.Byte <= suffix[len(suffix)-1].Start.Byte {
|
||||
if debug {
|
||||
fmt.Fprintf(os.Stderr, "ASSIGN SUFFIX %q #%d\n", suffix[len(suffix)-1].Token, suffix[len(suffix)-1].Start.Byte)
|
||||
}
|
||||
xcom.Suffix = append(xcom.Suffix, suffix[len(suffix)-1])
|
||||
suffix = suffix[:len(suffix)-1]
|
||||
}
|
||||
}
|
||||
|
||||
// We assigned suffix comments in reverse.
|
||||
// If multiple suffix comments were appended to the same
|
||||
// expression node, they are now in reverse. Fix that.
|
||||
for _, x := range in.post {
|
||||
reverseComments(x.Comment().Suffix)
|
||||
}
|
||||
|
||||
// Remaining suffix comments go at beginning of file.
|
||||
in.file.Before = append(in.file.Before, suffix...)
|
||||
}
|
||||
|
||||
// reverseComments reverses the []Comment list.
|
||||
func reverseComments(list []Comment) {
|
||||
for i, j := 0, len(list)-1; i < j; i, j = i+1, j-1 {
|
||||
list[i], list[j] = list[j], list[i]
|
||||
}
|
||||
}
|
||||
|
||||
func (in *input) parseFile() {
|
||||
in.file = new(FileSyntax)
|
||||
var sym symType
|
||||
var cb *CommentBlock
|
||||
for {
|
||||
tok := in.lex(&sym)
|
||||
switch tok {
|
||||
case '\n':
|
||||
if cb != nil {
|
||||
in.file.Stmt = append(in.file.Stmt, cb)
|
||||
cb = nil
|
||||
}
|
||||
case _COMMENT:
|
||||
if cb == nil {
|
||||
cb = &CommentBlock{Start: sym.pos}
|
||||
}
|
||||
com := cb.Comment()
|
||||
com.Before = append(com.Before, Comment{Start: sym.pos, Token: sym.text})
|
||||
case _EOF:
|
||||
if cb != nil {
|
||||
in.file.Stmt = append(in.file.Stmt, cb)
|
||||
}
|
||||
return
|
||||
default:
|
||||
in.parseStmt(&sym)
|
||||
if cb != nil {
|
||||
in.file.Stmt[len(in.file.Stmt)-1].Comment().Before = cb.Before
|
||||
cb = nil
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (in *input) parseStmt(sym *symType) {
|
||||
start := sym.pos
|
||||
end := sym.endPos
|
||||
token := []string{sym.text}
|
||||
for {
|
||||
tok := in.lex(sym)
|
||||
switch tok {
|
||||
case '\n', _EOF, _EOL:
|
||||
in.file.Stmt = append(in.file.Stmt, &Line{
|
||||
Start: start,
|
||||
Token: token,
|
||||
End: end,
|
||||
})
|
||||
return
|
||||
case '(':
|
||||
in.file.Stmt = append(in.file.Stmt, in.parseLineBlock(start, token, sym))
|
||||
return
|
||||
default:
|
||||
token = append(token, sym.text)
|
||||
end = sym.endPos
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (in *input) parseLineBlock(start Position, token []string, sym *symType) *LineBlock {
|
||||
x := &LineBlock{
|
||||
Start: start,
|
||||
Token: token,
|
||||
LParen: LParen{Pos: sym.pos},
|
||||
}
|
||||
var comments []Comment
|
||||
for {
|
||||
tok := in.lex(sym)
|
||||
switch tok {
|
||||
case _EOL:
|
||||
// ignore
|
||||
case '\n':
|
||||
if len(comments) == 0 && len(x.Line) > 0 || len(comments) > 0 && comments[len(comments)-1].Token != "" {
|
||||
comments = append(comments, Comment{})
|
||||
}
|
||||
case _COMMENT:
|
||||
comments = append(comments, Comment{Start: sym.pos, Token: sym.text})
|
||||
case _EOF:
|
||||
in.Error(fmt.Sprintf("syntax error (unterminated block started at %s:%d:%d)", in.filename, x.Start.Line, x.Start.LineRune))
|
||||
case ')':
|
||||
x.RParen.Before = comments
|
||||
x.RParen.Pos = sym.pos
|
||||
tok = in.lex(sym)
|
||||
if tok != '\n' && tok != _EOF && tok != _EOL {
|
||||
in.Error("syntax error (expected newline after closing paren)")
|
||||
}
|
||||
return x
|
||||
default:
|
||||
l := in.parseLine(sym)
|
||||
x.Line = append(x.Line, l)
|
||||
l.Comment().Before = comments
|
||||
comments = nil
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (in *input) parseLine(sym *symType) *Line {
|
||||
start := sym.pos
|
||||
end := sym.endPos
|
||||
token := []string{sym.text}
|
||||
for {
|
||||
tok := in.lex(sym)
|
||||
switch tok {
|
||||
case '\n', _EOF, _EOL:
|
||||
return &Line{
|
||||
Start: start,
|
||||
Token: token,
|
||||
End: end,
|
||||
InBlock: true,
|
||||
}
|
||||
default:
|
||||
token = append(token, sym.text)
|
||||
end = sym.endPos
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
const (
|
||||
_EOF = -(1 + iota)
|
||||
_EOL
|
||||
_IDENT
|
||||
_STRING
|
||||
_COMMENT
|
||||
)
|
||||
|
||||
var (
|
||||
slashSlash = []byte("//")
|
||||
moduleStr = []byte("module")
|
||||
)
|
||||
|
||||
// ModulePath returns the module path from the gomod file text.
|
||||
// If it cannot find a module path, it returns an empty string.
|
||||
// It is tolerant of unrelated problems in the go.mod file.
|
||||
func ModulePath(mod []byte) string {
|
||||
for len(mod) > 0 {
|
||||
line := mod
|
||||
mod = nil
|
||||
if i := bytes.IndexByte(line, '\n'); i >= 0 {
|
||||
line, mod = line[:i], line[i+1:]
|
||||
}
|
||||
if i := bytes.Index(line, slashSlash); i >= 0 {
|
||||
line = line[:i]
|
||||
}
|
||||
line = bytes.TrimSpace(line)
|
||||
if !bytes.HasPrefix(line, moduleStr) {
|
||||
continue
|
||||
}
|
||||
line = line[len(moduleStr):]
|
||||
n := len(line)
|
||||
line = bytes.TrimSpace(line)
|
||||
if len(line) == n || len(line) == 0 {
|
||||
continue
|
||||
}
|
||||
|
||||
if line[0] == '"' || line[0] == '`' {
|
||||
p, err := strconv.Unquote(string(line))
|
||||
if err != nil {
|
||||
return "" // malformed quoted string or multiline module path
|
||||
}
|
||||
return p
|
||||
}
|
||||
|
||||
return string(line)
|
||||
}
|
||||
return "" // missing module path
|
||||
}
|
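A minimal sketch of ModulePath above; the go.mod contents are illustrative:

package main

import (
	"fmt"

	"github.com/rogpeppe/go-internal/modfile"
)

func main() {
	// ModulePath tolerates comments and quoted module paths.
	gomod := []byte("// illustrative go.mod contents\nmodule \"example.com/m\" // comment\nrequire other/thing v1.0.0\n")
	fmt.Println(modfile.ModulePath(gomod)) // prints: example.com/m
}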
724 vendor/github.com/rogpeppe/go-internal/modfile/rule.go (generated, vendored, Normal file)
@@ -0,0 +1,724 @@
|
||||
// Copyright 2018 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package modfile
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"errors"
|
||||
"fmt"
|
||||
"path/filepath"
|
||||
"regexp"
|
||||
"sort"
|
||||
"strconv"
|
||||
"strings"
|
||||
"unicode"
|
||||
|
||||
"github.com/rogpeppe/go-internal/module"
|
||||
"github.com/rogpeppe/go-internal/semver"
|
||||
)
|
||||
|
||||
// A File is the parsed, interpreted form of a go.mod file.
|
||||
type File struct {
|
||||
Module *Module
|
||||
Go *Go
|
||||
Require []*Require
|
||||
Exclude []*Exclude
|
||||
Replace []*Replace
|
||||
|
||||
Syntax *FileSyntax
|
||||
}
|
||||
|
||||
// A Module is the module statement.
|
||||
type Module struct {
|
||||
Mod module.Version
|
||||
Syntax *Line
|
||||
}
|
||||
|
||||
// A Go is the go statement.
|
||||
type Go struct {
|
||||
Version string // "1.23"
|
||||
Syntax *Line
|
||||
}
|
||||
|
||||
// A Require is a single require statement.
|
||||
type Require struct {
|
||||
Mod module.Version
|
||||
Indirect bool // has "// indirect" comment
|
||||
Syntax *Line
|
||||
}
|
||||
|
||||
// An Exclude is a single exclude statement.
|
||||
type Exclude struct {
|
||||
Mod module.Version
|
||||
Syntax *Line
|
||||
}
|
||||
|
||||
// A Replace is a single replace statement.
|
||||
type Replace struct {
|
||||
Old module.Version
|
||||
New module.Version
|
||||
Syntax *Line
|
||||
}
|
||||
|
||||
func (f *File) AddModuleStmt(path string) error {
|
||||
if f.Syntax == nil {
|
||||
f.Syntax = new(FileSyntax)
|
||||
}
|
||||
if f.Module == nil {
|
||||
f.Module = &Module{
|
||||
Mod: module.Version{Path: path},
|
||||
Syntax: f.Syntax.addLine(nil, "module", AutoQuote(path)),
|
||||
}
|
||||
} else {
|
||||
f.Module.Mod.Path = path
|
||||
f.Syntax.updateLine(f.Module.Syntax, "module", AutoQuote(path))
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (f *File) AddComment(text string) {
|
||||
if f.Syntax == nil {
|
||||
f.Syntax = new(FileSyntax)
|
||||
}
|
||||
f.Syntax.Stmt = append(f.Syntax.Stmt, &CommentBlock{
|
||||
Comments: Comments{
|
||||
Before: []Comment{
|
||||
{
|
||||
Token: text,
|
||||
},
|
||||
},
|
||||
},
|
||||
})
|
||||
}
|
||||
|
||||
type VersionFixer func(path, version string) (string, error)
|
||||
|
||||
// Parse parses the data, reported in errors as being from file,
|
||||
// into a File struct. It applies fix, if non-nil, to canonicalize all module versions found.
|
||||
func Parse(file string, data []byte, fix VersionFixer) (*File, error) {
|
||||
return parseToFile(file, data, fix, true)
|
||||
}
|
||||
|
||||
// ParseLax is like Parse but ignores unknown statements.
|
||||
// It is used when parsing go.mod files other than the main module,
|
||||
// under the theory that most statement types we add in the future will
|
||||
// only apply in the main module, like exclude and replace,
|
||||
// and so we get better gradual deployments if old go commands
|
||||
// simply ignore those statements when found in go.mod files
|
||||
// in dependencies.
|
||||
func ParseLax(file string, data []byte, fix VersionFixer) (*File, error) {
|
||||
return parseToFile(file, data, fix, false)
|
||||
}
|
||||
|
||||
func parseToFile(file string, data []byte, fix VersionFixer, strict bool) (*File, error) {
|
||||
fs, err := parse(file, data)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
f := &File{
|
||||
Syntax: fs,
|
||||
}
|
||||
|
||||
var errs bytes.Buffer
|
||||
for _, x := range fs.Stmt {
|
||||
switch x := x.(type) {
|
||||
case *Line:
|
||||
f.add(&errs, x, x.Token[0], x.Token[1:], fix, strict)
|
||||
|
||||
case *LineBlock:
|
||||
if len(x.Token) > 1 {
|
||||
if strict {
|
||||
fmt.Fprintf(&errs, "%s:%d: unknown block type: %s\n", file, x.Start.Line, strings.Join(x.Token, " "))
|
||||
}
|
||||
continue
|
||||
}
|
||||
switch x.Token[0] {
|
||||
default:
|
||||
if strict {
|
||||
fmt.Fprintf(&errs, "%s:%d: unknown block type: %s\n", file, x.Start.Line, strings.Join(x.Token, " "))
|
||||
}
|
||||
continue
|
||||
case "module", "require", "exclude", "replace":
|
||||
for _, l := range x.Line {
|
||||
f.add(&errs, l, x.Token[0], l.Token, fix, strict)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if errs.Len() > 0 {
|
||||
return nil, errors.New(strings.TrimRight(errs.String(), "\n"))
|
||||
}
|
||||
return f, nil
|
||||
}
|
||||
|
||||
var goVersionRE = regexp.MustCompile(`([1-9][0-9]*)\.(0|[1-9][0-9]*)`)
|
||||
|
||||
func (f *File) add(errs *bytes.Buffer, line *Line, verb string, args []string, fix VersionFixer, strict bool) {
|
||||
// If strict is false, this module is a dependency.
|
||||
// We ignore all unknown directives as well as main-module-only
|
||||
// directives like replace and exclude. It will work better for
|
||||
// forward compatibility if we can depend on modules that have unknown
|
||||
// statements (presumed relevant only when acting as the main module)
|
||||
// and simply ignore those statements.
|
||||
if !strict {
|
||||
switch verb {
|
||||
case "module", "require", "go":
|
||||
// want these even for dependency go.mods
|
||||
default:
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
switch verb {
|
||||
default:
|
||||
fmt.Fprintf(errs, "%s:%d: unknown directive: %s\n", f.Syntax.Name, line.Start.Line, verb)
|
||||
|
||||
case "go":
|
||||
if f.Go != nil {
|
||||
fmt.Fprintf(errs, "%s:%d: repeated go statement\n", f.Syntax.Name, line.Start.Line)
|
||||
return
|
||||
}
|
||||
if len(args) != 1 || !goVersionRE.MatchString(args[0]) {
|
||||
fmt.Fprintf(errs, "%s:%d: usage: go 1.23\n", f.Syntax.Name, line.Start.Line)
|
||||
return
|
||||
}
|
||||
f.Go = &Go{Syntax: line}
|
||||
f.Go.Version = args[0]
|
||||
case "module":
|
||||
if f.Module != nil {
|
||||
fmt.Fprintf(errs, "%s:%d: repeated module statement\n", f.Syntax.Name, line.Start.Line)
|
||||
return
|
||||
}
|
||||
f.Module = &Module{Syntax: line}
|
||||
if len(args) != 1 {
|
||||
|
||||
fmt.Fprintf(errs, "%s:%d: usage: module module/path [version]\n", f.Syntax.Name, line.Start.Line)
|
||||
return
|
||||
}
|
||||
s, err := parseString(&args[0])
|
||||
if err != nil {
|
||||
fmt.Fprintf(errs, "%s:%d: invalid quoted string: %v\n", f.Syntax.Name, line.Start.Line, err)
|
||||
return
|
||||
}
|
||||
f.Module.Mod = module.Version{Path: s}
|
||||
case "require", "exclude":
|
||||
if len(args) != 2 {
|
||||
fmt.Fprintf(errs, "%s:%d: usage: %s module/path v1.2.3\n", f.Syntax.Name, line.Start.Line, verb)
|
||||
return
|
||||
}
|
||||
s, err := parseString(&args[0])
|
||||
if err != nil {
|
||||
fmt.Fprintf(errs, "%s:%d: invalid quoted string: %v\n", f.Syntax.Name, line.Start.Line, err)
|
||||
return
|
||||
}
|
||||
old := args[1]
|
||||
v, err := parseVersion(s, &args[1], fix)
|
||||
if err != nil {
|
||||
fmt.Fprintf(errs, "%s:%d: invalid module version %q: %v\n", f.Syntax.Name, line.Start.Line, old, err)
|
||||
return
|
||||
}
|
||||
pathMajor, err := modulePathMajor(s)
|
||||
if err != nil {
|
||||
fmt.Fprintf(errs, "%s:%d: %v\n", f.Syntax.Name, line.Start.Line, err)
|
||||
return
|
||||
}
|
||||
if !module.MatchPathMajor(v, pathMajor) {
|
||||
if pathMajor == "" {
|
||||
pathMajor = "v0 or v1"
|
||||
}
|
||||
fmt.Fprintf(errs, "%s:%d: invalid module: %s should be %s, not %s (%s)\n", f.Syntax.Name, line.Start.Line, s, pathMajor, semver.Major(v), v)
|
||||
return
|
||||
}
|
||||
if verb == "require" {
|
||||
f.Require = append(f.Require, &Require{
|
||||
Mod: module.Version{Path: s, Version: v},
|
||||
Syntax: line,
|
||||
Indirect: isIndirect(line),
|
||||
})
|
||||
} else {
|
||||
f.Exclude = append(f.Exclude, &Exclude{
|
||||
Mod: module.Version{Path: s, Version: v},
|
||||
Syntax: line,
|
||||
})
|
||||
}
|
||||
case "replace":
|
||||
arrow := 2
|
||||
if len(args) >= 2 && args[1] == "=>" {
|
||||
arrow = 1
|
||||
}
|
||||
if len(args) < arrow+2 || len(args) > arrow+3 || args[arrow] != "=>" {
|
||||
fmt.Fprintf(errs, "%s:%d: usage: %s module/path [v1.2.3] => other/module v1.4\n\t or %s module/path [v1.2.3] => ../local/directory\n", f.Syntax.Name, line.Start.Line, verb, verb)
|
||||
return
|
||||
}
|
||||
s, err := parseString(&args[0])
|
||||
if err != nil {
|
||||
fmt.Fprintf(errs, "%s:%d: invalid quoted string: %v\n", f.Syntax.Name, line.Start.Line, err)
|
||||
return
|
||||
}
|
||||
pathMajor, err := modulePathMajor(s)
|
||||
if err != nil {
|
||||
fmt.Fprintf(errs, "%s:%d: %v\n", f.Syntax.Name, line.Start.Line, err)
|
||||
return
|
||||
}
|
||||
var v string
|
||||
if arrow == 2 {
|
||||
old := args[1]
|
||||
v, err = parseVersion(s, &args[1], fix)
|
||||
if err != nil {
|
||||
fmt.Fprintf(errs, "%s:%d: invalid module version %v: %v\n", f.Syntax.Name, line.Start.Line, old, err)
|
||||
return
|
||||
}
|
||||
if !module.MatchPathMajor(v, pathMajor) {
|
||||
if pathMajor == "" {
|
||||
pathMajor = "v0 or v1"
|
||||
}
|
||||
fmt.Fprintf(errs, "%s:%d: invalid module: %s should be %s, not %s (%s)\n", f.Syntax.Name, line.Start.Line, s, pathMajor, semver.Major(v), v)
|
||||
return
|
||||
}
|
||||
}
|
||||
ns, err := parseString(&args[arrow+1])
|
||||
if err != nil {
|
||||
fmt.Fprintf(errs, "%s:%d: invalid quoted string: %v\n", f.Syntax.Name, line.Start.Line, err)
|
||||
return
|
||||
}
|
||||
nv := ""
|
||||
if len(args) == arrow+2 {
|
||||
if !IsDirectoryPath(ns) {
|
||||
fmt.Fprintf(errs, "%s:%d: replacement module without version must be directory path (rooted or starting with ./ or ../)\n", f.Syntax.Name, line.Start.Line)
|
||||
return
|
||||
}
|
||||
if filepath.Separator == '/' && strings.Contains(ns, `\`) {
|
||||
fmt.Fprintf(errs, "%s:%d: replacement directory appears to be Windows path (on a non-windows system)\n", f.Syntax.Name, line.Start.Line)
|
||||
return
|
||||
}
|
||||
}
|
||||
if len(args) == arrow+3 {
|
||||
old := args[arrow+1]
|
||||
nv, err = parseVersion(ns, &args[arrow+2], fix)
|
||||
if err != nil {
|
||||
fmt.Fprintf(errs, "%s:%d: invalid module version %v: %v\n", f.Syntax.Name, line.Start.Line, old, err)
|
||||
return
|
||||
}
|
||||
if IsDirectoryPath(ns) {
|
||||
fmt.Fprintf(errs, "%s:%d: replacement module directory path %q cannot have version\n", f.Syntax.Name, line.Start.Line, ns)
|
||||
return
|
||||
}
|
||||
}
|
||||
f.Replace = append(f.Replace, &Replace{
|
||||
Old: module.Version{Path: s, Version: v},
|
||||
New: module.Version{Path: ns, Version: nv},
|
||||
Syntax: line,
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
// isIndirect reports whether line has a "// indirect" comment,
|
||||
// meaning it is in go.mod only for its effect on indirect dependencies,
|
||||
// so that it can be dropped entirely once the effective version of the
|
||||
// indirect dependency reaches the given minimum version.
|
||||
func isIndirect(line *Line) bool {
|
||||
if len(line.Suffix) == 0 {
|
||||
return false
|
||||
}
|
||||
f := strings.Fields(line.Suffix[0].Token)
|
||||
return (len(f) == 2 && f[1] == "indirect" || len(f) > 2 && f[1] == "indirect;") && f[0] == "//"
|
||||
}
|
||||
|
||||
// setIndirect sets line to have (or not have) a "// indirect" comment.
|
||||
func setIndirect(line *Line, indirect bool) {
|
||||
if isIndirect(line) == indirect {
|
||||
return
|
||||
}
|
||||
if indirect {
|
||||
// Adding comment.
|
||||
if len(line.Suffix) == 0 {
|
||||
// New comment.
|
||||
line.Suffix = []Comment{{Token: "// indirect", Suffix: true}}
|
||||
return
|
||||
}
|
||||
// Insert at beginning of existing comment.
|
||||
com := &line.Suffix[0]
|
||||
space := " "
|
||||
if len(com.Token) > 2 && com.Token[2] == ' ' || com.Token[2] == '\t' {
|
||||
space = ""
|
||||
}
|
||||
com.Token = "// indirect;" + space + com.Token[2:]
|
||||
return
|
||||
}
|
||||
|
||||
// Removing comment.
|
||||
f := strings.Fields(line.Suffix[0].Token)
|
||||
if len(f) == 2 {
|
||||
// Remove whole comment.
|
||||
line.Suffix = nil
|
||||
return
|
||||
}
|
||||
|
||||
// Remove comment prefix.
|
||||
com := &line.Suffix[0]
|
||||
i := strings.Index(com.Token, "indirect;")
|
||||
com.Token = "//" + com.Token[i+len("indirect;"):]
|
||||
}
|
||||
|
||||
// IsDirectoryPath reports whether the given path should be interpreted
|
||||
// as a directory path. Just like on the go command line, relative paths
|
||||
// and rooted paths are directory paths; the rest are module paths.
|
||||
func IsDirectoryPath(ns string) bool {
|
||||
// Because go.mod files can move from one system to another,
|
||||
// we check all known path syntaxes, both Unix and Windows.
|
||||
return strings.HasPrefix(ns, "./") || strings.HasPrefix(ns, "../") || strings.HasPrefix(ns, "/") ||
|
||||
strings.HasPrefix(ns, `.\`) || strings.HasPrefix(ns, `..\`) || strings.HasPrefix(ns, `\`) ||
|
||||
len(ns) >= 2 && ('A' <= ns[0] && ns[0] <= 'Z' || 'a' <= ns[0] && ns[0] <= 'z') && ns[1] == ':'
|
||||
}
|
||||
|
||||
// MustQuote reports whether s must be quoted in order to appear as
|
||||
// a single token in a go.mod line.
|
||||
func MustQuote(s string) bool {
|
||||
for _, r := range s {
|
||||
if !unicode.IsPrint(r) || r == ' ' || r == '"' || r == '\'' || r == '`' {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return s == "" || strings.Contains(s, "//") || strings.Contains(s, "/*")
|
||||
}
|
||||
|
||||
// AutoQuote returns s or, if quoting is required for s to appear in a go.mod,
|
||||
// the quotation of s.
|
||||
func AutoQuote(s string) string {
|
||||
if MustQuote(s) {
|
||||
return strconv.Quote(s)
|
||||
}
|
||||
return s
|
||||
}
|
||||
|
||||
func parseString(s *string) (string, error) {
|
||||
t := *s
|
||||
if strings.HasPrefix(t, `"`) {
|
||||
var err error
|
||||
if t, err = strconv.Unquote(t); err != nil {
|
||||
return "", err
|
||||
}
|
||||
} else if strings.ContainsAny(t, "\"'`") {
|
||||
// Other quotes are reserved both for possible future expansion
|
||||
// and to avoid confusion. For example if someone types 'x'
|
||||
// we want that to be a syntax error and not a literal x in literal quotation marks.
|
||||
return "", fmt.Errorf("unquoted string cannot contain quote")
|
||||
}
|
||||
*s = AutoQuote(t)
|
||||
return t, nil
|
||||
}
|
||||
|
||||
func parseVersion(path string, s *string, fix VersionFixer) (string, error) {
|
||||
t, err := parseString(s)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
if fix != nil {
|
||||
var err error
|
||||
t, err = fix(path, t)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
}
|
||||
if v := module.CanonicalVersion(t); v != "" {
|
||||
*s = v
|
||||
return *s, nil
|
||||
}
|
||||
return "", fmt.Errorf("version must be of the form v1.2.3")
|
||||
}
|
||||
|
||||
func modulePathMajor(path string) (string, error) {
|
||||
_, major, ok := module.SplitPathVersion(path)
|
||||
if !ok {
|
||||
return "", fmt.Errorf("invalid module path")
|
||||
}
|
||||
return major, nil
|
||||
}
|
||||
|
||||
func (f *File) Format() ([]byte, error) {
|
||||
return Format(f.Syntax), nil
|
||||
}
|
||||
|
||||
// Cleanup cleans up the file f after any edit operations.
|
||||
// To avoid quadratic behavior, modifications like DropRequire
|
||||
// clear the entry but do not remove it from the slice.
|
||||
// Cleanup cleans out all the cleared entries.
|
||||
func (f *File) Cleanup() {
|
||||
w := 0
|
||||
for _, r := range f.Require {
|
||||
if r.Mod.Path != "" {
|
||||
f.Require[w] = r
|
||||
w++
|
||||
}
|
||||
}
|
||||
f.Require = f.Require[:w]
|
||||
|
||||
w = 0
|
||||
for _, x := range f.Exclude {
|
||||
if x.Mod.Path != "" {
|
||||
f.Exclude[w] = x
|
||||
w++
|
||||
}
|
||||
}
|
||||
f.Exclude = f.Exclude[:w]
|
||||
|
||||
w = 0
|
||||
for _, r := range f.Replace {
|
||||
if r.Old.Path != "" {
|
||||
f.Replace[w] = r
|
||||
w++
|
||||
}
|
||||
}
|
||||
f.Replace = f.Replace[:w]
|
||||
|
||||
f.Syntax.Cleanup()
|
||||
}
|
||||
|
||||
func (f *File) AddRequire(path, vers string) error {
|
||||
need := true
|
||||
for _, r := range f.Require {
|
||||
if r.Mod.Path == path {
|
||||
if need {
|
||||
r.Mod.Version = vers
|
||||
f.Syntax.updateLine(r.Syntax, "require", AutoQuote(path), vers)
|
||||
need = false
|
||||
} else {
|
||||
f.Syntax.removeLine(r.Syntax)
|
||||
*r = Require{}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if need {
|
||||
f.AddNewRequire(path, vers, false)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (f *File) AddNewRequire(path, vers string, indirect bool) {
|
||||
line := f.Syntax.addLine(nil, "require", AutoQuote(path), vers)
|
||||
setIndirect(line, indirect)
|
||||
f.Require = append(f.Require, &Require{module.Version{Path: path, Version: vers}, indirect, line})
|
||||
}
|
||||
|
||||
func (f *File) SetRequire(req []*Require) {
|
||||
need := make(map[string]string)
|
||||
indirect := make(map[string]bool)
|
||||
for _, r := range req {
|
||||
need[r.Mod.Path] = r.Mod.Version
|
||||
indirect[r.Mod.Path] = r.Indirect
|
||||
}
|
||||
|
||||
for _, r := range f.Require {
|
||||
if v, ok := need[r.Mod.Path]; ok {
|
||||
r.Mod.Version = v
|
||||
r.Indirect = indirect[r.Mod.Path]
|
||||
}
|
||||
}
|
||||
|
||||
var newStmts []Expr
|
||||
for _, stmt := range f.Syntax.Stmt {
|
||||
switch stmt := stmt.(type) {
|
||||
case *LineBlock:
|
||||
if len(stmt.Token) > 0 && stmt.Token[0] == "require" {
|
||||
var newLines []*Line
|
||||
for _, line := range stmt.Line {
|
||||
if p, err := parseString(&line.Token[0]); err == nil && need[p] != "" {
|
||||
line.Token[1] = need[p]
|
||||
delete(need, p)
|
||||
setIndirect(line, indirect[p])
|
||||
newLines = append(newLines, line)
|
||||
}
|
||||
}
|
||||
if len(newLines) == 0 {
|
||||
continue // drop stmt
|
||||
}
|
||||
stmt.Line = newLines
|
||||
}
|
||||
|
||||
case *Line:
|
||||
if len(stmt.Token) > 0 && stmt.Token[0] == "require" {
|
||||
if p, err := parseString(&stmt.Token[1]); err == nil && need[p] != "" {
|
||||
stmt.Token[2] = need[p]
|
||||
delete(need, p)
|
||||
setIndirect(stmt, indirect[p])
|
||||
} else {
|
||||
continue // drop stmt
|
||||
}
|
||||
}
|
||||
}
|
||||
newStmts = append(newStmts, stmt)
|
||||
}
|
||||
f.Syntax.Stmt = newStmts
|
||||
|
||||
for path, vers := range need {
|
||||
f.AddNewRequire(path, vers, indirect[path])
|
||||
}
|
||||
f.SortBlocks()
|
||||
}
|
||||
|
||||
func (f *File) DropRequire(path string) error {
|
||||
for _, r := range f.Require {
|
||||
if r.Mod.Path == path {
|
||||
f.Syntax.removeLine(r.Syntax)
|
||||
*r = Require{}
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (f *File) AddExclude(path, vers string) error {
|
||||
var hint *Line
|
||||
for _, x := range f.Exclude {
|
||||
if x.Mod.Path == path && x.Mod.Version == vers {
|
||||
return nil
|
||||
}
|
||||
if x.Mod.Path == path {
|
||||
hint = x.Syntax
|
||||
}
|
||||
}
|
||||
|
||||
f.Exclude = append(f.Exclude, &Exclude{Mod: module.Version{Path: path, Version: vers}, Syntax: f.Syntax.addLine(hint, "exclude", AutoQuote(path), vers)})
|
||||
return nil
|
||||
}
|
||||
|
||||
func (f *File) DropExclude(path, vers string) error {
|
||||
for _, x := range f.Exclude {
|
||||
if x.Mod.Path == path && x.Mod.Version == vers {
|
||||
f.Syntax.removeLine(x.Syntax)
|
||||
*x = Exclude{}
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (f *File) AddReplace(oldPath, oldVers, newPath, newVers string) error {
|
||||
need := true
|
||||
old := module.Version{Path: oldPath, Version: oldVers}
|
||||
new := module.Version{Path: newPath, Version: newVers}
|
||||
tokens := []string{"replace", AutoQuote(oldPath)}
|
||||
if oldVers != "" {
|
||||
tokens = append(tokens, oldVers)
|
||||
}
|
||||
tokens = append(tokens, "=>", AutoQuote(newPath))
|
||||
if newVers != "" {
|
||||
tokens = append(tokens, newVers)
|
||||
}
|
||||
|
||||
var hint *Line
|
||||
for _, r := range f.Replace {
|
||||
if r.Old.Path == oldPath && (oldVers == "" || r.Old.Version == oldVers) {
|
||||
if need {
|
||||
// Found replacement for old; update to use new.
|
||||
r.New = new
|
||||
f.Syntax.updateLine(r.Syntax, tokens...)
|
||||
need = false
|
||||
continue
|
||||
}
|
||||
// Already added; delete other replacements for same.
|
||||
f.Syntax.removeLine(r.Syntax)
|
||||
*r = Replace{}
|
||||
}
|
||||
if r.Old.Path == oldPath {
|
||||
hint = r.Syntax
|
||||
}
|
||||
}
|
||||
if need {
|
||||
f.Replace = append(f.Replace, &Replace{Old: old, New: new, Syntax: f.Syntax.addLine(hint, tokens...)})
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (f *File) DropReplace(oldPath, oldVers string) error {
|
||||
for _, r := range f.Replace {
|
||||
if r.Old.Path == oldPath && r.Old.Version == oldVers {
|
||||
f.Syntax.removeLine(r.Syntax)
|
||||
*r = Replace{}
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (f *File) SortBlocks() {
|
||||
f.removeDups() // otherwise sorting is unsafe
|
||||
|
||||
for _, stmt := range f.Syntax.Stmt {
|
||||
block, ok := stmt.(*LineBlock)
|
||||
if !ok {
|
||||
continue
|
||||
}
|
||||
sort.Slice(block.Line, func(i, j int) bool {
|
||||
li := block.Line[i]
|
||||
lj := block.Line[j]
|
||||
for k := 0; k < len(li.Token) && k < len(lj.Token); k++ {
|
||||
if li.Token[k] != lj.Token[k] {
|
||||
return li.Token[k] < lj.Token[k]
|
||||
}
|
||||
}
|
||||
return len(li.Token) < len(lj.Token)
|
||||
})
|
||||
}
|
||||
}

func (f *File) removeDups() {
	have := make(map[module.Version]bool)
	kill := make(map[*Line]bool)
	for _, x := range f.Exclude {
		if have[x.Mod] {
			kill[x.Syntax] = true
			continue
		}
		have[x.Mod] = true
	}
	var excl []*Exclude
	for _, x := range f.Exclude {
		if !kill[x.Syntax] {
			excl = append(excl, x)
		}
	}
	f.Exclude = excl

	have = make(map[module.Version]bool)
	// Later replacements take priority over earlier ones.
	for i := len(f.Replace) - 1; i >= 0; i-- {
		x := f.Replace[i]
		if have[x.Old] {
			kill[x.Syntax] = true
			continue
		}
		have[x.Old] = true
	}
	var repl []*Replace
	for _, x := range f.Replace {
		if !kill[x.Syntax] {
			repl = append(repl, x)
		}
	}
	f.Replace = repl

	var stmts []Expr
	for _, stmt := range f.Syntax.Stmt {
		switch stmt := stmt.(type) {
		case *Line:
			if kill[stmt] {
				continue
			}
		case *LineBlock:
			var lines []*Line
			for _, line := range stmt.Line {
				if !kill[line] {
					lines = append(lines, line)
				}
			}
			stmt.Line = lines
			if len(lines) == 0 {
				continue
			}
		}
		stmts = append(stmts, stmt)
	}
	f.Syntax.Stmt = stmts
}
540
vendor/github.com/rogpeppe/go-internal/module/module.go
generated
vendored
Normal file
@ -0,0 +1,540 @@
// Copyright 2018 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Package module defines the module.Version type
// along with support code.
package module

// IMPORTANT NOTE
//
// This file essentially defines the set of valid import paths for the go command.
// There are many subtle considerations, including Unicode ambiguity,
// security, network, and file system representations.
//
// This file also defines the set of valid module path and version combinations,
// another topic with many subtle considerations.
//
// Changes to the semantics in this file require approval from rsc.

import (
	"fmt"
	"sort"
	"strings"
	"unicode"
	"unicode/utf8"

	"github.com/rogpeppe/go-internal/semver"
)

// A Version is defined by a module path and version pair.
type Version struct {
	Path string

	// Version is usually a semantic version in canonical form.
	// There are two exceptions to this general rule.
	// First, the top-level target of a build has no specific version
	// and uses Version = "".
	// Second, during MVS calculations the version "none" is used
	// to represent the decision to take no version of a given module.
	Version string `json:",omitempty"`
}

// Check checks that a given module path, version pair is valid.
// In addition to the path being a valid module path
// and the version being a valid semantic version,
// the two must correspond.
// For example, the path "yaml/v2" only corresponds to
// semantic versions beginning with "v2.".
func Check(path, version string) error {
	if err := CheckPath(path); err != nil {
		return err
	}
	if !semver.IsValid(version) {
		return fmt.Errorf("malformed semantic version %v", version)
	}
	_, pathMajor, _ := SplitPathVersion(path)
	if !MatchPathMajor(version, pathMajor) {
		if pathMajor == "" {
			pathMajor = "v0 or v1"
		}
		if pathMajor[0] == '.' { // .v1
			pathMajor = pathMajor[1:]
		}
		return fmt.Errorf("mismatched module path %v and version %v (want %v)", path, version, pathMajor)
	}
	return nil
}
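
// Editorial note: illustrative sketch, not part of the upstream vendored file,
// showing how the path's major-version suffix and the version must agree:
//
//	module.Check("gopkg.in/yaml.v2", "v2.2.1") // nil: ".v2" matches major version v2
//	module.Check("rsc.io/quote", "v2.0.1")     // error: want v0 or v1 for a path without /v2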

// firstPathOK reports whether r can appear in the first element of a module path.
// The first element of the path must be an LDH domain name, at least for now.
// To avoid case ambiguity, the domain name must be entirely lower case.
func firstPathOK(r rune) bool {
	return r == '-' || r == '.' ||
		'0' <= r && r <= '9' ||
		'a' <= r && r <= 'z'
}

// pathOK reports whether r can appear in an import path element.
// Paths can be ASCII letters, ASCII digits, and limited ASCII punctuation: + - . _ and ~.
// This matches what "go get" has historically recognized in import paths.
// TODO(rsc): We would like to allow Unicode letters, but that requires additional
// care in the safe encoding (see note below).
func pathOK(r rune) bool {
	if r < utf8.RuneSelf {
		return r == '+' || r == '-' || r == '.' || r == '_' || r == '~' ||
			'0' <= r && r <= '9' ||
			'A' <= r && r <= 'Z' ||
			'a' <= r && r <= 'z'
	}
	return false
}

// fileNameOK reports whether r can appear in a file name.
// For now we allow all Unicode letters but otherwise limit to pathOK plus a few more punctuation characters.
// If we expand the set of allowed characters here, we have to
// work harder at detecting potential case-folding and normalization collisions.
// See note about "safe encoding" below.
func fileNameOK(r rune) bool {
	if r < utf8.RuneSelf {
		// Entire set of ASCII punctuation, from which we remove characters:
		//	! " # $ % & ' ( ) * + , - . / : ; < = > ? @ [ \ ] ^ _ ` { | } ~
		// We disallow some shell special characters: " ' * < > ? ` |
		// (Note that some of those are disallowed by the Windows file system as well.)
		// We also disallow path separators / : and \ (fileNameOK is only called on path element characters).
		// We allow spaces (U+0020) in file names.
		const allowed = "!#$%&()+,-.=@[]^_{}~ "
		if '0' <= r && r <= '9' || 'A' <= r && r <= 'Z' || 'a' <= r && r <= 'z' {
			return true
		}
		for i := 0; i < len(allowed); i++ {
			if rune(allowed[i]) == r {
				return true
			}
		}
		return false
	}
	// It may be OK to add more ASCII punctuation here, but only carefully.
	// For example Windows disallows < > \, and macOS disallows :, so we must not allow those.
	return unicode.IsLetter(r)
}

// CheckPath checks that a module path is valid.
func CheckPath(path string) error {
	if err := checkPath(path, false); err != nil {
		return fmt.Errorf("malformed module path %q: %v", path, err)
	}
	i := strings.Index(path, "/")
	if i < 0 {
		i = len(path)
	}
	if i == 0 {
		return fmt.Errorf("malformed module path %q: leading slash", path)
	}
	if !strings.Contains(path[:i], ".") {
		return fmt.Errorf("malformed module path %q: missing dot in first path element", path)
	}
	if path[0] == '-' {
		return fmt.Errorf("malformed module path %q: leading dash in first path element", path)
	}
	for _, r := range path[:i] {
		if !firstPathOK(r) {
			return fmt.Errorf("malformed module path %q: invalid char %q in first path element", path, r)
		}
	}
	if _, _, ok := SplitPathVersion(path); !ok {
		return fmt.Errorf("malformed module path %q: invalid version", path)
	}
	return nil
}
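
// Editorial note: illustrative sketch, not part of the upstream vendored file:
//
//	module.CheckPath("github.com/go-yaml/yaml") // nil
//	module.CheckPath("localhost/pkg")           // error: missing dot in first path element
//	module.CheckPath("github.com/foo/../bar")   // error: double dot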

// CheckImportPath checks that an import path is valid.
func CheckImportPath(path string) error {
	if err := checkPath(path, false); err != nil {
		return fmt.Errorf("malformed import path %q: %v", path, err)
	}
	return nil
}

// checkPath checks that a general path is valid.
// It returns an error describing why but not mentioning path.
// Because these checks apply to both module paths and import paths,
// the caller is expected to add the "malformed ___ path %q: " prefix.
// fileName indicates whether the final element of the path is a file name
// (as opposed to a directory name).
func checkPath(path string, fileName bool) error {
	if !utf8.ValidString(path) {
		return fmt.Errorf("invalid UTF-8")
	}
	if path == "" {
		return fmt.Errorf("empty string")
	}
	if strings.Contains(path, "..") {
		return fmt.Errorf("double dot")
	}
	if strings.Contains(path, "//") {
		return fmt.Errorf("double slash")
	}
	if path[len(path)-1] == '/' {
		return fmt.Errorf("trailing slash")
	}
	elemStart := 0
	for i, r := range path {
		if r == '/' {
			if err := checkElem(path[elemStart:i], fileName); err != nil {
				return err
			}
			elemStart = i + 1
		}
	}
	if err := checkElem(path[elemStart:], fileName); err != nil {
		return err
	}
	return nil
}

// checkElem checks whether an individual path element is valid.
// fileName indicates whether the element is a file name (not a directory name).
func checkElem(elem string, fileName bool) error {
	if elem == "" {
		return fmt.Errorf("empty path element")
	}
	if strings.Count(elem, ".") == len(elem) {
		return fmt.Errorf("invalid path element %q", elem)
	}
	if elem[0] == '.' && !fileName {
		return fmt.Errorf("leading dot in path element")
	}
	if elem[len(elem)-1] == '.' {
		return fmt.Errorf("trailing dot in path element")
	}
	charOK := pathOK
	if fileName {
		charOK = fileNameOK
	}
	for _, r := range elem {
		if !charOK(r) {
			return fmt.Errorf("invalid char %q", r)
		}
	}

	// Windows disallows a bunch of path elements, sadly.
	// See https://docs.microsoft.com/en-us/windows/desktop/fileio/naming-a-file
	short := elem
	if i := strings.Index(short, "."); i >= 0 {
		short = short[:i]
	}
	for _, bad := range badWindowsNames {
		if strings.EqualFold(bad, short) {
			return fmt.Errorf("disallowed path element %q", elem)
		}
	}
	return nil
}

// CheckFilePath checks whether a slash-separated file path is valid.
func CheckFilePath(path string) error {
	if err := checkPath(path, true); err != nil {
		return fmt.Errorf("malformed file path %q: %v", path, err)
	}
	return nil
}

// badWindowsNames are the reserved file path elements on Windows.
// See https://docs.microsoft.com/en-us/windows/desktop/fileio/naming-a-file
var badWindowsNames = []string{
	"CON",
	"PRN",
	"AUX",
	"NUL",
	"COM1",
	"COM2",
	"COM3",
	"COM4",
	"COM5",
	"COM6",
	"COM7",
	"COM8",
	"COM9",
	"LPT1",
	"LPT2",
	"LPT3",
	"LPT4",
	"LPT5",
	"LPT6",
	"LPT7",
	"LPT8",
	"LPT9",
}

// SplitPathVersion returns prefix and major version such that prefix+pathMajor == path
// and version is either empty or "/vN" for N >= 2.
// As a special case, gopkg.in paths are recognized directly;
// they require ".vN" instead of "/vN", and for all N, not just N >= 2.
func SplitPathVersion(path string) (prefix, pathMajor string, ok bool) {
	if strings.HasPrefix(path, "gopkg.in/") {
		return splitGopkgIn(path)
	}

	i := len(path)
	dot := false
	for i > 0 && ('0' <= path[i-1] && path[i-1] <= '9' || path[i-1] == '.') {
		if path[i-1] == '.' {
			dot = true
		}
		i--
	}
	if i <= 1 || path[i-1] != 'v' || path[i-2] != '/' {
		return path, "", true
	}
	prefix, pathMajor = path[:i-2], path[i-2:]
	if dot || len(pathMajor) <= 2 || pathMajor[2] == '0' || pathMajor == "/v1" {
		return path, "", false
	}
	return prefix, pathMajor, true
}
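
// Editorial note: illustrative sketch, not part of the upstream vendored file:
//
//	module.SplitPathVersion("example.com/m")    // "example.com/m", "", true
//	module.SplitPathVersion("example.com/m/v2") // "example.com/m", "/v2", true
//	module.SplitPathVersion("gopkg.in/yaml.v2") // "gopkg.in/yaml", ".v2", true
//	module.SplitPathVersion("example.com/m/v1") // "example.com/m/v1", "", false: /v1 is not allowed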

// splitGopkgIn is like SplitPathVersion but only for gopkg.in paths.
func splitGopkgIn(path string) (prefix, pathMajor string, ok bool) {
	if !strings.HasPrefix(path, "gopkg.in/") {
		return path, "", false
	}
	i := len(path)
	if strings.HasSuffix(path, "-unstable") {
		i -= len("-unstable")
	}
	for i > 0 && ('0' <= path[i-1] && path[i-1] <= '9') {
		i--
	}
	if i <= 1 || path[i-1] != 'v' || path[i-2] != '.' {
		// All gopkg.in paths must end in vN for some N.
		return path, "", false
	}
	prefix, pathMajor = path[:i-2], path[i-2:]
	if len(pathMajor) <= 2 || pathMajor[2] == '0' && pathMajor != ".v0" {
		return path, "", false
	}
	return prefix, pathMajor, true
}

// MatchPathMajor reports whether the semantic version v
// matches the path major version pathMajor.
func MatchPathMajor(v, pathMajor string) bool {
	if strings.HasPrefix(pathMajor, ".v") && strings.HasSuffix(pathMajor, "-unstable") {
		pathMajor = strings.TrimSuffix(pathMajor, "-unstable")
	}
	if strings.HasPrefix(v, "v0.0.0-") && pathMajor == ".v1" {
		// Allow old bug in pseudo-versions that generated v0.0.0- pseudoversion for gopkg .v1.
		// For example, gopkg.in/yaml.v2@v2.2.1's go.mod requires gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405.
		return true
	}
	m := semver.Major(v)
	if pathMajor == "" {
		return m == "v0" || m == "v1" || semver.Build(v) == "+incompatible"
	}
	return (pathMajor[0] == '/' || pathMajor[0] == '.') && m == pathMajor[1:]
}
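
// Editorial note: illustrative sketch, not part of the upstream vendored file:
//
//	module.MatchPathMajor("v1.5.2", "")              // true: v0/v1 need no path suffix
//	module.MatchPathMajor("v2.3.1", "/v2")           // true
//	module.MatchPathMajor("v2.3.1", "")              // false: v2 requires a /v2 path
//	module.MatchPathMajor("v2.0.0+incompatible", "") // true: pre-modules major versions are allowed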

// CanonicalVersion returns the canonical form of the version string v.
// It is the same as semver.Canonical(v) except that it preserves the special build suffix "+incompatible".
func CanonicalVersion(v string) string {
	cv := semver.Canonical(v)
	if semver.Build(v) == "+incompatible" {
		cv += "+incompatible"
	}
	return cv
}
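
// Editorial note: illustrative sketch, not part of the upstream vendored file:
//
//	module.CanonicalVersion("v1.2")                // "v1.2.0"
//	module.CanonicalVersion("v1.2.3+meta")         // "v1.2.3"
//	module.CanonicalVersion("v2.0.1+incompatible") // "v2.0.1+incompatible"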

// Sort sorts the list by Path, breaking ties by comparing Versions.
func Sort(list []Version) {
	sort.Slice(list, func(i, j int) bool {
		mi := list[i]
		mj := list[j]
		if mi.Path != mj.Path {
			return mi.Path < mj.Path
		}
		// To help go.sum formatting, allow version/file.
		// Compare semver prefix by semver rules,
		// file by string order.
		vi := mi.Version
		vj := mj.Version
		var fi, fj string
		if k := strings.Index(vi, "/"); k >= 0 {
			vi, fi = vi[:k], vi[k:]
		}
		if k := strings.Index(vj, "/"); k >= 0 {
			vj, fj = vj[:k], vj[k:]
		}
		if vi != vj {
			return semver.Compare(vi, vj) < 0
		}
		return fi < fj
	})
}
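
// Editorial note: illustrative sketch, not part of the upstream vendored file.
// Sort orders entries first by path and then by semantic version:
//
//	list := []module.Version{
//		{Path: "example.com/b", Version: "v1.0.0"},
//		{Path: "example.com/a", Version: "v1.10.0"},
//		{Path: "example.com/a", Version: "v1.2.0"},
//	}
//	module.Sort(list)
//	// example.com/a v1.2.0, example.com/a v1.10.0, example.com/b v1.0.0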

// Safe encodings
//
// Module paths appear as substrings of file system paths
// (in the download cache) and of web server URLs in the proxy protocol.
// In general we cannot rely on file systems to be case-sensitive,
// nor can we rely on web servers, since they read from file systems.
// That is, we cannot rely on the file system to keep rsc.io/QUOTE
// and rsc.io/quote separate. Windows and macOS don't.
// Instead, we must never require two different casings of a file path.
// Because we want the download cache to match the proxy protocol,
// and because we want the proxy protocol to be possible to serve
// from a tree of static files (which might be stored on a case-insensitive
// file system), the proxy protocol must never require two different casings
// of a URL path either.
//
// One possibility would be to make the safe encoding be the lowercase
// hexadecimal encoding of the actual path bytes. This would avoid ever
// needing different casings of a file path, but it would be fairly illegible
// to most programmers when those paths appeared in the file system
// (including in file paths in compiler errors and stack traces)
// in web server logs, and so on. Instead, we want a safe encoding that
// leaves most paths unaltered.
//
// The safe encoding is this:
// replace every uppercase letter with an exclamation mark
// followed by the letter's lowercase equivalent.
//
// For example,
// github.com/Azure/azure-sdk-for-go -> github.com/!azure/azure-sdk-for-go.
// github.com/GoogleCloudPlatform/cloudsql-proxy -> github.com/!google!cloud!platform/cloudsql-proxy
// github.com/Sirupsen/logrus -> github.com/!sirupsen/logrus.
//
// Import paths that avoid upper-case letters are left unchanged.
// Note that because import paths are ASCII-only and avoid various
// problematic punctuation (like : < and >), the safe encoding is also ASCII-only
// and avoids the same problematic punctuation.
//
// Import paths have never allowed exclamation marks, so there is no
// need to define how to encode a literal !.
//
// Although paths are disallowed from using Unicode (see pathOK above),
// the eventual plan is to allow Unicode letters as well, to assume that
// file systems and URLs are Unicode-safe (storing UTF-8), and apply
// the !-for-uppercase convention. Note however that not all runes that
// are different but case-fold equivalent are an upper/lower pair.
// For example, U+004B ('K'), U+006B ('k'), and U+212A ('K' for Kelvin)
// are considered to case-fold to each other. When we do add Unicode
// letters, we must not assume that upper/lower are the only case-equivalent pairs.
// Perhaps the Kelvin symbol would be disallowed entirely, for example.
// Or perhaps it would encode as "!!k", or perhaps as "(212A)".
//
// Also, it would be nice to allow Unicode marks as well as letters,
// but marks include combining marks, and then we must deal not
// only with case folding but also normalization: both U+00E9 ('é')
// and U+0065 U+0301 ('e' followed by combining acute accent)
// look the same on the page and are treated by some file systems
// as the same path. If we do allow Unicode marks in paths, there
// must be some kind of normalization to allow only one canonical
// encoding of any character used in an import path.

// EncodePath returns the safe encoding of the given module path.
// It fails if the module path is invalid.
func EncodePath(path string) (encoding string, err error) {
	if err := CheckPath(path); err != nil {
		return "", err
	}

	return encodeString(path)
}
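
// Editorial note: illustrative sketch, not part of the upstream vendored file.
// The safe encoding rewrites each uppercase letter as "!" plus its lowercase
// form, and DecodePath (below) inverts it:
//
//	enc, _ := module.EncodePath("github.com/Azure/azure-sdk-for-go")
//	// enc == "github.com/!azure/azure-sdk-for-go"
//	dec, _ := module.DecodePath(enc)
//	// dec == "github.com/Azure/azure-sdk-for-go"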

// EncodeVersion returns the safe encoding of the given module version.
// Versions are allowed to be in non-semver form but must be valid file names
// and not contain exclamation marks.
func EncodeVersion(v string) (encoding string, err error) {
	if err := checkElem(v, true); err != nil || strings.Contains(v, "!") {
		return "", fmt.Errorf("disallowed version string %q", v)
	}
	return encodeString(v)
}

func encodeString(s string) (encoding string, err error) {
	haveUpper := false
	for _, r := range s {
		if r == '!' || r >= utf8.RuneSelf {
			// This should be disallowed by CheckPath, but diagnose anyway.
			// The correctness of the encoding loop below depends on it.
			return "", fmt.Errorf("internal error: inconsistency in EncodePath")
		}
		if 'A' <= r && r <= 'Z' {
			haveUpper = true
		}
	}

	if !haveUpper {
		return s, nil
	}

	var buf []byte
	for _, r := range s {
		if 'A' <= r && r <= 'Z' {
			buf = append(buf, '!', byte(r+'a'-'A'))
		} else {
			buf = append(buf, byte(r))
		}
	}
	return string(buf), nil
}

// DecodePath returns the module path of the given safe encoding.
// It fails if the encoding is invalid or encodes an invalid path.
func DecodePath(encoding string) (path string, err error) {
	path, ok := decodeString(encoding)
	if !ok {
		return "", fmt.Errorf("invalid module path encoding %q", encoding)
	}
	if err := CheckPath(path); err != nil {
		return "", fmt.Errorf("invalid module path encoding %q: %v", encoding, err)
	}
	return path, nil
}

// DecodeVersion returns the version string for the given safe encoding.
// It fails if the encoding is invalid or encodes an invalid version.
// Versions are allowed to be in non-semver form but must be valid file names
// and not contain exclamation marks.
func DecodeVersion(encoding string) (v string, err error) {
	v, ok := decodeString(encoding)
	if !ok {
		return "", fmt.Errorf("invalid version encoding %q", encoding)
	}
	if err := checkElem(v, true); err != nil {
		return "", fmt.Errorf("disallowed version string %q", v)
	}
	return v, nil
}

func decodeString(encoding string) (string, bool) {
	var buf []byte

	bang := false
	for _, r := range encoding {
		if r >= utf8.RuneSelf {
			return "", false
		}
		if bang {
			bang = false
			if r < 'a' || 'z' < r {
				return "", false
			}
			buf = append(buf, byte(r+'A'-'a'))
			continue
		}
		if r == '!' {
			bang = true
			continue
		}
		if 'A' <= r && r <= 'Z' {
			return "", false
		}
		buf = append(buf, byte(r))
	}
	if bang {
		return "", false
	}
	return string(buf), true
}
388
vendor/github.com/rogpeppe/go-internal/semver/semver.go
generated
vendored
Normal file
@ -0,0 +1,388 @@
// Copyright 2018 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Package semver implements comparison of semantic version strings.
// In this package, semantic version strings must begin with a leading "v",
// as in "v1.0.0".
//
// The general form of a semantic version string accepted by this package is
//
//	vMAJOR[.MINOR[.PATCH[-PRERELEASE][+BUILD]]]
//
// where square brackets indicate optional parts of the syntax;
// MAJOR, MINOR, and PATCH are decimal integers without extra leading zeros;
// PRERELEASE and BUILD are each a series of non-empty dot-separated identifiers
// using only alphanumeric characters and hyphens; and
// all-numeric PRERELEASE identifiers must not have leading zeros.
//
// This package follows Semantic Versioning 2.0.0 (see semver.org)
// with two exceptions. First, it requires the "v" prefix. Second, it recognizes
// vMAJOR and vMAJOR.MINOR (with no prerelease or build suffixes)
// as shorthands for vMAJOR.0.0 and vMAJOR.MINOR.0.
package semver

// parsed returns the parsed form of a semantic version string.
type parsed struct {
	major      string
	minor      string
	patch      string
	short      string
	prerelease string
	build      string
	err        string
}

// IsValid reports whether v is a valid semantic version string.
func IsValid(v string) bool {
	_, ok := parse(v)
	return ok
}

// Canonical returns the canonical formatting of the semantic version v.
// It fills in any missing .MINOR or .PATCH and discards build metadata.
// Two semantic versions compare equal only if their canonical formattings
// are identical strings.
// The canonical invalid semantic version is the empty string.
func Canonical(v string) string {
	p, ok := parse(v)
	if !ok {
		return ""
	}
	if p.build != "" {
		return v[:len(v)-len(p.build)]
	}
	if p.short != "" {
		return v + p.short
	}
	return v
}
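
// Editorial note: illustrative sketch, not part of the upstream vendored file.
// The "v" prefix is required, the shorthand forms are filled in, and build
// metadata is discarded:
//
//	semver.IsValid("1.2.3")         // false: missing "v" prefix
//	semver.Canonical("v2")          // "v2.0.0"
//	semver.Canonical("v1.2.3+meta") // "v1.2.3"
//	semver.Canonical("v1.2.3-rc.1") // "v1.2.3-rc.1"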

// Major returns the major version prefix of the semantic version v.
// For example, Major("v2.1.0") == "v2".
// If v is an invalid semantic version string, Major returns the empty string.
func Major(v string) string {
	pv, ok := parse(v)
	if !ok {
		return ""
	}
	return v[:1+len(pv.major)]
}

// MajorMinor returns the major.minor version prefix of the semantic version v.
// For example, MajorMinor("v2.1.0") == "v2.1".
// If v is an invalid semantic version string, MajorMinor returns the empty string.
func MajorMinor(v string) string {
	pv, ok := parse(v)
	if !ok {
		return ""
	}
	i := 1 + len(pv.major)
	if j := i + 1 + len(pv.minor); j <= len(v) && v[i] == '.' && v[i+1:j] == pv.minor {
		return v[:j]
	}
	return v[:i] + "." + pv.minor
}

// Prerelease returns the prerelease suffix of the semantic version v.
// For example, Prerelease("v2.1.0-pre+meta") == "-pre".
// If v is an invalid semantic version string, Prerelease returns the empty string.
func Prerelease(v string) string {
	pv, ok := parse(v)
	if !ok {
		return ""
	}
	return pv.prerelease
}

// Build returns the build suffix of the semantic version v.
// For example, Build("v2.1.0+meta") == "+meta".
// If v is an invalid semantic version string, Build returns the empty string.
func Build(v string) string {
	pv, ok := parse(v)
	if !ok {
		return ""
	}
	return pv.build
}

// Compare returns an integer comparing two versions according to
// semantic version precedence.
// The result will be 0 if v == w, -1 if v < w, or +1 if v > w.
//
// An invalid semantic version string is considered less than a valid one.
// All invalid semantic version strings compare equal to each other.
func Compare(v, w string) int {
	pv, ok1 := parse(v)
	pw, ok2 := parse(w)
	if !ok1 && !ok2 {
		return 0
	}
	if !ok1 {
		return -1
	}
	if !ok2 {
		return +1
	}
	if c := compareInt(pv.major, pw.major); c != 0 {
		return c
	}
	if c := compareInt(pv.minor, pw.minor); c != 0 {
		return c
	}
	if c := compareInt(pv.patch, pw.patch); c != 0 {
		return c
	}
	return comparePrerelease(pv.prerelease, pw.prerelease)
}
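
// Editorial note: illustrative sketch, not part of the upstream vendored file:
//
//	semver.Compare("v1.0.0-alpha", "v1.0.0") // -1: prereleases sort before releases
//	semver.Compare("v1.2", "v1.2.0")         // 0: shorthand fills in the missing .PATCH
//	semver.Compare("not-semver", "v0.0.1")   // -1: invalid versions sort first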

// Max canonicalizes its arguments and then returns the version string
// that compares greater.
func Max(v, w string) string {
	v = Canonical(v)
	w = Canonical(w)
	if Compare(v, w) > 0 {
		return v
	}
	return w
}

func parse(v string) (p parsed, ok bool) {
	if v == "" || v[0] != 'v' {
		p.err = "missing v prefix"
		return
	}
	p.major, v, ok = parseInt(v[1:])
	if !ok {
		p.err = "bad major version"
		return
	}
	if v == "" {
		p.minor = "0"
		p.patch = "0"
		p.short = ".0.0"
		return
	}
	if v[0] != '.' {
		p.err = "bad minor prefix"
		ok = false
		return
	}
	p.minor, v, ok = parseInt(v[1:])
	if !ok {
		p.err = "bad minor version"
		return
	}
	if v == "" {
		p.patch = "0"
		p.short = ".0"
		return
	}
	if v[0] != '.' {
		p.err = "bad patch prefix"
		ok = false
		return
	}
	p.patch, v, ok = parseInt(v[1:])
	if !ok {
		p.err = "bad patch version"
		return
	}
	if len(v) > 0 && v[0] == '-' {
		p.prerelease, v, ok = parsePrerelease(v)
		if !ok {
			p.err = "bad prerelease"
			return
		}
	}
	if len(v) > 0 && v[0] == '+' {
		p.build, v, ok = parseBuild(v)
		if !ok {
			p.err = "bad build"
			return
		}
	}
	if v != "" {
		p.err = "junk on end"
		ok = false
		return
	}
	ok = true
	return
}

func parseInt(v string) (t, rest string, ok bool) {
	if v == "" {
		return
	}
	if v[0] < '0' || '9' < v[0] {
		return
	}
	i := 1
	for i < len(v) && '0' <= v[i] && v[i] <= '9' {
		i++
	}
	if v[0] == '0' && i != 1 {
		return
	}
	return v[:i], v[i:], true
}

func parsePrerelease(v string) (t, rest string, ok bool) {
	// "A pre-release version MAY be denoted by appending a hyphen and
	// a series of dot separated identifiers immediately following the patch version.
	// Identifiers MUST comprise only ASCII alphanumerics and hyphen [0-9A-Za-z-].
	// Identifiers MUST NOT be empty. Numeric identifiers MUST NOT include leading zeroes."
	if v == "" || v[0] != '-' {
		return
	}
	i := 1
	start := 1
	for i < len(v) && v[i] != '+' {
		if !isIdentChar(v[i]) && v[i] != '.' {
			return
		}
		if v[i] == '.' {
			if start == i || isBadNum(v[start:i]) {
				return
			}
			start = i + 1
		}
		i++
	}
	if start == i || isBadNum(v[start:i]) {
		return
	}
	return v[:i], v[i:], true
}

func parseBuild(v string) (t, rest string, ok bool) {
	if v == "" || v[0] != '+' {
		return
	}
	i := 1
	start := 1
	for i < len(v) {
		if !isIdentChar(v[i]) {
			return
		}
		if v[i] == '.' {
			if start == i {
				return
			}
			start = i + 1
		}
		i++
	}
	if start == i {
		return
	}
	return v[:i], v[i:], true
}

func isIdentChar(c byte) bool {
	return 'A' <= c && c <= 'Z' || 'a' <= c && c <= 'z' || '0' <= c && c <= '9' || c == '-'
}

func isBadNum(v string) bool {
	i := 0
	for i < len(v) && '0' <= v[i] && v[i] <= '9' {
		i++
	}
	return i == len(v) && i > 1 && v[0] == '0'
}

func isNum(v string) bool {
	i := 0
	for i < len(v) && '0' <= v[i] && v[i] <= '9' {
		i++
	}
	return i == len(v)
}

func compareInt(x, y string) int {
	if x == y {
		return 0
	}
	if len(x) < len(y) {
		return -1
	}
	if len(x) > len(y) {
		return +1
	}
	if x < y {
		return -1
	} else {
		return +1
	}
}

func comparePrerelease(x, y string) int {
	// "When major, minor, and patch are equal, a pre-release version has
	// lower precedence than a normal version.
	// Example: 1.0.0-alpha < 1.0.0.
	// Precedence for two pre-release versions with the same major, minor,
	// and patch version MUST be determined by comparing each dot separated
	// identifier from left to right until a difference is found as follows:
	// identifiers consisting of only digits are compared numerically and
	// identifiers with letters or hyphens are compared lexically in ASCII
	// sort order. Numeric identifiers always have lower precedence than
	// non-numeric identifiers. A larger set of pre-release fields has a
	// higher precedence than a smaller set, if all of the preceding
	// identifiers are equal.
	// Example: 1.0.0-alpha < 1.0.0-alpha.1 < 1.0.0-alpha.beta <
	// 1.0.0-beta < 1.0.0-beta.2 < 1.0.0-beta.11 < 1.0.0-rc.1 < 1.0.0."
	if x == y {
		return 0
	}
	if x == "" {
		return +1
	}
	if y == "" {
		return -1
	}
	for x != "" && y != "" {
		x = x[1:] // skip - or .
		y = y[1:] // skip - or .
		var dx, dy string
		dx, x = nextIdent(x)
		dy, y = nextIdent(y)
		if dx != dy {
			ix := isNum(dx)
			iy := isNum(dy)
			if ix != iy {
				if ix {
					return -1
				} else {
					return +1
				}
			}
			if ix {
				if len(dx) < len(dy) {
					return -1
				}
				if len(dx) > len(dy) {
					return +1
				}
			}
			if dx < dy {
				return -1
			} else {
				return +1
			}
		}
	}
	if x == "" {
		return -1
	} else {
		return +1
	}
}

func nextIdent(x string) (dx, rest string) {
	i := 0
	for i < len(x) && x[i] != '.' {
		i++
	}
	return x[:i], x[i:]
}