build: move e2e dependencies into e2e/go.mod
Several packages are only used while running the e2e suite. These packages are less important to update, as they cannot influence the final executable that is part of the Ceph-CSI container-image. By moving these dependencies out of the main Ceph-CSI go.mod, it is easier to identify whether a reported CVE affects Ceph-CSI or only the testing (like most of the Kubernetes CVEs). Signed-off-by: Niels de Vos <ndevos@ibm.com>
Committed by mergify[bot] · parent 15da101b1b · commit bec6090996
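To make the split concrete, here is a minimal, hypothetical sketch of what a dedicated `e2e/go.mod` could look like; the module path follows the repository layout, but the listed dependencies and versions are illustrative, not taken from this commit:

```
module github.com/ceph/ceph-csi/e2e

go 1.22

require (
	// Test-only frameworks; illustrative entries, not the commit's actual list.
	github.com/onsi/ginkgo/v2 v2.17.1
	github.com/onsi/gomega v1.32.0
	k8s.io/kubernetes v1.30.0
)
```

With this layout, a vulnerability scanner pointed at the root `go.mod` only sees packages that can end up in the shipped container image, while anything confined to `e2e/go.mod` (and its `e2e/vendor/` tree, populated below) can be triaged as test-only.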
e2e/vendor/github.com/JeffAshton/win_pdh/AUTHORS (generated, vendored, new file, 14 lines)
@@ -0,0 +1,14 @@
# This is the official list of 'win_pdh' authors for copyright purposes.

# Names should be added to this file as
#	Name or Organization <email address>
# The email address is not required for organizations.

# Please keep the list sorted.

# Contributors
# ============

Alexander Neumann <an2048@googlemail.com>
Joseph Watson <jtwatson@linux-consulting.us>
Kevin Pors <krpors@gmail.com>
e2e/vendor/github.com/JeffAshton/win_pdh/LICENSE (generated, vendored, new file, 23 lines)
@@ -0,0 +1,23 @@
Copyright (c) 2010 The win_pdh Authors. All rights reserved.

Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:
1. Redistributions of source code must retain the above copyright
   notice, this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright
   notice, this list of conditions and the following disclaimer in the
   documentation and/or other materials provided with the distribution.
3. The names of the authors may not be used to endorse or promote products
   derived from this software without specific prior written permission.

THIS SOFTWARE IS PROVIDED BY THE AUTHORS ``AS IS'' AND ANY EXPRESS OR
IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT,
INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
e2e/vendor/github.com/JeffAshton/win_pdh/README.mdown (generated, vendored, new file, 15 lines)
@@ -0,0 +1,15 @@
About win_pdh
=============

win_pdh is a Windows Performance Data Helper wrapper package for Go.

Originally part of [walk](https://github.com/lxn/walk) and [win](https://github.com/lxn/win), it is now a separate
project.

Setup
=====

Make sure you have a working Go installation.
See [Getting Started](http://golang.org/doc/install.html)

Now run `go get github.com/JeffAshton/win_pdh`
e2e/vendor/github.com/JeffAshton/win_pdh/pdh.go (generated, vendored, new file, 453 lines)
@@ -0,0 +1,453 @@
// Copyright 2013 The win_pdh Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// +build windows

package win_pdh

import (
	"syscall"
	"unsafe"
)

// Error codes
const (
	ERROR_SUCCESS          = 0
	ERROR_INVALID_FUNCTION = 1
)

type (
	HANDLE uintptr
)

// PDH error codes, which can be returned by all Pdh* functions. Taken from mingw-w64 pdhmsg.h
const (
	PDH_CSTATUS_VALID_DATA                     = 0x00000000 // The returned data is valid.
	PDH_CSTATUS_NEW_DATA                       = 0x00000001 // The return data value is valid and different from the last sample.
	PDH_CSTATUS_NO_MACHINE                     = 0x800007D0 // Unable to connect to the specified computer, or the computer is offline.
	PDH_CSTATUS_NO_INSTANCE                    = 0x800007D1
	PDH_MORE_DATA                              = 0x800007D2 // The PdhGetFormattedCounterArray* function can return this if there's 'more data to be displayed'.
	PDH_CSTATUS_ITEM_NOT_VALIDATED             = 0x800007D3
	PDH_RETRY                                  = 0x800007D4
	PDH_NO_DATA                                = 0x800007D5 // The query does not currently contain any counters (for example, limited access)
	PDH_CALC_NEGATIVE_DENOMINATOR              = 0x800007D6
	PDH_CALC_NEGATIVE_TIMEBASE                 = 0x800007D7
	PDH_CALC_NEGATIVE_VALUE                    = 0x800007D8
	PDH_DIALOG_CANCELLED                       = 0x800007D9
	PDH_END_OF_LOG_FILE                        = 0x800007DA
	PDH_ASYNC_QUERY_TIMEOUT                    = 0x800007DB
	PDH_CANNOT_SET_DEFAULT_REALTIME_DATASOURCE = 0x800007DC
	PDH_CSTATUS_NO_OBJECT                      = 0xC0000BB8
	PDH_CSTATUS_NO_COUNTER                     = 0xC0000BB9 // The specified counter could not be found.
	PDH_CSTATUS_INVALID_DATA                   = 0xC0000BBA // The counter was successfully found, but the data returned is not valid.
	PDH_MEMORY_ALLOCATION_FAILURE              = 0xC0000BBB
	PDH_INVALID_HANDLE                         = 0xC0000BBC
	PDH_INVALID_ARGUMENT                       = 0xC0000BBD // Required argument is missing or incorrect.
	PDH_FUNCTION_NOT_FOUND                     = 0xC0000BBE
	PDH_CSTATUS_NO_COUNTERNAME                 = 0xC0000BBF
	PDH_CSTATUS_BAD_COUNTERNAME                = 0xC0000BC0 // Unable to parse the counter path. Check the format and syntax of the specified path.
	PDH_INVALID_BUFFER                         = 0xC0000BC1
	PDH_INSUFFICIENT_BUFFER                    = 0xC0000BC2
	PDH_CANNOT_CONNECT_MACHINE                 = 0xC0000BC3
	PDH_INVALID_PATH                           = 0xC0000BC4
	PDH_INVALID_INSTANCE                       = 0xC0000BC5
	PDH_INVALID_DATA                           = 0xC0000BC6 // specified counter does not contain valid data or a successful status code.
	PDH_NO_DIALOG_DATA                         = 0xC0000BC7
	PDH_CANNOT_READ_NAME_STRINGS               = 0xC0000BC8
	PDH_LOG_FILE_CREATE_ERROR                  = 0xC0000BC9
	PDH_LOG_FILE_OPEN_ERROR                    = 0xC0000BCA
	PDH_LOG_TYPE_NOT_FOUND                     = 0xC0000BCB
	PDH_NO_MORE_DATA                           = 0xC0000BCC
	PDH_ENTRY_NOT_IN_LOG_FILE                  = 0xC0000BCD
	PDH_DATA_SOURCE_IS_LOG_FILE                = 0xC0000BCE
	PDH_DATA_SOURCE_IS_REAL_TIME               = 0xC0000BCF
	PDH_UNABLE_READ_LOG_HEADER                 = 0xC0000BD0
	PDH_FILE_NOT_FOUND                         = 0xC0000BD1
	PDH_FILE_ALREADY_EXISTS                    = 0xC0000BD2
	PDH_NOT_IMPLEMENTED                        = 0xC0000BD3
	PDH_STRING_NOT_FOUND                       = 0xC0000BD4
	PDH_UNABLE_MAP_NAME_FILES                  = 0x80000BD5
	PDH_UNKNOWN_LOG_FORMAT                     = 0xC0000BD6
	PDH_UNKNOWN_LOGSVC_COMMAND                 = 0xC0000BD7
	PDH_LOGSVC_QUERY_NOT_FOUND                 = 0xC0000BD8
	PDH_LOGSVC_NOT_OPENED                      = 0xC0000BD9
	PDH_WBEM_ERROR                             = 0xC0000BDA
	PDH_ACCESS_DENIED                          = 0xC0000BDB
	PDH_LOG_FILE_TOO_SMALL                     = 0xC0000BDC
	PDH_INVALID_DATASOURCE                     = 0xC0000BDD
	PDH_INVALID_SQLDB                          = 0xC0000BDE
	PDH_NO_COUNTERS                            = 0xC0000BDF
	PDH_SQL_ALLOC_FAILED                       = 0xC0000BE0
	PDH_SQL_ALLOCCON_FAILED                    = 0xC0000BE1
	PDH_SQL_EXEC_DIRECT_FAILED                 = 0xC0000BE2
	PDH_SQL_FETCH_FAILED                       = 0xC0000BE3
	PDH_SQL_ROWCOUNT_FAILED                    = 0xC0000BE4
	PDH_SQL_MORE_RESULTS_FAILED                = 0xC0000BE5
	PDH_SQL_CONNECT_FAILED                     = 0xC0000BE6
	PDH_SQL_BIND_FAILED                        = 0xC0000BE7
	PDH_CANNOT_CONNECT_WMI_SERVER              = 0xC0000BE8
	PDH_PLA_COLLECTION_ALREADY_RUNNING         = 0xC0000BE9
	PDH_PLA_ERROR_SCHEDULE_OVERLAP             = 0xC0000BEA
	PDH_PLA_COLLECTION_NOT_FOUND               = 0xC0000BEB
	PDH_PLA_ERROR_SCHEDULE_ELAPSED             = 0xC0000BEC
	PDH_PLA_ERROR_NOSTART                      = 0xC0000BED
	PDH_PLA_ERROR_ALREADY_EXISTS               = 0xC0000BEE
	PDH_PLA_ERROR_TYPE_MISMATCH                = 0xC0000BEF
	PDH_PLA_ERROR_FILEPATH                     = 0xC0000BF0
	PDH_PLA_SERVICE_ERROR                      = 0xC0000BF1
	PDH_PLA_VALIDATION_ERROR                   = 0xC0000BF2
	PDH_PLA_VALIDATION_WARNING                 = 0x80000BF3
	PDH_PLA_ERROR_NAME_TOO_LONG                = 0xC0000BF4
	PDH_INVALID_SQL_LOG_FORMAT                 = 0xC0000BF5
	PDH_COUNTER_ALREADY_IN_QUERY               = 0xC0000BF6
	PDH_BINARY_LOG_CORRUPT                     = 0xC0000BF7
	PDH_LOG_SAMPLE_TOO_SMALL                   = 0xC0000BF8
	PDH_OS_LATER_VERSION                       = 0xC0000BF9
	PDH_OS_EARLIER_VERSION                     = 0xC0000BFA
	PDH_INCORRECT_APPEND_TIME                  = 0xC0000BFB
	PDH_UNMATCHED_APPEND_COUNTER               = 0xC0000BFC
	PDH_SQL_ALTER_DETAIL_FAILED                = 0xC0000BFD
	PDH_QUERY_PERF_DATA_TIMEOUT                = 0xC0000BFE
)

// Formatting options for GetFormattedCounterValue().
const (
	PDH_FMT_RAW          = 0x00000010
	PDH_FMT_ANSI         = 0x00000020
	PDH_FMT_UNICODE      = 0x00000040
	PDH_FMT_LONG         = 0x00000100 // Return data as a long int.
	PDH_FMT_DOUBLE       = 0x00000200 // Return data as a double precision floating point real.
	PDH_FMT_LARGE        = 0x00000400 // Return data as a 64 bit integer.
	PDH_FMT_NOSCALE      = 0x00001000 // can be OR-ed: Do not apply the counter's default scaling factor.
	PDH_FMT_1000         = 0x00002000 // can be OR-ed: multiply the actual value by 1,000.
	PDH_FMT_NODATA       = 0x00004000 // can be OR-ed: unknown what this is for, MSDN says nothing.
	PDH_FMT_NOCAP100     = 0x00008000 // can be OR-ed: do not cap values > 100.
	PERF_DETAIL_COSTLY   = 0x00010000
	PERF_DETAIL_STANDARD = 0x0000FFFF
)

type (
	PDH_HQUERY   HANDLE // query handle
	PDH_HCOUNTER HANDLE // counter handle
)

// Union specialization for double values
type PDH_FMT_COUNTERVALUE_DOUBLE struct {
	CStatus     uint32
	DoubleValue float64
}

// Union specialization for 64 bit integer values
type PDH_FMT_COUNTERVALUE_LARGE struct {
	CStatus    uint32
	LargeValue int64
}

// Union specialization for long values
type PDH_FMT_COUNTERVALUE_LONG struct {
	CStatus   uint32
	LongValue int32
	padding   [4]byte
}

// Union specialization for double values, used by PdhGetFormattedCounterArrayDouble()
type PDH_FMT_COUNTERVALUE_ITEM_DOUBLE struct {
	SzName   *uint16 // pointer to a string
	FmtValue PDH_FMT_COUNTERVALUE_DOUBLE
}

// Union specialization for 'large' values, used by PdhGetFormattedCounterArrayLarge()
type PDH_FMT_COUNTERVALUE_ITEM_LARGE struct {
	SzName   *uint16 // pointer to a string
	FmtValue PDH_FMT_COUNTERVALUE_LARGE
}

// Union specialization for long values, used by PdhGetFormattedCounterArrayLong()
type PDH_FMT_COUNTERVALUE_ITEM_LONG struct {
	SzName   *uint16 // pointer to a string
	FmtValue PDH_FMT_COUNTERVALUE_LONG
}

var (
	// Library
	libpdhDll *syscall.DLL

	// Functions
	pdh_AddCounterW               *syscall.Proc
	pdh_AddEnglishCounterW        *syscall.Proc
	pdh_CloseQuery                *syscall.Proc
	pdh_CollectQueryData          *syscall.Proc
	pdh_GetFormattedCounterValue  *syscall.Proc
	pdh_GetFormattedCounterArrayW *syscall.Proc
	pdh_OpenQuery                 *syscall.Proc
	pdh_ValidatePathW             *syscall.Proc
)

func init() {
	// Library
	libpdhDll = syscall.MustLoadDLL("pdh.dll")

	// Functions
	pdh_AddCounterW = libpdhDll.MustFindProc("PdhAddCounterW")
	pdh_AddEnglishCounterW, _ = libpdhDll.FindProc("PdhAddEnglishCounterW") // XXX: only supported on versions > Vista.
	pdh_CloseQuery = libpdhDll.MustFindProc("PdhCloseQuery")
	pdh_CollectQueryData = libpdhDll.MustFindProc("PdhCollectQueryData")
	pdh_GetFormattedCounterValue = libpdhDll.MustFindProc("PdhGetFormattedCounterValue")
	pdh_GetFormattedCounterArrayW = libpdhDll.MustFindProc("PdhGetFormattedCounterArrayW")
	pdh_OpenQuery = libpdhDll.MustFindProc("PdhOpenQuery")
	pdh_ValidatePathW = libpdhDll.MustFindProc("PdhValidatePathW")
}

// Adds the specified counter to the query. This is the internationalized version. Preferably, use the
// function PdhAddEnglishCounter instead. hQuery is the query handle, which has been fetched by PdhOpenQuery.
// szFullCounterPath is a full, internationalized counter path (this will differ per Windows language version).
// dwUserData is a 'user-defined value', which becomes part of the counter information. To retrieve this value
// later, call PdhGetCounterInfo() and access dwQueryUserData of the PDH_COUNTER_INFO structure.
//
// Examples of szFullCounterPath (in an English version of Windows):
//
//	\\Processor(_Total)\\% Idle Time
//	\\Processor(_Total)\\% Processor Time
//	\\LogicalDisk(C:)\% Free Space
//
// To view all (internationalized...) counters on a system, there are three non-programmatic ways: the perfmon utility,
// the typeperf command, and the registry editor. perfmon.exe is perhaps the easiest way, because it's basically a
// full implementation of the pdh.dll API, except with a GUI and all that. The registry setting also provides an
// interface to the available counters, and can be found at the following key:
//
//	HKEY_LOCAL_MACHINE\SOFTWARE\Microsoft\Windows NT\CurrentVersion\Perflib\CurrentLanguage
//
// This registry key contains several values as follows:
//
//	1
//	1847
//	2
//	System
//	4
//	Memory
//	6
//	% Processor Time
//	... many, many more
//
// Somehow, these numeric values can be used as szFullCounterPath too:
//
//	\2\6 will correspond to \\System\% Processor Time
//
// The typeperf command may also be pretty easy. To find all performance counters, simply execute:
//
//	typeperf -qx
func PdhAddCounter(hQuery PDH_HQUERY, szFullCounterPath string, dwUserData uintptr, phCounter *PDH_HCOUNTER) uint32 {
	ptxt, _ := syscall.UTF16PtrFromString(szFullCounterPath)
	ret, _, _ := pdh_AddCounterW.Call(
		uintptr(hQuery),
		uintptr(unsafe.Pointer(ptxt)),
		dwUserData,
		uintptr(unsafe.Pointer(phCounter)))

	return uint32(ret)
}

// Adds the specified language-neutral counter to the query. See the PdhAddCounter function. This function only exists on
// Windows versions higher than Vista.
func PdhAddEnglishCounter(hQuery PDH_HQUERY, szFullCounterPath string, dwUserData uintptr, phCounter *PDH_HCOUNTER) uint32 {
	if pdh_AddEnglishCounterW == nil {
		return ERROR_INVALID_FUNCTION
	}

	ptxt, _ := syscall.UTF16PtrFromString(szFullCounterPath)
	ret, _, _ := pdh_AddEnglishCounterW.Call(
		uintptr(hQuery),
		uintptr(unsafe.Pointer(ptxt)),
		dwUserData,
		uintptr(unsafe.Pointer(phCounter)))

	return uint32(ret)
}

// Closes all counters contained in the specified query, closes all handles related to the query,
// and frees all memory associated with the query.
func PdhCloseQuery(hQuery PDH_HQUERY) uint32 {
	ret, _, _ := pdh_CloseQuery.Call(uintptr(hQuery))

	return uint32(ret)
}

// Collects the current raw data value for all counters in the specified query and updates the status
// code of each counter. With some counters, this function needs to be repeatedly called before the value
// of the counter can be extracted with PdhGetFormattedCounterValue(). For example, the following code
// requires at least two calls:
//
//	var handle win.PDH_HQUERY
//	var counterHandle win.PDH_HCOUNTER
//	ret := win.PdhOpenQuery(0, 0, &handle)
//	ret = win.PdhAddEnglishCounter(handle, "\\Processor(_Total)\\% Idle Time", 0, &counterHandle)
//	var derp win.PDH_FMT_COUNTERVALUE_DOUBLE
//
//	ret = win.PdhCollectQueryData(handle)
//	fmt.Printf("Collect return code is %x\n", ret) // return code will be PDH_CSTATUS_INVALID_DATA
//	ret = win.PdhGetFormattedCounterValueDouble(counterHandle, 0, &derp)
//
//	ret = win.PdhCollectQueryData(handle)
//	fmt.Printf("Collect return code is %x\n", ret) // return code will be ERROR_SUCCESS
//	ret = win.PdhGetFormattedCounterValueDouble(counterHandle, 0, &derp)
//
// PdhCollectQueryData will return an error on the first call because it needs two values to
// display the correct data for the processor idle time. The second call will have a 0 return code.
func PdhCollectQueryData(hQuery PDH_HQUERY) uint32 {
	ret, _, _ := pdh_CollectQueryData.Call(uintptr(hQuery))

	return uint32(ret)
}

// Formats the given hCounter using a 'double'. The result is set into the specialized union struct pValue.
// This function does not directly translate to a Windows counterpart due to union specialization tricks.
func PdhGetFormattedCounterValueDouble(hCounter PDH_HCOUNTER, lpdwType *uint32, pValue *PDH_FMT_COUNTERVALUE_DOUBLE) uint32 {
	ret, _, _ := pdh_GetFormattedCounterValue.Call(
		uintptr(hCounter),
		uintptr(PDH_FMT_DOUBLE),
		uintptr(unsafe.Pointer(lpdwType)),
		uintptr(unsafe.Pointer(pValue)))

	return uint32(ret)
}

// Formats the given hCounter using a large int (int64). The result is set into the specialized union struct pValue.
// This function does not directly translate to a Windows counterpart due to union specialization tricks.
func PdhGetFormattedCounterValueLarge(hCounter PDH_HCOUNTER, lpdwType *uint32, pValue *PDH_FMT_COUNTERVALUE_LARGE) uint32 {
	ret, _, _ := pdh_GetFormattedCounterValue.Call(
		uintptr(hCounter),
		uintptr(PDH_FMT_LARGE),
		uintptr(unsafe.Pointer(lpdwType)),
		uintptr(unsafe.Pointer(pValue)))

	return uint32(ret)
}

// Formats the given hCounter using a 'long'. The result is set into the specialized union struct pValue.
// This function does not directly translate to a Windows counterpart due to union specialization tricks.
//
// BUG(krpors): Testing this function on multiple systems yielded inconsistent results. For instance,
// the pValue.LongValue kept the value '192' on test system A, but on B this was '0', while the padding
// bytes of the struct got the correct value. Until someone can figure out this behaviour, prefer to use
// the Double or Large counterparts instead. These functions actually provide the same data, except in
// a different, working format.
func PdhGetFormattedCounterValueLong(hCounter PDH_HCOUNTER, lpdwType *uint32, pValue *PDH_FMT_COUNTERVALUE_LONG) uint32 {
	ret, _, _ := pdh_GetFormattedCounterValue.Call(
		uintptr(hCounter),
		uintptr(PDH_FMT_LONG),
		uintptr(unsafe.Pointer(lpdwType)),
		uintptr(unsafe.Pointer(pValue)))

	return uint32(ret)
}

// Returns an array of formatted counter values. Use this function when you want to format the counter values of a
// counter that contains a wildcard character for the instance name. The itemBuffer must be a slice of type PDH_FMT_COUNTERVALUE_ITEM_DOUBLE.
// An example of how this function can be used:
//
//	okPath := "\\Process(*)\\% Processor Time" // notice the wildcard * character
//
//	// omitted all necessary stuff ...
//
//	var bufSize uint32
//	var bufCount uint32
//	var size uint32 = uint32(unsafe.Sizeof(win.PDH_FMT_COUNTERVALUE_ITEM_DOUBLE{}))
//	var emptyBuf [1]win.PDH_FMT_COUNTERVALUE_ITEM_DOUBLE // need at least 1 addressable null ptr.
//
//	for {
//		// collect
//		ret := win.PdhCollectQueryData(queryHandle)
//		if ret == win.ERROR_SUCCESS {
//			ret = win.PdhGetFormattedCounterArrayDouble(counterHandle, &bufSize, &bufCount, &emptyBuf[0]) // uses null ptr here according to MSDN.
//			if ret == win.PDH_MORE_DATA {
//				filledBuf := make([]win.PDH_FMT_COUNTERVALUE_ITEM_DOUBLE, bufCount*size)
//				ret = win.PdhGetFormattedCounterArrayDouble(counterHandle, &bufSize, &bufCount, &filledBuf[0])
//				for i := 0; i < int(bufCount); i++ {
//					c := filledBuf[i]
//					var s string = win.UTF16PtrToString(c.SzName)
//					fmt.Printf("Index %d -> %s, value %v\n", i, s, c.FmtValue.DoubleValue)
//				}
//
//				filledBuf = nil
//				// Need to at least set bufSize to zero, because if not, the function will not
//				// return PDH_MORE_DATA and will not set the bufSize.
//				bufCount = 0
//				bufSize = 0
//			}
//
//			time.Sleep(2000 * time.Millisecond)
//		}
//	}
func PdhGetFormattedCounterArrayDouble(hCounter PDH_HCOUNTER, lpdwBufferSize *uint32, lpdwBufferCount *uint32, itemBuffer *PDH_FMT_COUNTERVALUE_ITEM_DOUBLE) uint32 {
	ret, _, _ := pdh_GetFormattedCounterArrayW.Call(
		uintptr(hCounter),
		uintptr(PDH_FMT_DOUBLE),
		uintptr(unsafe.Pointer(lpdwBufferSize)),
		uintptr(unsafe.Pointer(lpdwBufferCount)),
		uintptr(unsafe.Pointer(itemBuffer)))

	return uint32(ret)
}

// Returns an array of formatted counter values. Use this function when you want to format the counter values of a
// counter that contains a wildcard character for the instance name. The itemBuffer must be a slice of type PDH_FMT_COUNTERVALUE_ITEM_LARGE.
// For an example usage, see PdhGetFormattedCounterArrayDouble.
func PdhGetFormattedCounterArrayLarge(hCounter PDH_HCOUNTER, lpdwBufferSize *uint32, lpdwBufferCount *uint32, itemBuffer *PDH_FMT_COUNTERVALUE_ITEM_LARGE) uint32 {
	ret, _, _ := pdh_GetFormattedCounterArrayW.Call(
		uintptr(hCounter),
		uintptr(PDH_FMT_LARGE),
		uintptr(unsafe.Pointer(lpdwBufferSize)),
		uintptr(unsafe.Pointer(lpdwBufferCount)),
		uintptr(unsafe.Pointer(itemBuffer)))

	return uint32(ret)
}

// Returns an array of formatted counter values. Use this function when you want to format the counter values of a
// counter that contains a wildcard character for the instance name. The itemBuffer must be a slice of type PDH_FMT_COUNTERVALUE_ITEM_LONG.
// For an example usage, see PdhGetFormattedCounterArrayDouble.
//
// BUG(krpors): See description of PdhGetFormattedCounterValueLong().
func PdhGetFormattedCounterArrayLong(hCounter PDH_HCOUNTER, lpdwBufferSize *uint32, lpdwBufferCount *uint32, itemBuffer *PDH_FMT_COUNTERVALUE_ITEM_LONG) uint32 {
	ret, _, _ := pdh_GetFormattedCounterArrayW.Call(
		uintptr(hCounter),
		uintptr(PDH_FMT_LONG),
		uintptr(unsafe.Pointer(lpdwBufferSize)),
		uintptr(unsafe.Pointer(lpdwBufferCount)),
		uintptr(unsafe.Pointer(itemBuffer)))

	return uint32(ret)
}

// Creates a new query that is used to manage the collection of performance data.
// szDataSource is a null terminated string that specifies the name of the log file from which to
// retrieve the performance data. If 0, performance data is collected from a real-time data source.
// dwUserData is a user-defined value to associate with this query. To retrieve the user data later,
// call PdhGetCounterInfo and access dwQueryUserData of the PDH_COUNTER_INFO structure. phQuery is
// the handle to the query, and must be used in subsequent calls. This function returns a PDH_
// constant error code, or ERROR_SUCCESS if the call succeeded.
func PdhOpenQuery(szDataSource uintptr, dwUserData uintptr, phQuery *PDH_HQUERY) uint32 {
	ret, _, _ := pdh_OpenQuery.Call(
		szDataSource,
		dwUserData,
		uintptr(unsafe.Pointer(phQuery)))

	return uint32(ret)
}

// Validates a path. Will return ERROR_SUCCESS when ok, or PDH_CSTATUS_BAD_COUNTERNAME when the path is
// erroneous.
func PdhValidatePath(path string) uint32 {
	ptxt, _ := syscall.UTF16PtrFromString(path)
	ret, _, _ := pdh_ValidatePathW.Call(uintptr(unsafe.Pointer(ptxt)))

	return uint32(ret)
}

func UTF16PtrToString(s *uint16) string {
	if s == nil {
		return ""
	}
	return syscall.UTF16ToString((*[1 << 29]uint16)(unsafe.Pointer(s))[0:])
}
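The vendored package above is self-describing, but the end-to-end flow is easier to see in one place. A minimal sketch (not part of the vendored file; the counter path and sleep interval are illustrative) that opens a query, samples twice, and reads a formatted double:

```go
//go:build windows

package main

import (
	"fmt"
	"time"

	win_pdh "github.com/JeffAshton/win_pdh"
)

func main() {
	var query win_pdh.PDH_HQUERY
	var counter win_pdh.PDH_HCOUNTER

	if ret := win_pdh.PdhOpenQuery(0, 0, &query); ret != win_pdh.ERROR_SUCCESS {
		fmt.Printf("PdhOpenQuery failed: %#x\n", ret)
		return
	}
	defer win_pdh.PdhCloseQuery(query)

	// Language-neutral path, so this works on non-English Windows as well.
	if ret := win_pdh.PdhAddEnglishCounter(query, `\Processor(_Total)\% Idle Time`, 0, &counter); ret != win_pdh.ERROR_SUCCESS {
		fmt.Printf("PdhAddEnglishCounter failed: %#x\n", ret)
		return
	}

	// Rate counters need two samples; after only one collect the counter
	// status is PDH_CSTATUS_INVALID_DATA.
	win_pdh.PdhCollectQueryData(query)
	time.Sleep(time.Second)
	win_pdh.PdhCollectQueryData(query)

	var counterType uint32
	var value win_pdh.PDH_FMT_COUNTERVALUE_DOUBLE
	if ret := win_pdh.PdhGetFormattedCounterValueDouble(counter, &counterType, &value); ret == win_pdh.ERROR_SUCCESS {
		fmt.Printf("idle: %.2f%%\n", value.DoubleValue)
	}
}
```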
e2e/vendor/github.com/Microsoft/go-winio/.gitattributes (generated, vendored, new file, 1 line)
@@ -0,0 +1 @@
* text=auto eol=lf
e2e/vendor/github.com/Microsoft/go-winio/.gitignore (generated, vendored, new file, 10 lines)
@@ -0,0 +1,10 @@
.vscode/

*.exe

# testing
testdata

# go workspaces
go.work
go.work.sum
e2e/vendor/github.com/Microsoft/go-winio/.golangci.yml (generated, vendored, new file, 147 lines)
@@ -0,0 +1,147 @@
linters:
  enable:
    # style
    - containedctx # struct contains a context
    - dupl # duplicate code
    - errname # errors are named correctly
    - nolintlint # "//nolint" directives are properly explained
    - revive # golint replacement
    - unconvert # unnecessary conversions
    - wastedassign

    # bugs, performance, unused, etc ...
    - contextcheck # function uses a non-inherited context
    - errorlint # errors not wrapped for 1.13
    - exhaustive # check exhaustiveness of enum switch statements
    - gofmt # files are gofmt'ed
    - gosec # security
    - nilerr # returns nil even with non-nil error
    - thelper # test helpers without t.Helper()
    - unparam # unused function params

issues:
  exclude-dirs:
    - pkg/etw/sample

  exclude-rules:
    # err is very often shadowed in nested scopes
    - linters:
        - govet
      text: '^shadow: declaration of "err" shadows declaration'

    # ignore long lines for skip autogen directives
    - linters:
        - revive
      text: "^line-length-limit: "
      source: "^//(go:generate|sys) "

    #TODO: remove after upgrading to go1.18
    # ignore comment spacing for nolint and sys directives
    - linters:
        - revive
      text: "^comment-spacings: no space between comment delimiter and comment text"
      source: "//(cspell:|nolint:|sys |todo)"

    # not on go 1.18 yet, so no any
    - linters:
        - revive
      text: "^use-any: since GO 1.18 'interface{}' can be replaced by 'any'"

    # allow unjustified ignores of error checks in defer statements
    - linters:
        - nolintlint
      text: "^directive `//nolint:errcheck` should provide explanation"
      source: '^\s*defer '

    # allow unjustified ignores of error lints for io.EOF
    - linters:
        - nolintlint
      text: "^directive `//nolint:errorlint` should provide explanation"
      source: '[=|!]= io.EOF'

linters-settings:
  exhaustive:
    default-signifies-exhaustive: true
  govet:
    enable-all: true
    disable:
      # struct order is often for Win32 compat
      # also, ignore pointer bytes/GC issues for now until performance becomes an issue
      - fieldalignment
  nolintlint:
    require-explanation: true
    require-specific: true
  revive:
    # revive is more configurable than static check, so likely the preferred alternative to static-check
    # (once the perf issue is solved: https://github.com/golangci/golangci-lint/issues/2997)
    enable-all-rules: true
    # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md
    rules:
      # rules with required arguments
      - name: argument-limit
        disabled: true
      - name: banned-characters
        disabled: true
      - name: cognitive-complexity
        disabled: true
      - name: cyclomatic
        disabled: true
      - name: file-header
        disabled: true
      - name: function-length
        disabled: true
      - name: function-result-limit
        disabled: true
      - name: max-public-structs
        disabled: true
      # generally annoying rules
      - name: add-constant # complains about any and all strings and integers
        disabled: true
      - name: confusing-naming # we frequently use "Foo()" and "foo()" together
        disabled: true
      - name: flag-parameter # excessive, and a common idiom we use
        disabled: true
      - name: unhandled-error # warns over common fmt.Print* and io.Close; rely on errcheck instead
        disabled: true
      # general config
      - name: line-length-limit
        arguments:
          - 140
      - name: var-naming
        arguments:
          - []
          - - CID
            - CRI
            - CTRD
            - DACL
            - DLL
            - DOS
            - ETW
            - FSCTL
            - GCS
            - GMSA
            - HCS
            - HV
            - IO
            - LCOW
            - LDAP
            - LPAC
            - LTSC
            - MMIO
            - NT
            - OCI
            - PMEM
            - PWSH
            - RX
            - SACl
            - SID
            - SMB
            - TX
            - VHD
            - VHDX
            - VMID
            - VPCI
            - WCOW
            - WIM
e2e/vendor/github.com/Microsoft/go-winio/CODEOWNERS (generated, vendored, new file, 1 line)
@@ -0,0 +1 @@
* @microsoft/containerplat
e2e/vendor/github.com/Microsoft/go-winio/LICENSE (generated, vendored, new file, 22 lines)
@@ -0,0 +1,22 @@
The MIT License (MIT)

Copyright (c) 2015 Microsoft

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
e2e/vendor/github.com/Microsoft/go-winio/README.md (generated, vendored, new file, 89 lines)
@@ -0,0 +1,89 @@
# go-winio [](https://github.com/microsoft/go-winio/actions/workflows/ci.yml)

This repository contains utilities for efficiently performing Win32 IO operations in
Go. Currently, this is focused on accessing named pipes and other file handles, and
for using named pipes as a net transport.

This code relies on IO completion ports to avoid blocking IO on system threads, allowing Go
to reuse the thread to schedule another goroutine. This limits support to Windows Vista and
newer operating systems. This is similar to the implementation of network sockets in Go's net
package.

Please see the LICENSE file for licensing information.

## Contributing

This project welcomes contributions and suggestions.
Most contributions require you to agree to a Contributor License Agreement (CLA) declaring that
you have the right to, and actually do, grant us the rights to use your contribution.
For details, visit [Microsoft CLA](https://cla.microsoft.com).

When you submit a pull request, a CLA-bot will automatically determine whether you need to
provide a CLA and decorate the PR appropriately (e.g., label, comment).
Simply follow the instructions provided by the bot.
You will only need to do this once across all repos using our CLA.

Additionally, the pull request pipeline requires the following steps to be performed before
merging.

### Code Sign-Off

We require that contributors sign their commits using [`git commit --signoff`][git-commit-s]
to certify they either authored the work themselves or otherwise have permission to use it in this project.

A range of commits can be signed off using [`git rebase --signoff`][git-rebase-s].

Please see [the developer certificate](https://developercertificate.org) for more info,
as well as to make sure that you can attest to the rules listed.
Our CI uses the DCO Github app to ensure that all commits in a given PR are signed-off.

### Linting

Code must pass a linting stage, which uses [`golangci-lint`][lint].
The linting settings are stored in [`.golangci.yaml`](./.golangci.yaml), and can be run
automatically with VSCode by adding the following to your workspace or folder settings:

```json
"go.lintTool": "golangci-lint",
"go.lintOnSave": "package",
```

Additional editor [integrations options are also available][lint-ide].

Alternatively, `golangci-lint` can be [installed locally][lint-install] and run from the repo root:

```shell
# use . or specify a path to only lint a package
# to show all lint errors, use flags "--max-issues-per-linter=0 --max-same-issues=0"
> golangci-lint run ./...
```

### Go Generate

The pipeline checks that auto-generated code, via `go generate`, is up to date.

This can be done for the entire repo:

```shell
> go generate ./...
```

## Code of Conduct

This project has adopted the [Microsoft Open Source Code of Conduct](https://opensource.microsoft.com/codeofconduct/).
For more information see the [Code of Conduct FAQ](https://opensource.microsoft.com/codeofconduct/faq/) or
contact [opencode@microsoft.com](mailto:opencode@microsoft.com) with any additional questions or comments.

## Special Thanks

Thanks to [natefinch][natefinch] for the inspiration for this library.
See [npipe](https://github.com/natefinch/npipe) for another named pipe implementation.

[lint]: https://golangci-lint.run/
[lint-ide]: https://golangci-lint.run/usage/integrations/#editor-integration
[lint-install]: https://golangci-lint.run/usage/install/#local-installation

[git-commit-s]: https://git-scm.com/docs/git-commit#Documentation/git-commit.txt--s
[git-rebase-s]: https://git-scm.com/docs/git-rebase#Documentation/git-rebase.txt---signoff

[natefinch]: https://github.com/natefinch
e2e/vendor/github.com/Microsoft/go-winio/SECURITY.md (generated, vendored, new file, 41 lines)
@@ -0,0 +1,41 @@
<!-- BEGIN MICROSOFT SECURITY.MD V0.0.7 BLOCK -->

## Security

Microsoft takes the security of our software products and services seriously, which includes all source code repositories managed through our GitHub organizations, which include [Microsoft](https://github.com/Microsoft), [Azure](https://github.com/Azure), [DotNet](https://github.com/dotnet), [AspNet](https://github.com/aspnet), [Xamarin](https://github.com/xamarin), and [our GitHub organizations](https://opensource.microsoft.com/).

If you believe you have found a security vulnerability in any Microsoft-owned repository that meets [Microsoft's definition of a security vulnerability](https://aka.ms/opensource/security/definition), please report it to us as described below.

## Reporting Security Issues

**Please do not report security vulnerabilities through public GitHub issues.**

Instead, please report them to the Microsoft Security Response Center (MSRC) at [https://msrc.microsoft.com/create-report](https://aka.ms/opensource/security/create-report).

If you prefer to submit without logging in, send email to [secure@microsoft.com](mailto:secure@microsoft.com). If possible, encrypt your message with our PGP key; please download it from the [Microsoft Security Response Center PGP Key page](https://aka.ms/opensource/security/pgpkey).

You should receive a response within 24 hours. If for some reason you do not, please follow up via email to ensure we received your original message. Additional information can be found at [microsoft.com/msrc](https://aka.ms/opensource/security/msrc).

Please include the requested information listed below (as much as you can provide) to help us better understand the nature and scope of the possible issue:

* Type of issue (e.g. buffer overflow, SQL injection, cross-site scripting, etc.)
* Full paths of source file(s) related to the manifestation of the issue
* The location of the affected source code (tag/branch/commit or direct URL)
* Any special configuration required to reproduce the issue
* Step-by-step instructions to reproduce the issue
* Proof-of-concept or exploit code (if possible)
* Impact of the issue, including how an attacker might exploit the issue

This information will help us triage your report more quickly.

If you are reporting for a bug bounty, more complete reports can contribute to a higher bounty award. Please visit our [Microsoft Bug Bounty Program](https://aka.ms/opensource/security/bounty) page for more details about our active programs.

## Preferred Languages

We prefer all communications to be in English.

## Policy

Microsoft follows the principle of [Coordinated Vulnerability Disclosure](https://aka.ms/opensource/security/cvd).

<!-- END MICROSOFT SECURITY.MD BLOCK -->
e2e/vendor/github.com/Microsoft/go-winio/backup.go (generated, vendored, new file, 287 lines)
@@ -0,0 +1,287 @@
//go:build windows
// +build windows

package winio

import (
	"encoding/binary"
	"errors"
	"fmt"
	"io"
	"os"
	"runtime"
	"unicode/utf16"

	"github.com/Microsoft/go-winio/internal/fs"
	"golang.org/x/sys/windows"
)

//sys backupRead(h windows.Handle, b []byte, bytesRead *uint32, abort bool, processSecurity bool, context *uintptr) (err error) = BackupRead
//sys backupWrite(h windows.Handle, b []byte, bytesWritten *uint32, abort bool, processSecurity bool, context *uintptr) (err error) = BackupWrite

const (
	BackupData = uint32(iota + 1)
	BackupEaData
	BackupSecurity
	BackupAlternateData
	BackupLink
	BackupPropertyData
	BackupObjectId //revive:disable-line:var-naming ID, not Id
	BackupReparseData
	BackupSparseBlock
	BackupTxfsData
)

const (
	StreamSparseAttributes = uint32(8)
)

//nolint:revive // var-naming: ALL_CAPS
const (
	WRITE_DAC              = windows.WRITE_DAC
	WRITE_OWNER            = windows.WRITE_OWNER
	ACCESS_SYSTEM_SECURITY = windows.ACCESS_SYSTEM_SECURITY
)

// BackupHeader represents a backup stream of a file.
type BackupHeader struct {
	//revive:disable-next-line:var-naming ID, not Id
	Id         uint32 // The backup stream ID
	Attributes uint32 // Stream attributes
	Size       int64  // The size of the stream in bytes
	Name       string // The name of the stream (for BackupAlternateData only).
	Offset     int64  // The offset of the stream in the file (for BackupSparseBlock only).
}

type win32StreamID struct {
	StreamID   uint32
	Attributes uint32
	Size       uint64
	NameSize   uint32
}

// BackupStreamReader reads from a stream produced by the BackupRead Win32 API and produces a series
// of BackupHeader values.
type BackupStreamReader struct {
	r         io.Reader
	bytesLeft int64
}

// NewBackupStreamReader produces a BackupStreamReader from any io.Reader.
func NewBackupStreamReader(r io.Reader) *BackupStreamReader {
	return &BackupStreamReader{r, 0}
}

// Next returns the next backup stream and prepares for calls to Read(). It skips the remainder of the current stream if
// it was not completely read.
func (r *BackupStreamReader) Next() (*BackupHeader, error) {
	if r.bytesLeft > 0 { //nolint:nestif // todo: flatten this
		if s, ok := r.r.(io.Seeker); ok {
			// Make sure Seek on io.SeekCurrent sometimes succeeds
			// before trying the actual seek.
			if _, err := s.Seek(0, io.SeekCurrent); err == nil {
				if _, err = s.Seek(r.bytesLeft, io.SeekCurrent); err != nil {
					return nil, err
				}
				r.bytesLeft = 0
			}
		}
		if _, err := io.Copy(io.Discard, r); err != nil {
			return nil, err
		}
	}
	var wsi win32StreamID
	if err := binary.Read(r.r, binary.LittleEndian, &wsi); err != nil {
		return nil, err
	}
	hdr := &BackupHeader{
		Id:         wsi.StreamID,
		Attributes: wsi.Attributes,
		Size:       int64(wsi.Size),
	}
	if wsi.NameSize != 0 {
		name := make([]uint16, int(wsi.NameSize/2))
		if err := binary.Read(r.r, binary.LittleEndian, name); err != nil {
			return nil, err
		}
		hdr.Name = windows.UTF16ToString(name)
	}
	if wsi.StreamID == BackupSparseBlock {
		if err := binary.Read(r.r, binary.LittleEndian, &hdr.Offset); err != nil {
			return nil, err
		}
		hdr.Size -= 8
	}
	r.bytesLeft = hdr.Size
	return hdr, nil
}

// Read reads from the current backup stream.
func (r *BackupStreamReader) Read(b []byte) (int, error) {
	if r.bytesLeft == 0 {
		return 0, io.EOF
	}
	if int64(len(b)) > r.bytesLeft {
		b = b[:r.bytesLeft]
	}
	n, err := r.r.Read(b)
	r.bytesLeft -= int64(n)
	if err == io.EOF {
		err = io.ErrUnexpectedEOF
	} else if r.bytesLeft == 0 && err == nil {
		err = io.EOF
	}
	return n, err
}

// BackupStreamWriter writes a stream compatible with the BackupWrite Win32 API.
type BackupStreamWriter struct {
	w         io.Writer
	bytesLeft int64
}

// NewBackupStreamWriter produces a BackupStreamWriter on top of an io.Writer.
func NewBackupStreamWriter(w io.Writer) *BackupStreamWriter {
	return &BackupStreamWriter{w, 0}
}

// WriteHeader writes the next backup stream header and prepares for calls to Write().
func (w *BackupStreamWriter) WriteHeader(hdr *BackupHeader) error {
	if w.bytesLeft != 0 {
		return fmt.Errorf("missing %d bytes", w.bytesLeft)
	}
	name := utf16.Encode([]rune(hdr.Name))
	wsi := win32StreamID{
		StreamID:   hdr.Id,
		Attributes: hdr.Attributes,
		Size:       uint64(hdr.Size),
		NameSize:   uint32(len(name) * 2),
	}
	if hdr.Id == BackupSparseBlock {
		// Include space for the int64 block offset
		wsi.Size += 8
	}
	if err := binary.Write(w.w, binary.LittleEndian, &wsi); err != nil {
		return err
	}
	if len(name) != 0 {
		if err := binary.Write(w.w, binary.LittleEndian, name); err != nil {
			return err
		}
	}
	if hdr.Id == BackupSparseBlock {
		if err := binary.Write(w.w, binary.LittleEndian, hdr.Offset); err != nil {
			return err
		}
	}
	w.bytesLeft = hdr.Size
	return nil
}

// Write writes to the current backup stream.
func (w *BackupStreamWriter) Write(b []byte) (int, error) {
	if w.bytesLeft < int64(len(b)) {
		return 0, fmt.Errorf("too many bytes by %d", int64(len(b))-w.bytesLeft)
	}
	n, err := w.w.Write(b)
	w.bytesLeft -= int64(n)
	return n, err
}

// BackupFileReader provides an io.ReadCloser interface on top of the BackupRead Win32 API.
type BackupFileReader struct {
	f               *os.File
	includeSecurity bool
	ctx             uintptr
}

// NewBackupFileReader returns a new BackupFileReader from a file handle. If includeSecurity is true,
// Read will attempt to read the security descriptor of the file.
func NewBackupFileReader(f *os.File, includeSecurity bool) *BackupFileReader {
	r := &BackupFileReader{f, includeSecurity, 0}
	return r
}

// Read reads a backup stream from the file by calling the Win32 API BackupRead().
func (r *BackupFileReader) Read(b []byte) (int, error) {
	var bytesRead uint32
	err := backupRead(windows.Handle(r.f.Fd()), b, &bytesRead, false, r.includeSecurity, &r.ctx)
	if err != nil {
		return 0, &os.PathError{Op: "BackupRead", Path: r.f.Name(), Err: err}
	}
	runtime.KeepAlive(r.f)
	if bytesRead == 0 {
		return 0, io.EOF
	}
	return int(bytesRead), nil
}

// Close frees Win32 resources associated with the BackupFileReader. It does not close
// the underlying file.
func (r *BackupFileReader) Close() error {
	if r.ctx != 0 {
		_ = backupRead(windows.Handle(r.f.Fd()), nil, nil, true, false, &r.ctx)
		runtime.KeepAlive(r.f)
		r.ctx = 0
	}
	return nil
}

// BackupFileWriter provides an io.WriteCloser interface on top of the BackupWrite Win32 API.
type BackupFileWriter struct {
	f               *os.File
	includeSecurity bool
	ctx             uintptr
}

// NewBackupFileWriter returns a new BackupFileWriter from a file handle. If includeSecurity is true,
// Write() will attempt to restore the security descriptor from the stream.
func NewBackupFileWriter(f *os.File, includeSecurity bool) *BackupFileWriter {
	w := &BackupFileWriter{f, includeSecurity, 0}
	return w
}

// Write restores a portion of the file using the provided backup stream.
func (w *BackupFileWriter) Write(b []byte) (int, error) {
	var bytesWritten uint32
	err := backupWrite(windows.Handle(w.f.Fd()), b, &bytesWritten, false, w.includeSecurity, &w.ctx)
	if err != nil {
		return 0, &os.PathError{Op: "BackupWrite", Path: w.f.Name(), Err: err}
	}
	runtime.KeepAlive(w.f)
	if int(bytesWritten) != len(b) {
		return int(bytesWritten), errors.New("not all bytes could be written")
	}
	return len(b), nil
}

// Close frees Win32 resources associated with the BackupFileWriter. It does not
// close the underlying file.
func (w *BackupFileWriter) Close() error {
	if w.ctx != 0 {
		_ = backupWrite(windows.Handle(w.f.Fd()), nil, nil, true, false, &w.ctx)
		runtime.KeepAlive(w.f)
		w.ctx = 0
	}
	return nil
}

// OpenForBackup opens a file or directory, potentially skipping access checks if the backup
// or restore privileges have been acquired.
//
// If the file opened was a directory, it cannot be used with Readdir().
func OpenForBackup(path string, access uint32, share uint32, createmode uint32) (*os.File, error) {
	h, err := fs.CreateFile(path,
		fs.AccessMask(access),
		fs.FileShareMode(share),
		nil,
		fs.FileCreationDisposition(createmode),
		fs.FILE_FLAG_BACKUP_SEMANTICS|fs.FILE_FLAG_OPEN_REPARSE_POINT,
		0,
	)
	if err != nil {
		err = &os.PathError{Op: "open", Path: path, Err: err}
		return nil, err
	}
	return os.NewFile(uintptr(h), path), nil
}
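A minimal usage sketch (not part of the vendored file; the path is illustrative) that combines the pieces above to enumerate a file's backup streams, which is how alternate data streams and security descriptors surface through this API:

```go
//go:build windows

package main

import (
	"fmt"
	"io"

	"github.com/Microsoft/go-winio"
	"golang.org/x/sys/windows"
)

func main() {
	// OpenForBackup honors backup/restore privileges if they were
	// acquired beforehand (e.g. via winio.EnableProcessPrivileges).
	f, err := winio.OpenForBackup(`C:\some\file.txt`,
		windows.GENERIC_READ, windows.FILE_SHARE_READ, windows.OPEN_EXISTING)
	if err != nil {
		fmt.Println(err)
		return
	}
	defer f.Close()

	br := winio.NewBackupFileReader(f, false) // false: skip the security descriptor
	defer br.Close()

	sr := winio.NewBackupStreamReader(br)
	for {
		hdr, err := sr.Next()
		if err == io.EOF {
			break
		}
		if err != nil {
			fmt.Println(err)
			return
		}
		fmt.Printf("stream id=%d name=%q size=%d\n", hdr.Id, hdr.Name, hdr.Size)
	}
}
```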
e2e/vendor/github.com/Microsoft/go-winio/doc.go (generated, vendored, new file, 22 lines)
@@ -0,0 +1,22 @@
// This package provides utilities for efficiently performing Win32 IO operations in Go.
// Currently, this package provides support for general IO and management of
// - named pipes
// - files
// - [Hyper-V sockets]
//
// This code is similar to Go's [net] package, and uses IO completion ports to avoid
// blocking IO on system threads, allowing Go to reuse the thread to schedule other goroutines.
//
// This limits support to Windows Vista and newer operating systems.
//
// Additionally, this package provides support for:
// - creating and managing GUIDs
// - writing to [ETW]
// - opening and managing VHDs
// - parsing [Windows Image files]
// - auto-generating Win32 API code
//
// [Hyper-V sockets]: https://docs.microsoft.com/en-us/virtualization/hyper-v-on-windows/user-guide/make-integration-service
// [ETW]: https://docs.microsoft.com/en-us/windows-hardware/drivers/devtest/event-tracing-for-windows--etw-
// [Windows Image files]: https://docs.microsoft.com/en-us/windows-hardware/manufacture/desktop/work-with-windows-images
package winio
e2e/vendor/github.com/Microsoft/go-winio/ea.go (generated, vendored, new file, 137 lines)
@@ -0,0 +1,137 @@
package winio

import (
	"bytes"
	"encoding/binary"
	"errors"
)

type fileFullEaInformation struct {
	NextEntryOffset uint32
	Flags           uint8
	NameLength      uint8
	ValueLength     uint16
}

var (
	fileFullEaInformationSize = binary.Size(&fileFullEaInformation{})

	errInvalidEaBuffer = errors.New("invalid extended attribute buffer")
	errEaNameTooLarge  = errors.New("extended attribute name too large")
	errEaValueTooLarge = errors.New("extended attribute value too large")
)

// ExtendedAttribute represents a single Windows EA.
type ExtendedAttribute struct {
	Name  string
	Value []byte
	Flags uint8
}

func parseEa(b []byte) (ea ExtendedAttribute, nb []byte, err error) {
	var info fileFullEaInformation
	err = binary.Read(bytes.NewReader(b), binary.LittleEndian, &info)
	if err != nil {
		err = errInvalidEaBuffer
		return ea, nb, err
	}

	nameOffset := fileFullEaInformationSize
	nameLen := int(info.NameLength)
	valueOffset := nameOffset + int(info.NameLength) + 1
	valueLen := int(info.ValueLength)
	nextOffset := int(info.NextEntryOffset)
	if valueLen+valueOffset > len(b) || nextOffset < 0 || nextOffset > len(b) {
		err = errInvalidEaBuffer
		return ea, nb, err
	}

	ea.Name = string(b[nameOffset : nameOffset+nameLen])
	ea.Value = b[valueOffset : valueOffset+valueLen]
	ea.Flags = info.Flags
	if info.NextEntryOffset != 0 {
		nb = b[info.NextEntryOffset:]
	}
	return ea, nb, err
}

// DecodeExtendedAttributes decodes a list of EAs from a FILE_FULL_EA_INFORMATION
// buffer retrieved from BackupRead, ZwQueryEaFile, etc.
func DecodeExtendedAttributes(b []byte) (eas []ExtendedAttribute, err error) {
	for len(b) != 0 {
		ea, nb, err := parseEa(b)
		if err != nil {
			return nil, err
		}

		eas = append(eas, ea)
		b = nb
	}
	return eas, err
}

func writeEa(buf *bytes.Buffer, ea *ExtendedAttribute, last bool) error {
	if int(uint8(len(ea.Name))) != len(ea.Name) {
		return errEaNameTooLarge
	}
	if int(uint16(len(ea.Value))) != len(ea.Value) {
		return errEaValueTooLarge
	}
	entrySize := uint32(fileFullEaInformationSize + len(ea.Name) + 1 + len(ea.Value))
	withPadding := (entrySize + 3) &^ 3
	nextOffset := uint32(0)
	if !last {
		nextOffset = withPadding
	}
	info := fileFullEaInformation{
		NextEntryOffset: nextOffset,
		Flags:           ea.Flags,
		NameLength:      uint8(len(ea.Name)),
		ValueLength:     uint16(len(ea.Value)),
	}

	err := binary.Write(buf, binary.LittleEndian, &info)
	if err != nil {
		return err
	}

	_, err = buf.Write([]byte(ea.Name))
	if err != nil {
		return err
	}

	err = buf.WriteByte(0)
	if err != nil {
		return err
	}

	_, err = buf.Write(ea.Value)
	if err != nil {
		return err
	}

	_, err = buf.Write([]byte{0, 0, 0}[0 : withPadding-entrySize])
	if err != nil {
		return err
	}

	return nil
}

// EncodeExtendedAttributes encodes a list of EAs into a FILE_FULL_EA_INFORMATION
// buffer for use with BackupWrite, ZwSetEaFile, etc.
func EncodeExtendedAttributes(eas []ExtendedAttribute) ([]byte, error) {
	var buf bytes.Buffer
	for i := range eas {
		last := false
		if i == len(eas)-1 {
			last = true
		}

		err := writeEa(&buf, &eas[i], last)
		if err != nil {
			return nil, err
		}
	}
	return buf.Bytes(), nil
}
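Since the encoder and decoder above are inverses over the same FILE_FULL_EA_INFORMATION layout, a round trip is the simplest way to see how they are meant to be used. A hypothetical sketch (attribute names and values are illustrative):

```go
package main

import (
	"fmt"

	"github.com/Microsoft/go-winio"
)

func main() {
	eas := []winio.ExtendedAttribute{
		{Name: "user.comment", Value: []byte("hello")}, // illustrative entries
		{Name: "user.origin", Value: []byte("e2e")},
	}

	// Encode into a single padded FILE_FULL_EA_INFORMATION buffer.
	buf, err := winio.EncodeExtendedAttributes(eas)
	if err != nil {
		fmt.Println(err)
		return
	}

	// Decode it back; the result should mirror the input list.
	decoded, err := winio.DecodeExtendedAttributes(buf)
	if err != nil {
		fmt.Println(err)
		return
	}
	for _, ea := range decoded {
		fmt.Printf("%s = %q\n", ea.Name, ea.Value)
	}
}
```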
e2e/vendor/github.com/Microsoft/go-winio/file.go (generated, vendored, new file, 320 lines)
@@ -0,0 +1,320 @@
//go:build windows
// +build windows

package winio

import (
	"errors"
	"io"
	"runtime"
	"sync"
	"sync/atomic"
	"syscall"
	"time"

	"golang.org/x/sys/windows"
)

//sys cancelIoEx(file windows.Handle, o *windows.Overlapped) (err error) = CancelIoEx
//sys createIoCompletionPort(file windows.Handle, port windows.Handle, key uintptr, threadCount uint32) (newport windows.Handle, err error) = CreateIoCompletionPort
//sys getQueuedCompletionStatus(port windows.Handle, bytes *uint32, key *uintptr, o **ioOperation, timeout uint32) (err error) = GetQueuedCompletionStatus
//sys setFileCompletionNotificationModes(h windows.Handle, flags uint8) (err error) = SetFileCompletionNotificationModes
//sys wsaGetOverlappedResult(h windows.Handle, o *windows.Overlapped, bytes *uint32, wait bool, flags *uint32) (err error) = ws2_32.WSAGetOverlappedResult

var (
	ErrFileClosed = errors.New("file has already been closed")
	ErrTimeout    = &timeoutError{}
)

type timeoutError struct{}

func (*timeoutError) Error() string   { return "i/o timeout" }
func (*timeoutError) Timeout() bool   { return true }
func (*timeoutError) Temporary() bool { return true }

type timeoutChan chan struct{}

var ioInitOnce sync.Once
var ioCompletionPort windows.Handle

// ioResult contains the result of an asynchronous IO operation.
type ioResult struct {
	bytes uint32
	err   error
}

// ioOperation represents an outstanding asynchronous Win32 IO.
type ioOperation struct {
	o  windows.Overlapped
	ch chan ioResult
}

func initIO() {
	h, err := createIoCompletionPort(windows.InvalidHandle, 0, 0, 0xffffffff)
	if err != nil {
		panic(err)
	}
	ioCompletionPort = h
	go ioCompletionProcessor(h)
}

// win32File implements Reader, Writer, and Closer on a Win32 handle without blocking in a syscall.
// It takes ownership of this handle and will close it if it is garbage collected.
type win32File struct {
	handle        windows.Handle
	wg            sync.WaitGroup
	wgLock        sync.RWMutex
	closing       atomic.Bool
	socket        bool
	readDeadline  deadlineHandler
	writeDeadline deadlineHandler
}

type deadlineHandler struct {
	setLock     sync.Mutex
	channel     timeoutChan
	channelLock sync.RWMutex
	timer       *time.Timer
	timedout    atomic.Bool
}

// makeWin32File makes a new win32File from an existing file handle.
func makeWin32File(h windows.Handle) (*win32File, error) {
	f := &win32File{handle: h}
	ioInitOnce.Do(initIO)
	_, err := createIoCompletionPort(h, ioCompletionPort, 0, 0xffffffff)
	if err != nil {
		return nil, err
	}
	err = setFileCompletionNotificationModes(h, windows.FILE_SKIP_COMPLETION_PORT_ON_SUCCESS|windows.FILE_SKIP_SET_EVENT_ON_HANDLE)
	if err != nil {
		return nil, err
	}
	f.readDeadline.channel = make(timeoutChan)
	f.writeDeadline.channel = make(timeoutChan)
	return f, nil
}

// Deprecated: use NewOpenFile instead.
func MakeOpenFile(h syscall.Handle) (io.ReadWriteCloser, error) {
	return NewOpenFile(windows.Handle(h))
}

func NewOpenFile(h windows.Handle) (io.ReadWriteCloser, error) {
	// If we return the result of makeWin32File directly, it can result in an
	// interface-wrapped nil, rather than a nil interface value.
	f, err := makeWin32File(h)
	if err != nil {
		return nil, err
	}
	return f, nil
}

// closeHandle closes the resources associated with a Win32 handle.
func (f *win32File) closeHandle() {
	f.wgLock.Lock()
	// Atomically set that we are closing, releasing the resources only once.
	if !f.closing.Swap(true) {
		f.wgLock.Unlock()
		// cancel all IO and wait for it to complete
		_ = cancelIoEx(f.handle, nil)
		f.wg.Wait()
		// at this point, no new IO can start
		windows.Close(f.handle)
		f.handle = 0
	} else {
		f.wgLock.Unlock()
	}
}

// Close closes a win32File.
func (f *win32File) Close() error {
	f.closeHandle()
	return nil
}

// IsClosed checks if the file has been closed.
func (f *win32File) IsClosed() bool {
	return f.closing.Load()
}

// prepareIO prepares for a new IO operation.
// The caller must call f.wg.Done() when the IO is finished, prior to Close() returning.
func (f *win32File) prepareIO() (*ioOperation, error) {
	f.wgLock.RLock()
	if f.closing.Load() {
		f.wgLock.RUnlock()
		return nil, ErrFileClosed
	}
	f.wg.Add(1)
	f.wgLock.RUnlock()
	c := &ioOperation{}
	c.ch = make(chan ioResult)
	return c, nil
}

// ioCompletionProcessor processes completed async IOs forever.
func ioCompletionProcessor(h windows.Handle) {
	for {
		var bytes uint32
		var key uintptr
		var op *ioOperation
		err := getQueuedCompletionStatus(h, &bytes, &key, &op, windows.INFINITE)
		if op == nil {
			panic(err)
		}
		op.ch <- ioResult{bytes, err}
	}
}

// todo: helsaawy - create an asyncIO version that takes a context

// asyncIO processes the return value from ReadFile or WriteFile, blocking until
// the operation has actually completed.
func (f *win32File) asyncIO(c *ioOperation, d *deadlineHandler, bytes uint32, err error) (int, error) {
	if err != windows.ERROR_IO_PENDING { //nolint:errorlint // err is Errno
		return int(bytes), err
	}

	if f.closing.Load() {
		_ = cancelIoEx(f.handle, &c.o)
	}

	var timeout timeoutChan
	if d != nil {
		d.channelLock.Lock()
		timeout = d.channel
		d.channelLock.Unlock()
	}

	var r ioResult
	select {
	case r = <-c.ch:
		err = r.err
		if err == windows.ERROR_OPERATION_ABORTED { //nolint:errorlint // err is Errno
			if f.closing.Load() {
				err = ErrFileClosed
			}
		} else if err != nil && f.socket {
			// err is from Win32. Query the overlapped structure to get the winsock error.
			var bytes, flags uint32
			err = wsaGetOverlappedResult(f.handle, &c.o, &bytes, false, &flags)
		}
	case <-timeout:
		_ = cancelIoEx(f.handle, &c.o)
		r = <-c.ch
		err = r.err
		if err == windows.ERROR_OPERATION_ABORTED { //nolint:errorlint // err is Errno
			err = ErrTimeout
		}
	}

	// runtime.KeepAlive is needed, as c is passed via native
	// code to ioCompletionProcessor, c must remain alive
	// until the channel read is complete.
	// todo: (de)allocate *ioOperation via win32 heap functions, instead of needing to KeepAlive?
	runtime.KeepAlive(c)
	return int(r.bytes), err
}

// Read reads from a file handle.
func (f *win32File) Read(b []byte) (int, error) {
	c, err := f.prepareIO()
	if err != nil {
		return 0, err
	}
	defer f.wg.Done()

	if f.readDeadline.timedout.Load() {
		return 0, ErrTimeout
	}

	var bytes uint32
	err = windows.ReadFile(f.handle, b, &bytes, &c.o)
	n, err := f.asyncIO(c, &f.readDeadline, bytes, err)
	runtime.KeepAlive(b)

	// Handle EOF conditions.
	if err == nil && n == 0 && len(b) != 0 {
		return 0, io.EOF
	} else if err == windows.ERROR_BROKEN_PIPE { //nolint:errorlint // err is Errno
		return 0, io.EOF
	}
	return n, err
}

// Write writes to a file handle.
func (f *win32File) Write(b []byte) (int, error) {
	c, err := f.prepareIO()
	if err != nil {
		return 0, err
	}
	defer f.wg.Done()

	if f.writeDeadline.timedout.Load() {
		return 0, ErrTimeout
	}

	var bytes uint32
	err = windows.WriteFile(f.handle, b, &bytes, &c.o)
	n, err := f.asyncIO(c, &f.writeDeadline, bytes, err)
	runtime.KeepAlive(b)
	return n, err
}

func (f *win32File) SetReadDeadline(deadline time.Time) error {
	return f.readDeadline.set(deadline)
}

func (f *win32File) SetWriteDeadline(deadline time.Time) error {
	return f.writeDeadline.set(deadline)
}

func (f *win32File) Flush() error {
	return windows.FlushFileBuffers(f.handle)
}

func (f *win32File) Fd() uintptr {
	return uintptr(f.handle)
}

func (d *deadlineHandler) set(deadline time.Time) error {
	d.setLock.Lock()
	defer d.setLock.Unlock()

	if d.timer != nil {
		if !d.timer.Stop() {
			<-d.channel
		}
		d.timer = nil
	}
	d.timedout.Store(false)

	select {
	case <-d.channel:
		d.channelLock.Lock()
		d.channel = make(chan struct{})
		d.channelLock.Unlock()
	default:
	}

	if deadline.IsZero() {
		return nil
	}

	timeoutIO := func() {
		d.timedout.Store(true)
		close(d.channel)
	}

	now := time.Now()
	duration := deadline.Sub(now)
	if deadline.After(now) {
		// Deadline is in the future, set a timer to wait
		d.timer = time.AfterFunc(duration, timeoutIO)
	} else {
		// Deadline is in the past. Cancel all pending IO now.
		timeoutIO()
	}
	return nil
}
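// Illustrative sketch (not part of the vendored file): wrapping an overlapped
// Win32 handle with NewOpenFile so reads honor deadlines. The path is
// hypothetical; the handle must be opened with FILE_FLAG_OVERLAPPED, since
// win32File drives all IO through the completion port set up above.
package main

import (
	"fmt"
	"time"

	winio "github.com/Microsoft/go-winio"
	"golang.org/x/sys/windows"
)

func main() {
	name, _ := windows.UTF16PtrFromString(`C:\temp\example.txt`) // hypothetical path
	h, err := windows.CreateFile(name, windows.GENERIC_READ,
		windows.FILE_SHARE_READ, nil, windows.OPEN_EXISTING,
		windows.FILE_FLAG_OVERLAPPED, 0)
	if err != nil {
		panic(err)
	}
	f, err := winio.NewOpenFile(h) // takes ownership of h
	if err != nil {
		panic(err)
	}
	defer f.Close()

	// SetReadDeadline is on the concrete file, reachable via type assertion.
	if d, ok := f.(interface{ SetReadDeadline(time.Time) error }); ok {
		_ = d.SetReadDeadline(time.Now().Add(5 * time.Second))
	}
	buf := make([]byte, 64)
	n, err := f.Read(buf) // returns ErrTimeout if the deadline fires first
	fmt.Println(n, err)
}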
106
e2e/vendor/github.com/Microsoft/go-winio/fileinfo.go
generated
vendored
Normal file
@ -0,0 +1,106 @@
//go:build windows
// +build windows

package winio

import (
	"os"
	"runtime"
	"unsafe"

	"golang.org/x/sys/windows"
)

// FileBasicInfo contains file access time and file attributes information.
type FileBasicInfo struct {
	CreationTime, LastAccessTime, LastWriteTime, ChangeTime windows.Filetime
	FileAttributes                                          uint32
	_                                                       uint32 // padding
}

// alignedFileBasicInfo is a FileBasicInfo, but aligned to uint64 by containing
// uint64 rather than windows.Filetime. Filetime contains two uint32s. uint64
// alignment is necessary to pass this as FILE_BASIC_INFO.
type alignedFileBasicInfo struct {
	CreationTime, LastAccessTime, LastWriteTime, ChangeTime uint64
	FileAttributes                                          uint32
	_                                                       uint32 // padding
}

// GetFileBasicInfo retrieves times and attributes for a file.
func GetFileBasicInfo(f *os.File) (*FileBasicInfo, error) {
	bi := &alignedFileBasicInfo{}
	if err := windows.GetFileInformationByHandleEx(
		windows.Handle(f.Fd()),
		windows.FileBasicInfo,
		(*byte)(unsafe.Pointer(bi)),
		uint32(unsafe.Sizeof(*bi)),
	); err != nil {
		return nil, &os.PathError{Op: "GetFileInformationByHandleEx", Path: f.Name(), Err: err}
	}
	runtime.KeepAlive(f)
	// Reinterpret the alignedFileBasicInfo as a FileBasicInfo so it matches the
	// public API of this module. The data may be unnecessarily aligned.
	return (*FileBasicInfo)(unsafe.Pointer(bi)), nil
}

// SetFileBasicInfo sets times and attributes for a file.
func SetFileBasicInfo(f *os.File, bi *FileBasicInfo) error {
	// Create an alignedFileBasicInfo based on a FileBasicInfo. The copy is
	// suitable to pass to SetFileInformationByHandle.
	biAligned := *(*alignedFileBasicInfo)(unsafe.Pointer(bi))
	if err := windows.SetFileInformationByHandle(
		windows.Handle(f.Fd()),
		windows.FileBasicInfo,
		(*byte)(unsafe.Pointer(&biAligned)),
		uint32(unsafe.Sizeof(biAligned)),
	); err != nil {
		return &os.PathError{Op: "SetFileInformationByHandle", Path: f.Name(), Err: err}
	}
	runtime.KeepAlive(f)
	return nil
}

// FileStandardInfo contains extended information for the file.
// FILE_STANDARD_INFO in WinBase.h
// https://docs.microsoft.com/en-us/windows/win32/api/winbase/ns-winbase-file_standard_info
type FileStandardInfo struct {
	AllocationSize, EndOfFile int64
	NumberOfLinks             uint32
	DeletePending, Directory  bool
}

// GetFileStandardInfo retrieves extended information for the file.
func GetFileStandardInfo(f *os.File) (*FileStandardInfo, error) {
	si := &FileStandardInfo{}
	if err := windows.GetFileInformationByHandleEx(windows.Handle(f.Fd()),
		windows.FileStandardInfo,
		(*byte)(unsafe.Pointer(si)),
		uint32(unsafe.Sizeof(*si))); err != nil {
		return nil, &os.PathError{Op: "GetFileInformationByHandleEx", Path: f.Name(), Err: err}
	}
	runtime.KeepAlive(f)
	return si, nil
}

// FileIDInfo contains the volume serial number and file ID for a file. This pair should be
// unique on a system.
type FileIDInfo struct {
	VolumeSerialNumber uint64
	FileID             [16]byte
}

// GetFileID retrieves the unique (volume, file ID) pair for a file.
func GetFileID(f *os.File) (*FileIDInfo, error) {
	fileID := &FileIDInfo{}
	if err := windows.GetFileInformationByHandleEx(
		windows.Handle(f.Fd()),
		windows.FileIdInfo,
		(*byte)(unsafe.Pointer(fileID)),
		uint32(unsafe.Sizeof(*fileID)),
	); err != nil {
		return nil, &os.PathError{Op: "GetFileInformationByHandleEx", Path: f.Name(), Err: err}
	}
	runtime.KeepAlive(f)
	return fileID, nil
}
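// Illustrative sketch (not part of the vendored file): reading a file's basic
// info and its (volume, file ID) identity with the helpers above. The path is
// hypothetical; SetFileBasicInfo would additionally need a handle opened with
// write access, so only the getters are shown.
package main

import (
	"fmt"
	"os"

	winio "github.com/Microsoft/go-winio"
)

func main() {
	f, err := os.Open(`C:\temp\example.txt`) // hypothetical path
	if err != nil {
		panic(err)
	}
	defer f.Close()

	bi, err := winio.GetFileBasicInfo(f)
	if err != nil {
		panic(err)
	}
	id, err := winio.GetFileID(f)
	if err != nil {
		panic(err)
	}
	fmt.Printf("attrs=%#x volume=%d id=%x\n",
		bi.FileAttributes, id.VolumeSerialNumber, id.FileID)
}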
582
e2e/vendor/github.com/Microsoft/go-winio/hvsock.go
generated
vendored
Normal file
@ -0,0 +1,582 @@
//go:build windows
// +build windows

package winio

import (
	"context"
	"errors"
	"fmt"
	"io"
	"net"
	"os"
	"time"
	"unsafe"

	"golang.org/x/sys/windows"

	"github.com/Microsoft/go-winio/internal/socket"
	"github.com/Microsoft/go-winio/pkg/guid"
)

const afHVSock = 34 // AF_HYPERV

// Well known Service and VM IDs
// https://docs.microsoft.com/en-us/virtualization/hyper-v-on-windows/user-guide/make-integration-service#vmid-wildcards

// HvsockGUIDWildcard is the wildcard VmId for accepting connections from all partitions.
func HvsockGUIDWildcard() guid.GUID { // 00000000-0000-0000-0000-000000000000
	return guid.GUID{}
}

// HvsockGUIDBroadcast is the wildcard VmId for broadcasting sends to all partitions.
func HvsockGUIDBroadcast() guid.GUID { // ffffffff-ffff-ffff-ffff-ffffffffffff
	return guid.GUID{
		Data1: 0xffffffff,
		Data2: 0xffff,
		Data3: 0xffff,
		Data4: [8]uint8{0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff},
	}
}

// HvsockGUIDLoopback is the Loopback VmId for accepting connections to the same partition as the connector.
func HvsockGUIDLoopback() guid.GUID { // e0e16197-dd56-4a10-9195-5ee7a155a838
	return guid.GUID{
		Data1: 0xe0e16197,
		Data2: 0xdd56,
		Data3: 0x4a10,
		Data4: [8]uint8{0x91, 0x95, 0x5e, 0xe7, 0xa1, 0x55, 0xa8, 0x38},
	}
}

// HvsockGUIDSiloHost is the address of a silo's host partition:
//   - The silo host of a hosted silo is the utility VM.
//   - The silo host of a silo on a physical host is the physical host.
func HvsockGUIDSiloHost() guid.GUID { // 36bd0c5c-7276-4223-88ba-7d03b654c568
	return guid.GUID{
		Data1: 0x36bd0c5c,
		Data2: 0x7276,
		Data3: 0x4223,
		Data4: [8]byte{0x88, 0xba, 0x7d, 0x03, 0xb6, 0x54, 0xc5, 0x68},
	}
}

// HvsockGUIDChildren is the wildcard VmId for accepting connections from the connector's child partitions.
func HvsockGUIDChildren() guid.GUID { // 90db8b89-0d35-4f79-8ce9-49ea0ac8b7cd
	return guid.GUID{
		Data1: 0x90db8b89,
		Data2: 0xd35,
		Data3: 0x4f79,
		Data4: [8]uint8{0x8c, 0xe9, 0x49, 0xea, 0xa, 0xc8, 0xb7, 0xcd},
	}
}

// HvsockGUIDParent is the wildcard VmId for accepting connections from the connector's parent partition.
// Listening on this VmId accepts connection from:
//   - Inside silos: silo host partition.
//   - Inside hosted silo: host of the VM.
//   - Inside VM: VM host.
//   - Physical host: Not supported.
func HvsockGUIDParent() guid.GUID { // a42e7cda-d03f-480c-9cc2-a4de20abb878
	return guid.GUID{
		Data1: 0xa42e7cda,
		Data2: 0xd03f,
		Data3: 0x480c,
		Data4: [8]uint8{0x9c, 0xc2, 0xa4, 0xde, 0x20, 0xab, 0xb8, 0x78},
	}
}

// hvsockVsockServiceTemplate is the Service GUID used for the VSOCK protocol.
func hvsockVsockServiceTemplate() guid.GUID { // 00000000-facb-11e6-bd58-64006a7986d3
	return guid.GUID{
		Data2: 0xfacb,
		Data3: 0x11e6,
		Data4: [8]uint8{0xbd, 0x58, 0x64, 0x00, 0x6a, 0x79, 0x86, 0xd3},
	}
}

// An HvsockAddr is an address for an AF_HYPERV socket.
type HvsockAddr struct {
	VMID      guid.GUID
	ServiceID guid.GUID
}

type rawHvsockAddr struct {
	Family    uint16
	_         uint16
	VMID      guid.GUID
	ServiceID guid.GUID
}

var _ socket.RawSockaddr = &rawHvsockAddr{}

// Network returns the address's network name, "hvsock".
func (*HvsockAddr) Network() string {
	return "hvsock"
}

func (addr *HvsockAddr) String() string {
	return fmt.Sprintf("%s:%s", &addr.VMID, &addr.ServiceID)
}

// VsockServiceID returns an hvsock service ID corresponding to the specified AF_VSOCK port.
func VsockServiceID(port uint32) guid.GUID {
	g := hvsockVsockServiceTemplate() // make a copy
	g.Data1 = port
	return g
}

func (addr *HvsockAddr) raw() rawHvsockAddr {
	return rawHvsockAddr{
		Family:    afHVSock,
		VMID:      addr.VMID,
		ServiceID: addr.ServiceID,
	}
}

func (addr *HvsockAddr) fromRaw(raw *rawHvsockAddr) {
	addr.VMID = raw.VMID
	addr.ServiceID = raw.ServiceID
}

// Sockaddr returns a pointer to and the size of this struct.
//
// Implements the [socket.RawSockaddr] interface, and allows use in
// [socket.Bind] and [socket.ConnectEx].
func (r *rawHvsockAddr) Sockaddr() (unsafe.Pointer, int32, error) {
	return unsafe.Pointer(r), int32(unsafe.Sizeof(rawHvsockAddr{})), nil
}

// FromBytes fills the struct from raw address bytes; the Sockaddr interface
// allows use with `sockets.Bind()` and `.ConnectEx()`.
func (r *rawHvsockAddr) FromBytes(b []byte) error {
	n := int(unsafe.Sizeof(rawHvsockAddr{}))

	if len(b) < n {
		return fmt.Errorf("got %d, want %d: %w", len(b), n, socket.ErrBufferSize)
	}

	copy(unsafe.Slice((*byte)(unsafe.Pointer(r)), n), b[:n])
	if r.Family != afHVSock {
		return fmt.Errorf("got %d, want %d: %w", r.Family, afHVSock, socket.ErrAddrFamily)
	}

	return nil
}

// HvsockListener is a socket listener for the AF_HYPERV address family.
type HvsockListener struct {
	sock *win32File
	addr HvsockAddr
}

var _ net.Listener = &HvsockListener{}

// HvsockConn is a connected socket of the AF_HYPERV address family.
type HvsockConn struct {
	sock          *win32File
	local, remote HvsockAddr
}

var _ net.Conn = &HvsockConn{}

func newHVSocket() (*win32File, error) {
	fd, err := windows.Socket(afHVSock, windows.SOCK_STREAM, 1)
	if err != nil {
		return nil, os.NewSyscallError("socket", err)
	}
	f, err := makeWin32File(fd)
	if err != nil {
		windows.Close(fd)
		return nil, err
	}
	f.socket = true
	return f, nil
}

// ListenHvsock listens for connections on the specified hvsock address.
func ListenHvsock(addr *HvsockAddr) (_ *HvsockListener, err error) {
	l := &HvsockListener{addr: *addr}

	var sock *win32File
	sock, err = newHVSocket()
	if err != nil {
		return nil, l.opErr("listen", err)
	}
	defer func() {
		if err != nil {
			_ = sock.Close()
		}
	}()

	sa := addr.raw()
	err = socket.Bind(sock.handle, &sa)
	if err != nil {
		return nil, l.opErr("listen", os.NewSyscallError("socket", err))
	}
	err = windows.Listen(sock.handle, 16)
	if err != nil {
		return nil, l.opErr("listen", os.NewSyscallError("listen", err))
	}
	return &HvsockListener{sock: sock, addr: *addr}, nil
}

func (l *HvsockListener) opErr(op string, err error) error {
	return &net.OpError{Op: op, Net: "hvsock", Addr: &l.addr, Err: err}
}

// Addr returns the listener's network address.
func (l *HvsockListener) Addr() net.Addr {
	return &l.addr
}

// Accept waits for the next connection and returns it.
func (l *HvsockListener) Accept() (_ net.Conn, err error) {
	sock, err := newHVSocket()
	if err != nil {
		return nil, l.opErr("accept", err)
	}
	defer func() {
		if sock != nil {
			sock.Close()
		}
	}()
	c, err := l.sock.prepareIO()
	if err != nil {
		return nil, l.opErr("accept", err)
	}
	defer l.sock.wg.Done()

	// AcceptEx, per documentation, requires an extra 16 bytes per address.
	//
	// https://docs.microsoft.com/en-us/windows/win32/api/mswsock/nf-mswsock-acceptex
	const addrlen = uint32(16 + unsafe.Sizeof(rawHvsockAddr{}))
	var addrbuf [addrlen * 2]byte

	var bytes uint32
	err = windows.AcceptEx(l.sock.handle, sock.handle, &addrbuf[0], 0 /* rxdatalen */, addrlen, addrlen, &bytes, &c.o)
	if _, err = l.sock.asyncIO(c, nil, bytes, err); err != nil {
		return nil, l.opErr("accept", os.NewSyscallError("acceptex", err))
	}

	conn := &HvsockConn{
		sock: sock,
	}
	// The local address returned in the AcceptEx buffer is the same as the Listener socket's
	// address. However, the service GUID reported by GetSockName is different from the Listener's
	// socket, and is sometimes the same as the local address of the socket that dialed the
	// address, with the service GUID.Data1 incremented, but other times is different.
	// todo: does the local address matter? is the listener's address or the actual address appropriate?
	conn.local.fromRaw((*rawHvsockAddr)(unsafe.Pointer(&addrbuf[0])))
	conn.remote.fromRaw((*rawHvsockAddr)(unsafe.Pointer(&addrbuf[addrlen])))

	// initialize the accepted socket and update its properties with those of the listening socket
	if err = windows.Setsockopt(sock.handle,
		windows.SOL_SOCKET, windows.SO_UPDATE_ACCEPT_CONTEXT,
		(*byte)(unsafe.Pointer(&l.sock.handle)), int32(unsafe.Sizeof(l.sock.handle))); err != nil {
		return nil, conn.opErr("accept", os.NewSyscallError("setsockopt", err))
	}

	sock = nil
	return conn, nil
}

// Close closes the listener, causing any pending Accept calls to fail.
func (l *HvsockListener) Close() error {
	return l.sock.Close()
}

// HvsockDialer configures and dials a Hyper-V Socket (ie, [HvsockConn]).
type HvsockDialer struct {
	// Deadline is the time the Dial operation must connect before erroring.
	Deadline time.Time

	// Retries is the number of additional connects to try if the connection times out, is refused,
	// or the host is unreachable.
	Retries uint

	// RetryWait is the time to wait after a connection error to retry.
	RetryWait time.Duration

	rt *time.Timer // redial wait timer
}

// Dial the Hyper-V socket at addr.
//
// See [HvsockDialer.Dial] for more information.
func Dial(ctx context.Context, addr *HvsockAddr) (conn *HvsockConn, err error) {
	return (&HvsockDialer{}).Dial(ctx, addr)
}

// Dial attempts to connect to the Hyper-V socket at addr, and returns a connection if successful.
// Will attempt (HvsockDialer).Retries if dialing fails, waiting (HvsockDialer).RetryWait between
// retries.
//
// Dialing can be cancelled either by providing (HvsockDialer).Deadline, or cancelling ctx.
func (d *HvsockDialer) Dial(ctx context.Context, addr *HvsockAddr) (conn *HvsockConn, err error) {
	op := "dial"
	// create the conn early to use opErr()
	conn = &HvsockConn{
		remote: *addr,
	}

	if !d.Deadline.IsZero() {
		var cancel context.CancelFunc
		ctx, cancel = context.WithDeadline(ctx, d.Deadline)
		defer cancel()
	}

	// preemptive timeout/cancellation check
	if err = ctx.Err(); err != nil {
		return nil, conn.opErr(op, err)
	}

	sock, err := newHVSocket()
	if err != nil {
		return nil, conn.opErr(op, err)
	}
	defer func() {
		if sock != nil {
			sock.Close()
		}
	}()

	sa := addr.raw()
	err = socket.Bind(sock.handle, &sa)
	if err != nil {
		return nil, conn.opErr(op, os.NewSyscallError("bind", err))
	}

	c, err := sock.prepareIO()
	if err != nil {
		return nil, conn.opErr(op, err)
	}
	defer sock.wg.Done()
	var bytes uint32
	for i := uint(0); i <= d.Retries; i++ {
		err = socket.ConnectEx(
			sock.handle,
			&sa,
			nil, // sendBuf
			0,   // sendDataLen
			&bytes,
			(*windows.Overlapped)(unsafe.Pointer(&c.o)))
		_, err = sock.asyncIO(c, nil, bytes, err)
		if i < d.Retries && canRedial(err) {
			if err = d.redialWait(ctx); err == nil {
				continue
			}
		}
		break
	}
	if err != nil {
		return nil, conn.opErr(op, os.NewSyscallError("connectex", err))
	}

	// update the connection properties, so shutdown can be used
	if err = windows.Setsockopt(
		sock.handle,
		windows.SOL_SOCKET,
		windows.SO_UPDATE_CONNECT_CONTEXT,
		nil, // optvalue
		0,   // optlen
	); err != nil {
		return nil, conn.opErr(op, os.NewSyscallError("setsockopt", err))
	}

	// get the local name
	var sal rawHvsockAddr
	err = socket.GetSockName(sock.handle, &sal)
	if err != nil {
		return nil, conn.opErr(op, os.NewSyscallError("getsockname", err))
	}
	conn.local.fromRaw(&sal)

	// one last check for timeout, since asyncIO doesn't check the context
	if err = ctx.Err(); err != nil {
		return nil, conn.opErr(op, err)
	}

	conn.sock = sock
	sock = nil

	return conn, nil
}

// redialWait waits before attempting to redial, resetting the timer as appropriate.
func (d *HvsockDialer) redialWait(ctx context.Context) (err error) {
	if d.RetryWait == 0 {
		return nil
	}

	if d.rt == nil {
		d.rt = time.NewTimer(d.RetryWait)
	} else {
		// should already be stopped and drained
		d.rt.Reset(d.RetryWait)
	}

	select {
	case <-ctx.Done():
	case <-d.rt.C:
		return nil
	}

	// stop and drain the timer
	if !d.rt.Stop() {
		<-d.rt.C
	}
	return ctx.Err()
}

// canRedial assumes the error is a plain, unwrapped windows.Errno provided by a direct syscall.
func canRedial(err error) bool {
	//nolint:errorlint // guaranteed to be an Errno
	switch err {
	case windows.WSAECONNREFUSED, windows.WSAENETUNREACH, windows.WSAETIMEDOUT,
		windows.ERROR_CONNECTION_REFUSED, windows.ERROR_CONNECTION_UNAVAIL:
		return true
	default:
		return false
	}
}

func (conn *HvsockConn) opErr(op string, err error) error {
	// translate from "file closed" to "socket closed"
	if errors.Is(err, ErrFileClosed) {
		err = socket.ErrSocketClosed
	}
	return &net.OpError{Op: op, Net: "hvsock", Source: &conn.local, Addr: &conn.remote, Err: err}
}

func (conn *HvsockConn) Read(b []byte) (int, error) {
	c, err := conn.sock.prepareIO()
	if err != nil {
		return 0, conn.opErr("read", err)
	}
	defer conn.sock.wg.Done()
	buf := windows.WSABuf{Buf: &b[0], Len: uint32(len(b))}
	var flags, bytes uint32
	err = windows.WSARecv(conn.sock.handle, &buf, 1, &bytes, &flags, &c.o, nil)
	n, err := conn.sock.asyncIO(c, &conn.sock.readDeadline, bytes, err)
	if err != nil {
		var eno windows.Errno
		if errors.As(err, &eno) {
			err = os.NewSyscallError("wsarecv", eno)
		}
		return 0, conn.opErr("read", err)
	} else if n == 0 {
		err = io.EOF
	}
	return n, err
}

func (conn *HvsockConn) Write(b []byte) (int, error) {
	t := 0
	for len(b) != 0 {
		n, err := conn.write(b)
		if err != nil {
			return t + n, err
		}
		t += n
		b = b[n:]
	}
	return t, nil
}

func (conn *HvsockConn) write(b []byte) (int, error) {
	c, err := conn.sock.prepareIO()
	if err != nil {
		return 0, conn.opErr("write", err)
	}
	defer conn.sock.wg.Done()
	buf := windows.WSABuf{Buf: &b[0], Len: uint32(len(b))}
	var bytes uint32
	err = windows.WSASend(conn.sock.handle, &buf, 1, &bytes, 0, &c.o, nil)
	n, err := conn.sock.asyncIO(c, &conn.sock.writeDeadline, bytes, err)
	if err != nil {
		var eno windows.Errno
		if errors.As(err, &eno) {
			err = os.NewSyscallError("wsasend", eno)
		}
		return 0, conn.opErr("write", err)
	}
	return n, err
}

// Close closes the socket connection, failing any pending read or write calls.
func (conn *HvsockConn) Close() error {
	return conn.sock.Close()
}

func (conn *HvsockConn) IsClosed() bool {
	return conn.sock.IsClosed()
}

// shutdown disables sending or receiving on a socket.
func (conn *HvsockConn) shutdown(how int) error {
	if conn.IsClosed() {
		return socket.ErrSocketClosed
	}

	err := windows.Shutdown(conn.sock.handle, how)
	if err != nil {
		// If the connection was closed, shutdowns fail with "not connected"
		if errors.Is(err, windows.WSAENOTCONN) ||
			errors.Is(err, windows.WSAESHUTDOWN) {
			err = socket.ErrSocketClosed
		}
		return os.NewSyscallError("shutdown", err)
	}
	return nil
}

// CloseRead shuts down the read end of the socket, preventing future read operations.
func (conn *HvsockConn) CloseRead() error {
	err := conn.shutdown(windows.SHUT_RD)
	if err != nil {
		return conn.opErr("closeread", err)
	}
	return nil
}

// CloseWrite shuts down the write end of the socket, preventing future write operations and
// notifying the other endpoint that no more data will be written.
func (conn *HvsockConn) CloseWrite() error {
	err := conn.shutdown(windows.SHUT_WR)
	if err != nil {
		return conn.opErr("closewrite", err)
	}
	return nil
}

// LocalAddr returns the local address of the connection.
func (conn *HvsockConn) LocalAddr() net.Addr {
	return &conn.local
}

// RemoteAddr returns the remote address of the connection.
func (conn *HvsockConn) RemoteAddr() net.Addr {
	return &conn.remote
}

// SetDeadline implements the net.Conn SetDeadline method.
func (conn *HvsockConn) SetDeadline(t time.Time) error {
	// todo: implement `SetDeadline` for `win32File`
	if err := conn.SetReadDeadline(t); err != nil {
		return fmt.Errorf("set read deadline: %w", err)
	}
	if err := conn.SetWriteDeadline(t); err != nil {
		return fmt.Errorf("set write deadline: %w", err)
	}
	return nil
}

// SetReadDeadline implements the net.Conn SetReadDeadline method.
func (conn *HvsockConn) SetReadDeadline(t time.Time) error {
	return conn.sock.SetReadDeadline(t)
}

// SetWriteDeadline implements the net.Conn SetWriteDeadline method.
func (conn *HvsockConn) SetWriteDeadline(t time.Time) error {
	return conn.sock.SetWriteDeadline(t)
}
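// Illustrative sketch (not part of the vendored file): dialing a guest VM over
// AF_HYPERV with the dialer above. The VmId GUID is a placeholder;
// VsockServiceID maps AF_VSOCK port 5000 into the hvsock service GUID
// namespace (00001388-facb-11e6-bd58-64006a7986d3).
package main

import (
	"context"
	"fmt"
	"time"

	winio "github.com/Microsoft/go-winio"
	"github.com/Microsoft/go-winio/pkg/guid"
)

func main() {
	vmID, err := guid.FromString("ffffffff-ffff-ffff-ffff-ffffffffffff") // placeholder VmId
	if err != nil {
		panic(err)
	}
	addr := &winio.HvsockAddr{
		VMID:      vmID,
		ServiceID: winio.VsockServiceID(5000),
	}

	d := &winio.HvsockDialer{
		Deadline:  time.Now().Add(10 * time.Second),
		Retries:   3,
		RetryWait: time.Second,
	}
	conn, err := d.Dial(context.Background(), addr)
	if err != nil {
		fmt.Println("dial failed:", err)
		return
	}
	defer conn.Close()
	fmt.Println("connected:", conn.LocalAddr(), "->", conn.RemoteAddr())
}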
2
e2e/vendor/github.com/Microsoft/go-winio/internal/fs/doc.go
generated
vendored
Normal file
@ -0,0 +1,2 @@
// This package contains Win32 filesystem functionality.
package fs
262
e2e/vendor/github.com/Microsoft/go-winio/internal/fs/fs.go
generated
vendored
Normal file
@ -0,0 +1,262 @@
//go:build windows

package fs

import (
	"golang.org/x/sys/windows"

	"github.com/Microsoft/go-winio/internal/stringbuffer"
)

//go:generate go run github.com/Microsoft/go-winio/tools/mkwinsyscall -output zsyscall_windows.go fs.go

// https://learn.microsoft.com/en-us/windows/win32/api/fileapi/nf-fileapi-createfilew
//sys CreateFile(name string, access AccessMask, mode FileShareMode, sa *windows.SecurityAttributes, createmode FileCreationDisposition, attrs FileFlagOrAttribute, templatefile windows.Handle) (handle windows.Handle, err error) [failretval==windows.InvalidHandle] = CreateFileW

const NullHandle windows.Handle = 0

// AccessMask defines standard, specific, and generic rights.
//
// Used with CreateFile and NtCreateFile (and co.).
//
// Bitmask:
//
//	 3 3 2 2 2 2 2 2 2 2 2 2 1 1 1 1 1 1 1 1 1 1
//	 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0
//	+---------------+---------------+-------------------------------+
//	|G|G|G|G|Resvd|A| StandardRights|         SpecificRights        |
//	|R|W|E|A|     |S|               |                               |
//	+-+-------------+---------------+-------------------------------+
//
//	GR     Generic Read
//	GW     Generic Write
//	GE     Generic Execute
//	GA     Generic All
//	Resvd  Reserved
//	AS     Access Security System
//
// https://learn.microsoft.com/en-us/windows/win32/secauthz/access-mask
//
// https://learn.microsoft.com/en-us/windows/win32/secauthz/generic-access-rights
//
// https://learn.microsoft.com/en-us/windows/win32/fileio/file-access-rights-constants
type AccessMask = windows.ACCESS_MASK

//nolint:revive // SNAKE_CASE is not idiomatic in Go, but aligned with Win32 API.
const (
	// Not actually any.
	//
	// For CreateFile: "query certain metadata such as file, directory, or device attributes without accessing that file or device"
	// https://learn.microsoft.com/en-us/windows/win32/api/fileapi/nf-fileapi-createfilew#parameters
	FILE_ANY_ACCESS AccessMask = 0

	GENERIC_READ           AccessMask = 0x8000_0000
	GENERIC_WRITE          AccessMask = 0x4000_0000
	GENERIC_EXECUTE        AccessMask = 0x2000_0000
	GENERIC_ALL            AccessMask = 0x1000_0000
	ACCESS_SYSTEM_SECURITY AccessMask = 0x0100_0000

	// Specific Object Access
	// from ntioapi.h

	FILE_READ_DATA      AccessMask = (0x0001) // file & pipe
	FILE_LIST_DIRECTORY AccessMask = (0x0001) // directory

	FILE_WRITE_DATA AccessMask = (0x0002) // file & pipe
	FILE_ADD_FILE   AccessMask = (0x0002) // directory

	FILE_APPEND_DATA          AccessMask = (0x0004) // file
	FILE_ADD_SUBDIRECTORY     AccessMask = (0x0004) // directory
	FILE_CREATE_PIPE_INSTANCE AccessMask = (0x0004) // named pipe

	FILE_READ_EA         AccessMask = (0x0008) // file & directory
	FILE_READ_PROPERTIES AccessMask = FILE_READ_EA

	FILE_WRITE_EA         AccessMask = (0x0010) // file & directory
	FILE_WRITE_PROPERTIES AccessMask = FILE_WRITE_EA

	FILE_EXECUTE  AccessMask = (0x0020) // file
	FILE_TRAVERSE AccessMask = (0x0020) // directory

	FILE_DELETE_CHILD AccessMask = (0x0040) // directory

	FILE_READ_ATTRIBUTES AccessMask = (0x0080) // all

	FILE_WRITE_ATTRIBUTES AccessMask = (0x0100) // all

	FILE_ALL_ACCESS      AccessMask = (STANDARD_RIGHTS_REQUIRED | SYNCHRONIZE | 0x1FF)
	FILE_GENERIC_READ    AccessMask = (STANDARD_RIGHTS_READ | FILE_READ_DATA | FILE_READ_ATTRIBUTES | FILE_READ_EA | SYNCHRONIZE)
	FILE_GENERIC_WRITE   AccessMask = (STANDARD_RIGHTS_WRITE | FILE_WRITE_DATA | FILE_WRITE_ATTRIBUTES | FILE_WRITE_EA | FILE_APPEND_DATA | SYNCHRONIZE)
	FILE_GENERIC_EXECUTE AccessMask = (STANDARD_RIGHTS_EXECUTE | FILE_READ_ATTRIBUTES | FILE_EXECUTE | SYNCHRONIZE)

	SPECIFIC_RIGHTS_ALL AccessMask = 0x0000FFFF

	// Standard Access
	// from ntseapi.h

	DELETE       AccessMask = 0x0001_0000
	READ_CONTROL AccessMask = 0x0002_0000
	WRITE_DAC    AccessMask = 0x0004_0000
	WRITE_OWNER  AccessMask = 0x0008_0000
	SYNCHRONIZE  AccessMask = 0x0010_0000

	STANDARD_RIGHTS_REQUIRED AccessMask = 0x000F_0000

	STANDARD_RIGHTS_READ    AccessMask = READ_CONTROL
	STANDARD_RIGHTS_WRITE   AccessMask = READ_CONTROL
	STANDARD_RIGHTS_EXECUTE AccessMask = READ_CONTROL

	STANDARD_RIGHTS_ALL AccessMask = 0x001F_0000
)

type FileShareMode uint32

//nolint:revive // SNAKE_CASE is not idiomatic in Go, but aligned with Win32 API.
const (
	FILE_SHARE_NONE        FileShareMode = 0x00
	FILE_SHARE_READ        FileShareMode = 0x01
	FILE_SHARE_WRITE       FileShareMode = 0x02
	FILE_SHARE_DELETE      FileShareMode = 0x04
	FILE_SHARE_VALID_FLAGS FileShareMode = 0x07
)

type FileCreationDisposition uint32

//nolint:revive // SNAKE_CASE is not idiomatic in Go, but aligned with Win32 API.
const (
	// from winbase.h

	CREATE_NEW        FileCreationDisposition = 0x01
	CREATE_ALWAYS     FileCreationDisposition = 0x02
	OPEN_EXISTING     FileCreationDisposition = 0x03
	OPEN_ALWAYS       FileCreationDisposition = 0x04
	TRUNCATE_EXISTING FileCreationDisposition = 0x05
)

// NTFileCreationDisposition contains the create disposition values for NtCreate*.
type NTFileCreationDisposition uint32

//nolint:revive // SNAKE_CASE is not idiomatic in Go, but aligned with Win32 API.
const (
	// From ntioapi.h

	FILE_SUPERSEDE           NTFileCreationDisposition = 0x00
	FILE_OPEN                NTFileCreationDisposition = 0x01
	FILE_CREATE              NTFileCreationDisposition = 0x02
	FILE_OPEN_IF             NTFileCreationDisposition = 0x03
	FILE_OVERWRITE           NTFileCreationDisposition = 0x04
	FILE_OVERWRITE_IF        NTFileCreationDisposition = 0x05
	FILE_MAXIMUM_DISPOSITION NTFileCreationDisposition = 0x05
)

// FileFlagOrAttribute covers both flags and attributes, since CreateFile and co.
// take them together as one parameter. Define the alias until we can use
// generics to allow both.
//
// https://learn.microsoft.com/en-us/windows/win32/fileio/file-attribute-constants
type FileFlagOrAttribute uint32

//nolint:revive // SNAKE_CASE is not idiomatic in Go, but aligned with Win32 API.
const (
	// from winnt.h

	FILE_FLAG_WRITE_THROUGH       FileFlagOrAttribute = 0x8000_0000
	FILE_FLAG_OVERLAPPED          FileFlagOrAttribute = 0x4000_0000
	FILE_FLAG_NO_BUFFERING        FileFlagOrAttribute = 0x2000_0000
	FILE_FLAG_RANDOM_ACCESS       FileFlagOrAttribute = 0x1000_0000
	FILE_FLAG_SEQUENTIAL_SCAN     FileFlagOrAttribute = 0x0800_0000
	FILE_FLAG_DELETE_ON_CLOSE     FileFlagOrAttribute = 0x0400_0000
	FILE_FLAG_BACKUP_SEMANTICS    FileFlagOrAttribute = 0x0200_0000
	FILE_FLAG_POSIX_SEMANTICS     FileFlagOrAttribute = 0x0100_0000
	FILE_FLAG_OPEN_REPARSE_POINT  FileFlagOrAttribute = 0x0020_0000
	FILE_FLAG_OPEN_NO_RECALL      FileFlagOrAttribute = 0x0010_0000
	FILE_FLAG_FIRST_PIPE_INSTANCE FileFlagOrAttribute = 0x0008_0000
)

// NTCreateOptions is the dedicated CreateOptions parameter taken by the
// NtCreate* functions.
//
// https://learn.microsoft.com/en-us/windows/win32/api/Winternl/nf-winternl-ntcreatefile
//
// https://learn.microsoft.com/en-us/windows/win32/devnotes/nt-create-named-pipe-file
type NTCreateOptions uint32

//nolint:revive // SNAKE_CASE is not idiomatic in Go, but aligned with Win32 API.
const (
	// From ntioapi.h

	FILE_DIRECTORY_FILE            NTCreateOptions = 0x0000_0001
	FILE_WRITE_THROUGH             NTCreateOptions = 0x0000_0002
	FILE_SEQUENTIAL_ONLY           NTCreateOptions = 0x0000_0004
	FILE_NO_INTERMEDIATE_BUFFERING NTCreateOptions = 0x0000_0008

	FILE_SYNCHRONOUS_IO_ALERT    NTCreateOptions = 0x0000_0010
	FILE_SYNCHRONOUS_IO_NONALERT NTCreateOptions = 0x0000_0020
	FILE_NON_DIRECTORY_FILE      NTCreateOptions = 0x0000_0040
	FILE_CREATE_TREE_CONNECTION  NTCreateOptions = 0x0000_0080

	FILE_COMPLETE_IF_OPLOCKED NTCreateOptions = 0x0000_0100
	FILE_NO_EA_KNOWLEDGE      NTCreateOptions = 0x0000_0200
	FILE_DISABLE_TUNNELING    NTCreateOptions = 0x0000_0400
	FILE_RANDOM_ACCESS        NTCreateOptions = 0x0000_0800

	FILE_DELETE_ON_CLOSE        NTCreateOptions = 0x0000_1000
	FILE_OPEN_BY_FILE_ID        NTCreateOptions = 0x0000_2000
	FILE_OPEN_FOR_BACKUP_INTENT NTCreateOptions = 0x0000_4000
	FILE_NO_COMPRESSION         NTCreateOptions = 0x0000_8000
)

type FileSQSFlag = FileFlagOrAttribute

//nolint:revive // SNAKE_CASE is not idiomatic in Go, but aligned with Win32 API.
const (
	// from winbase.h

	SECURITY_ANONYMOUS      FileSQSFlag = FileSQSFlag(SecurityAnonymous << 16)
	SECURITY_IDENTIFICATION FileSQSFlag = FileSQSFlag(SecurityIdentification << 16)
	SECURITY_IMPERSONATION  FileSQSFlag = FileSQSFlag(SecurityImpersonation << 16)
	SECURITY_DELEGATION     FileSQSFlag = FileSQSFlag(SecurityDelegation << 16)

	SECURITY_SQOS_PRESENT     FileSQSFlag = 0x0010_0000
	SECURITY_VALID_SQOS_FLAGS FileSQSFlag = 0x001F_0000
)

// GetFinalPathFlag holds the GetFinalPathNameByHandle flags.
//
// https://learn.microsoft.com/en-us/windows/win32/api/fileapi/nf-fileapi-getfinalpathnamebyhandlew#parameters
type GetFinalPathFlag uint32

//nolint:revive // SNAKE_CASE is not idiomatic in Go, but aligned with Win32 API.
const (
	GetFinalPathDefaultFlag GetFinalPathFlag = 0x0

	FILE_NAME_NORMALIZED GetFinalPathFlag = 0x0
	FILE_NAME_OPENED     GetFinalPathFlag = 0x8

	VOLUME_NAME_DOS  GetFinalPathFlag = 0x0
	VOLUME_NAME_GUID GetFinalPathFlag = 0x1
	VOLUME_NAME_NT   GetFinalPathFlag = 0x2
	VOLUME_NAME_NONE GetFinalPathFlag = 0x4
)

// GetFinalPathNameByHandle facilitates calling the Windows API GetFinalPathNameByHandle
// with the given handle and flags. It transparently takes care of creating a buffer of the
// correct size for the call.
//
// https://learn.microsoft.com/en-us/windows/win32/api/fileapi/nf-fileapi-getfinalpathnamebyhandlew
func GetFinalPathNameByHandle(h windows.Handle, flags GetFinalPathFlag) (string, error) {
	b := stringbuffer.NewWString()
	//TODO: can loop infinitely if Win32 keeps returning the same (or a larger) n?
	for {
		n, err := windows.GetFinalPathNameByHandle(h, b.Pointer(), b.Cap(), uint32(flags))
		if err != nil {
			return "", err
		}
		// If the buffer wasn't large enough, n will be the total size needed (including null terminator).
		// Resize and try again.
		if n > b.Cap() {
			b.ResizeTo(n)
			continue
		}
		// If the buffer is large enough, n will be the size not including the null terminator.
		// Convert to a Go string and return.
		return b.String(), nil
	}
}
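// Illustrative sketch (not part of the vendored file): opening a directory
// handle with the typed wrappers above and resolving its final path, as it
// might appear in a test inside the go-winio module (the fs package is
// internal and not importable elsewhere). The path is hypothetical;
// FILE_FLAG_BACKUP_SEMANTICS is required to open directory handles.
package main

import (
	"fmt"

	"github.com/Microsoft/go-winio/internal/fs" // internal: usable within go-winio only
	"golang.org/x/sys/windows"
)

func main() {
	h, err := fs.CreateFile(`C:\temp`, // hypothetical path
		fs.FILE_READ_ATTRIBUTES,
		fs.FILE_SHARE_READ|fs.FILE_SHARE_WRITE|fs.FILE_SHARE_DELETE,
		nil, // security attributes
		fs.OPEN_EXISTING,
		fs.FILE_FLAG_BACKUP_SEMANTICS,
		fs.NullHandle)
	if err != nil {
		panic(err)
	}
	defer windows.CloseHandle(h)

	// FILE_NAME_NORMALIZED and VOLUME_NAME_DOS are the zero-valued defaults.
	p, err := fs.GetFinalPathNameByHandle(h, fs.FILE_NAME_NORMALIZED|fs.VOLUME_NAME_DOS)
	fmt.Println(p, err)
}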
12
e2e/vendor/github.com/Microsoft/go-winio/internal/fs/security.go
generated
vendored
Normal file
@ -0,0 +1,12 @@
package fs

// https://learn.microsoft.com/en-us/windows/win32/api/winnt/ne-winnt-security_impersonation_level
type SecurityImpersonationLevel int32 // C default enums underlying type is `int`, which is Go `int32`

// Impersonation levels
const (
	SecurityAnonymous      SecurityImpersonationLevel = 0
	SecurityIdentification SecurityImpersonationLevel = 1
	SecurityImpersonation  SecurityImpersonationLevel = 2
	SecurityDelegation     SecurityImpersonationLevel = 3
)
61
e2e/vendor/github.com/Microsoft/go-winio/internal/fs/zsyscall_windows.go
generated
vendored
Normal file
@ -0,0 +1,61 @@
//go:build windows

// Code generated by 'go generate' using "github.com/Microsoft/go-winio/tools/mkwinsyscall"; DO NOT EDIT.

package fs

import (
	"syscall"
	"unsafe"

	"golang.org/x/sys/windows"
)

var _ unsafe.Pointer

// Do the interface allocations only once for common
// Errno values.
const (
	errnoERROR_IO_PENDING = 997
)

var (
	errERROR_IO_PENDING error = syscall.Errno(errnoERROR_IO_PENDING)
	errERROR_EINVAL     error = syscall.EINVAL
)

// errnoErr returns common boxed Errno values, to prevent
// allocations at runtime.
func errnoErr(e syscall.Errno) error {
	switch e {
	case 0:
		return errERROR_EINVAL
	case errnoERROR_IO_PENDING:
		return errERROR_IO_PENDING
	}
	return e
}

var (
	modkernel32 = windows.NewLazySystemDLL("kernel32.dll")

	procCreateFileW = modkernel32.NewProc("CreateFileW")
)

func CreateFile(name string, access AccessMask, mode FileShareMode, sa *windows.SecurityAttributes, createmode FileCreationDisposition, attrs FileFlagOrAttribute, templatefile windows.Handle) (handle windows.Handle, err error) {
	var _p0 *uint16
	_p0, err = syscall.UTF16PtrFromString(name)
	if err != nil {
		return
	}
	return _CreateFile(_p0, access, mode, sa, createmode, attrs, templatefile)
}

func _CreateFile(name *uint16, access AccessMask, mode FileShareMode, sa *windows.SecurityAttributes, createmode FileCreationDisposition, attrs FileFlagOrAttribute, templatefile windows.Handle) (handle windows.Handle, err error) {
	r0, _, e1 := syscall.SyscallN(procCreateFileW.Addr(), uintptr(unsafe.Pointer(name)), uintptr(access), uintptr(mode), uintptr(unsafe.Pointer(sa)), uintptr(createmode), uintptr(attrs), uintptr(templatefile))
	handle = windows.Handle(r0)
	if handle == windows.InvalidHandle {
		err = errnoErr(e1)
	}
	return
}
20
e2e/vendor/github.com/Microsoft/go-winio/internal/socket/rawaddr.go
generated
vendored
Normal file
@ -0,0 +1,20 @@
package socket

import (
	"unsafe"
)

// RawSockaddr allows structs to be used with [Bind] and [ConnectEx]. The
// struct must meet the Win32 sockaddr requirements specified here:
// https://docs.microsoft.com/en-us/windows/win32/winsock/sockaddr-2
//
// Specifically, the struct must be at least large enough to hold an int16
// (unsigned short) for the address family.
type RawSockaddr interface {
	// Sockaddr returns a pointer to the RawSockaddr and its struct size, allowing
	// for the RawSockaddr's data to be overwritten by syscalls (if necessary).
	//
	// It is the caller's responsibility to validate that the values are valid; invalid
	// pointers or size can cause a panic.
	Sockaddr() (unsafe.Pointer, int32, error)
}
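// Illustrative sketch (not part of the vendored file): a minimal RawSockaddr
// implementation for AF_UNIX, showing the layout contract the interface
// expects (family word first, fixed-size struct). The type and path here are
// hypothetical helpers, not part of this package.
package main

import (
	"unsafe"

	"golang.org/x/sys/windows"
)

// rawUnixAddr mirrors sockaddr_un: a uint16 family followed by the path bytes.
type rawUnixAddr struct {
	Family uint16
	Path   [108]byte
}

// Sockaddr satisfies socket.RawSockaddr: a pointer to the struct plus its size.
func (r *rawUnixAddr) Sockaddr() (unsafe.Pointer, int32, error) {
	return unsafe.Pointer(r), int32(unsafe.Sizeof(*r)), nil
}

func main() {
	r := &rawUnixAddr{Family: uint16(windows.AF_UNIX)}
	copy(r.Path[:], `C:\temp\example.sock`) // hypothetical socket path
	ptr, n, _ := r.Sockaddr()
	_ = ptr
	_ = n // these would be handed to socket.Bind / socket.ConnectEx inside go-winio
}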
177
e2e/vendor/github.com/Microsoft/go-winio/internal/socket/socket.go
generated
vendored
Normal file
@ -0,0 +1,177 @@
//go:build windows

package socket

import (
	"errors"
	"fmt"
	"net"
	"sync"
	"syscall"
	"unsafe"

	"github.com/Microsoft/go-winio/pkg/guid"
	"golang.org/x/sys/windows"
)

//go:generate go run github.com/Microsoft/go-winio/tools/mkwinsyscall -output zsyscall_windows.go socket.go

//sys getsockname(s windows.Handle, name unsafe.Pointer, namelen *int32) (err error) [failretval==socketError] = ws2_32.getsockname
//sys getpeername(s windows.Handle, name unsafe.Pointer, namelen *int32) (err error) [failretval==socketError] = ws2_32.getpeername
//sys bind(s windows.Handle, name unsafe.Pointer, namelen int32) (err error) [failretval==socketError] = ws2_32.bind

const socketError = uintptr(^uint32(0))

var (
	// todo(helsaawy): create custom error types to store the desired vs actual size and addr family?

	ErrBufferSize     = errors.New("buffer size")
	ErrAddrFamily     = errors.New("address family")
	ErrInvalidPointer = errors.New("invalid pointer")
	ErrSocketClosed   = fmt.Errorf("socket closed: %w", net.ErrClosed)
)

// todo(helsaawy): replace these with generics, ie: GetSockName[S RawSockaddr](s windows.Handle) (S, error)

// GetSockName writes the local address of socket s to the [RawSockaddr] rsa.
// If rsa is not large enough, [windows.WSAEFAULT] is returned.
func GetSockName(s windows.Handle, rsa RawSockaddr) error {
	ptr, l, err := rsa.Sockaddr()
	if err != nil {
		return fmt.Errorf("could not retrieve socket pointer and size: %w", err)
	}

	// although getsockname returns WSAEFAULT if the buffer is too small, it does not set
	// &l to the correct size, so--apart from doubling the buffer repeatedly--there is no remedy
	return getsockname(s, ptr, &l)
}

// GetPeerName returns the remote address the socket is connected to.
//
// See [GetSockName] for more information.
func GetPeerName(s windows.Handle, rsa RawSockaddr) error {
	ptr, l, err := rsa.Sockaddr()
	if err != nil {
		return fmt.Errorf("could not retrieve socket pointer and size: %w", err)
	}

	return getpeername(s, ptr, &l)
}

func Bind(s windows.Handle, rsa RawSockaddr) (err error) {
	ptr, l, err := rsa.Sockaddr()
	if err != nil {
		return fmt.Errorf("could not retrieve socket pointer and size: %w", err)
	}

	return bind(s, ptr, l)
}

// "golang.org/x/sys/windows".ConnectEx and .Bind only accept internal implementations of
// their sockaddr interface, so they cannot be used with HvsockAddr.
// Replicate the functionality here from
// https://cs.opensource.google/go/x/sys/+/master:windows/syscall_windows.go

// The function pointers to `AcceptEx`, `ConnectEx` and `GetAcceptExSockaddrs` must be loaded at
// runtime via a WSAIoctl call:
// https://docs.microsoft.com/en-us/windows/win32/api/Mswsock/nc-mswsock-lpfn_connectex#remarks

type runtimeFunc struct {
	id   guid.GUID
	once sync.Once
	addr uintptr
	err  error
}

func (f *runtimeFunc) Load() error {
	f.once.Do(func() {
		var s windows.Handle
		s, f.err = windows.Socket(windows.AF_INET, windows.SOCK_STREAM, windows.IPPROTO_TCP)
		if f.err != nil {
			return
		}
		defer windows.CloseHandle(s) //nolint:errcheck

		var n uint32
		f.err = windows.WSAIoctl(s,
			windows.SIO_GET_EXTENSION_FUNCTION_POINTER,
			(*byte)(unsafe.Pointer(&f.id)),
			uint32(unsafe.Sizeof(f.id)),
			(*byte)(unsafe.Pointer(&f.addr)),
			uint32(unsafe.Sizeof(f.addr)),
			&n,
			nil, // overlapped
			0,   // completionRoutine
		)
	})
	return f.err
}

var (
	// todo: add `AcceptEx` and `GetAcceptExSockaddrs`
	WSAID_CONNECTEX = guid.GUID{ //revive:disable-line:var-naming ALL_CAPS
		Data1: 0x25a207b9,
		Data2: 0xddf3,
		Data3: 0x4660,
		Data4: [8]byte{0x8e, 0xe9, 0x76, 0xe5, 0x8c, 0x74, 0x06, 0x3e},
	}

	connectExFunc = runtimeFunc{id: WSAID_CONNECTEX}
)

func ConnectEx(
	fd windows.Handle,
	rsa RawSockaddr,
	sendBuf *byte,
	sendDataLen uint32,
	bytesSent *uint32,
	overlapped *windows.Overlapped,
) error {
	if err := connectExFunc.Load(); err != nil {
		return fmt.Errorf("failed to load ConnectEx function pointer: %w", err)
	}
	ptr, n, err := rsa.Sockaddr()
	if err != nil {
		return err
	}
	return connectEx(fd, ptr, n, sendBuf, sendDataLen, bytesSent, overlapped)
}

// BOOL LpfnConnectex(
//	[in]           SOCKET s,
//	[in]           const sockaddr *name,
//	[in]           int namelen,
//	[in, optional] PVOID lpSendBuffer,
//	[in]           DWORD dwSendDataLength,
//	[out]          LPDWORD lpdwBytesSent,
//	[in]           LPOVERLAPPED lpOverlapped
// )

func connectEx(
	s windows.Handle,
	name unsafe.Pointer,
	namelen int32,
	sendBuf *byte,
	sendDataLen uint32,
	bytesSent *uint32,
	overlapped *windows.Overlapped,
) (err error) {
	r1, _, e1 := syscall.SyscallN(connectExFunc.addr,
		uintptr(s),
		uintptr(name),
		uintptr(namelen),
		uintptr(unsafe.Pointer(sendBuf)),
		uintptr(sendDataLen),
		uintptr(unsafe.Pointer(bytesSent)),
		uintptr(unsafe.Pointer(overlapped)),
	)

	if r1 == 0 {
		if e1 != 0 {
			err = error(e1)
		} else {
			err = syscall.EINVAL
		}
	}
	return err
}
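// Illustrative sketch (not part of the vendored file): binding a TCP socket
// through Bind and reading back the kernel-assigned port with GetSockName,
// using a hand-rolled sockaddr_in, as it might appear in a test inside the
// go-winio module (the socket package is internal and not importable
// elsewhere). rawInet4Addr is a hypothetical helper type.
package main

import (
	"fmt"
	"unsafe"

	"github.com/Microsoft/go-winio/internal/socket" // internal: usable within go-winio only
	"golang.org/x/sys/windows"
)

// rawInet4Addr mirrors sockaddr_in: family, big-endian port, IPv4 address.
type rawInet4Addr struct {
	Family uint16
	Port   [2]byte // network byte order
	Addr   [4]byte
	Zero   [8]byte
}

func (r *rawInet4Addr) Sockaddr() (unsafe.Pointer, int32, error) {
	return unsafe.Pointer(r), int32(unsafe.Sizeof(*r)), nil
}

func main() {
	s, err := windows.Socket(windows.AF_INET, windows.SOCK_STREAM, windows.IPPROTO_TCP)
	if err != nil {
		panic(err)
	}
	defer windows.CloseHandle(s)

	local := &rawInet4Addr{Family: windows.AF_INET, Addr: [4]byte{127, 0, 0, 1}} // port 0: kernel picks
	if err := socket.Bind(s, local); err != nil {
		panic(err)
	}
	got := &rawInet4Addr{}
	if err := socket.GetSockName(s, got); err != nil {
		panic(err)
	}
	fmt.Printf("bound to 127.0.0.1:%d\n", uint16(got.Port[0])<<8|uint16(got.Port[1]))
}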
69
e2e/vendor/github.com/Microsoft/go-winio/internal/socket/zsyscall_windows.go
generated
vendored
Normal file
@ -0,0 +1,69 @@
//go:build windows

// Code generated by 'go generate' using "github.com/Microsoft/go-winio/tools/mkwinsyscall"; DO NOT EDIT.

package socket

import (
	"syscall"
	"unsafe"

	"golang.org/x/sys/windows"
)

var _ unsafe.Pointer

// Do the interface allocations only once for common
// Errno values.
const (
	errnoERROR_IO_PENDING = 997
)

var (
	errERROR_IO_PENDING error = syscall.Errno(errnoERROR_IO_PENDING)
	errERROR_EINVAL     error = syscall.EINVAL
)

// errnoErr returns common boxed Errno values, to prevent
// allocations at runtime.
func errnoErr(e syscall.Errno) error {
	switch e {
	case 0:
		return errERROR_EINVAL
	case errnoERROR_IO_PENDING:
		return errERROR_IO_PENDING
	}
	return e
}

var (
	modws2_32 = windows.NewLazySystemDLL("ws2_32.dll")

	procbind        = modws2_32.NewProc("bind")
	procgetpeername = modws2_32.NewProc("getpeername")
	procgetsockname = modws2_32.NewProc("getsockname")
)

func bind(s windows.Handle, name unsafe.Pointer, namelen int32) (err error) {
	r1, _, e1 := syscall.SyscallN(procbind.Addr(), uintptr(s), uintptr(name), uintptr(namelen))
	if r1 == socketError {
		err = errnoErr(e1)
	}
	return
}

func getpeername(s windows.Handle, name unsafe.Pointer, namelen *int32) (err error) {
	r1, _, e1 := syscall.SyscallN(procgetpeername.Addr(), uintptr(s), uintptr(name), uintptr(unsafe.Pointer(namelen)))
	if r1 == socketError {
		err = errnoErr(e1)
	}
	return
}

func getsockname(s windows.Handle, name unsafe.Pointer, namelen *int32) (err error) {
	r1, _, e1 := syscall.SyscallN(procgetsockname.Addr(), uintptr(s), uintptr(name), uintptr(unsafe.Pointer(namelen)))
	if r1 == socketError {
		err = errnoErr(e1)
	}
	return
}
132
e2e/vendor/github.com/Microsoft/go-winio/internal/stringbuffer/wstring.go
generated
vendored
Normal file
@ -0,0 +1,132 @@
package stringbuffer

import (
	"sync"
	"unicode/utf16"
)

// TODO: worth exporting and using in mkwinsyscall?

// MinWStringCap is the buffer size in the pool, chosen somewhat arbitrarily to accommodate
// large path strings:
// MAX_PATH (260) + size of volume GUID prefix (49) + null terminator = 310.
const MinWStringCap = 310

// use *[]uint16 since []uint16 creates an extra allocation where the slice header
// is copied to heap and then referenced via pointer in the interface header that sync.Pool
// stores.
var pathPool = sync.Pool{ // if go1.18+ adds Pool[T], use that to store []uint16 directly
	New: func() interface{} {
		b := make([]uint16, MinWStringCap)
		return &b
	},
}

func newBuffer() []uint16 { return *(pathPool.Get().(*[]uint16)) }

// freeBuffer copies the slice header data, and puts a pointer to that in the pool.
// This avoids taking a pointer to the slice header in WString, which can be set to nil.
func freeBuffer(b []uint16) { pathPool.Put(&b) }

// WString is a wide string buffer ([]uint16) meant for storing UTF-16 encoded strings
// for interacting with Win32 APIs.
// Sizes are specified as uint32 and not int.
//
// It is not thread safe.
type WString struct {
	// type-def allows casting to []uint16 directly; use a struct to prevent that and allow adding fields in the future.

	// raw buffer
	b []uint16
}

// NewWString returns a [WString] allocated from a shared pool with an
// initial capacity of at least [MinWStringCap].
// Since the buffer may have been previously used, its contents are not guaranteed to be empty.
//
// The buffer should be freed via [WString.Free].
func NewWString() *WString {
	return &WString{
		b: newBuffer(),
	}
}

func (b *WString) Free() {
	if b.empty() {
		return
	}
	freeBuffer(b.b)
	b.b = nil
}

// ResizeTo grows the buffer to at least c and returns the new capacity, freeing the
// previous buffer back into the pool.
func (b *WString) ResizeTo(c uint32) uint32 {
	// already sufficient (or c is 0)
	if c <= b.Cap() {
		return b.Cap()
	}

	if c <= MinWStringCap {
		c = MinWStringCap
	}
	// allocate at least double the buffer size, as is done in [bytes.Buffer] and other places
	if c <= 2*b.Cap() {
		c = 2 * b.Cap()
	}

	b2 := make([]uint16, c)
	if !b.empty() {
		copy(b2, b.b)
		freeBuffer(b.b)
	}
	b.b = b2
	return c
}

// Buffer returns the underlying []uint16 buffer.
func (b *WString) Buffer() []uint16 {
	if b.empty() {
		return nil
	}
	return b.b
}

// Pointer returns a pointer to the first uint16 in the buffer.
// If [WString.Free] has already been called, the pointer will be nil.
func (b *WString) Pointer() *uint16 {
	if b.empty() {
		return nil
	}
	return &b.b[0]
}

// String returns the UTF-8 encoding of the UTF-16 string in the buffer.
//
// It assumes that the data is null-terminated.
func (b *WString) String() string {
	// Using [windows.UTF16ToString] would require importing "golang.org/x/sys/windows"
	// and would make this code Windows-only, which makes no sense.
	// So copy the UTF16ToString code into here.
	// If other windows-specific code is added, switch to [windows.UTF16ToString].

	s := b.b
	for i, v := range s {
		if v == 0 {
			s = s[:i]
			break
		}
	}
	return string(utf16.Decode(s))
}

// Cap returns the underlying buffer capacity.
func (b *WString) Cap() uint32 {
	if b.empty() {
		return 0
	}
	return b.cap()
}

func (b *WString) cap() uint32 { return uint32(cap(b.b)) }
func (b *WString) empty() bool { return b == nil || b.cap() == 0 }
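A sketch of how a caller would use the pooled buffer above with a size-reporting Win32 API. stringbuffer is an internal package, so this shows the intended pattern rather than a public API, and fillBuffer is a hypothetical stand-in:

buf := stringbuffer.NewWString()
defer buf.Free() // return the buffer to the pool when done

for {
	// fillBuffer is assumed to write a NUL-terminated UTF-16 string and
	// return the required capacity in uint16s (GetModuleFileNameW-style).
	n := fillBuffer(buf.Pointer(), buf.Cap())
	if n <= buf.Cap() {
		break // it fit
	}
	buf.ResizeTo(n) // grows by at least doubling, then retry
}
path := buf.String() // decode UTF-16 to a Go string
_ = path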
586 e2e/vendor/github.com/Microsoft/go-winio/pipe.go generated vendored Normal file
@@ -0,0 +1,586 @@
//go:build windows
// +build windows

package winio

import (
	"context"
	"errors"
	"fmt"
	"io"
	"net"
	"os"
	"runtime"
	"time"
	"unsafe"

	"golang.org/x/sys/windows"

	"github.com/Microsoft/go-winio/internal/fs"
)

//sys connectNamedPipe(pipe windows.Handle, o *windows.Overlapped) (err error) = ConnectNamedPipe
//sys createNamedPipe(name string, flags uint32, pipeMode uint32, maxInstances uint32, outSize uint32, inSize uint32, defaultTimeout uint32, sa *windows.SecurityAttributes) (handle windows.Handle, err error) [failretval==windows.InvalidHandle] = CreateNamedPipeW
//sys disconnectNamedPipe(pipe windows.Handle) (err error) = DisconnectNamedPipe
//sys getNamedPipeInfo(pipe windows.Handle, flags *uint32, outSize *uint32, inSize *uint32, maxInstances *uint32) (err error) = GetNamedPipeInfo
//sys getNamedPipeHandleState(pipe windows.Handle, state *uint32, curInstances *uint32, maxCollectionCount *uint32, collectDataTimeout *uint32, userName *uint16, maxUserNameSize uint32) (err error) = GetNamedPipeHandleStateW
//sys ntCreateNamedPipeFile(pipe *windows.Handle, access ntAccessMask, oa *objectAttributes, iosb *ioStatusBlock, share ntFileShareMode, disposition ntFileCreationDisposition, options ntFileOptions, typ uint32, readMode uint32, completionMode uint32, maxInstances uint32, inboundQuota uint32, outputQuota uint32, timeout *int64) (status ntStatus) = ntdll.NtCreateNamedPipeFile
//sys rtlNtStatusToDosError(status ntStatus) (winerr error) = ntdll.RtlNtStatusToDosErrorNoTeb
//sys rtlDosPathNameToNtPathName(name *uint16, ntName *unicodeString, filePart uintptr, reserved uintptr) (status ntStatus) = ntdll.RtlDosPathNameToNtPathName_U
//sys rtlDefaultNpAcl(dacl *uintptr) (status ntStatus) = ntdll.RtlDefaultNpAcl

type PipeConn interface {
	net.Conn
	Disconnect() error
	Flush() error
}

// type aliases for mkwinsyscall code
type (
	ntAccessMask              = fs.AccessMask
	ntFileShareMode           = fs.FileShareMode
	ntFileCreationDisposition = fs.NTFileCreationDisposition
	ntFileOptions             = fs.NTCreateOptions
)

type ioStatusBlock struct {
	Status, Information uintptr
}

// typedef struct _OBJECT_ATTRIBUTES {
//	ULONG           Length;
//	HANDLE          RootDirectory;
//	PUNICODE_STRING ObjectName;
//	ULONG           Attributes;
//	PVOID           SecurityDescriptor;
//	PVOID           SecurityQualityOfService;
// } OBJECT_ATTRIBUTES;
//
// https://learn.microsoft.com/en-us/windows/win32/api/ntdef/ns-ntdef-_object_attributes
type objectAttributes struct {
	Length             uintptr
	RootDirectory      uintptr
	ObjectName         *unicodeString
	Attributes         uintptr
	SecurityDescriptor *securityDescriptor
	SecurityQoS        uintptr
}

type unicodeString struct {
	Length        uint16
	MaximumLength uint16
	Buffer        uintptr
}

// typedef struct _SECURITY_DESCRIPTOR {
//	BYTE                        Revision;
//	BYTE                        Sbz1;
//	SECURITY_DESCRIPTOR_CONTROL Control;
//	PSID                        Owner;
//	PSID                        Group;
//	PACL                        Sacl;
//	PACL                        Dacl;
// } SECURITY_DESCRIPTOR, *PISECURITY_DESCRIPTOR;
//
// https://learn.microsoft.com/en-us/windows/win32/api/winnt/ns-winnt-security_descriptor
type securityDescriptor struct {
	Revision byte
	Sbz1     byte
	Control  uint16
	Owner    uintptr
	Group    uintptr
	Sacl     uintptr //revive:disable-line:var-naming SACL, not Sacl
	Dacl     uintptr //revive:disable-line:var-naming DACL, not Dacl
}

type ntStatus int32

func (status ntStatus) Err() error {
	if status >= 0 {
		return nil
	}
	return rtlNtStatusToDosError(status)
}

var (
	// ErrPipeListenerClosed is returned for pipe operations on listeners that have been closed.
	ErrPipeListenerClosed = net.ErrClosed

	errPipeWriteClosed = errors.New("pipe has been closed for write")
)

type win32Pipe struct {
	*win32File
	path string
}

var _ PipeConn = (*win32Pipe)(nil)

type win32MessageBytePipe struct {
	win32Pipe
	writeClosed bool
	readEOF     bool
}

type pipeAddress string

func (f *win32Pipe) LocalAddr() net.Addr {
	return pipeAddress(f.path)
}

func (f *win32Pipe) RemoteAddr() net.Addr {
	return pipeAddress(f.path)
}

func (f *win32Pipe) SetDeadline(t time.Time) error {
	if err := f.SetReadDeadline(t); err != nil {
		return err
	}
	return f.SetWriteDeadline(t)
}

func (f *win32Pipe) Disconnect() error {
	return disconnectNamedPipe(f.win32File.handle)
}

// CloseWrite closes the write side of a message pipe in byte mode.
func (f *win32MessageBytePipe) CloseWrite() error {
	if f.writeClosed {
		return errPipeWriteClosed
	}
	err := f.win32File.Flush()
	if err != nil {
		return err
	}
	_, err = f.win32File.Write(nil)
	if err != nil {
		return err
	}
	f.writeClosed = true
	return nil
}

// Write writes bytes to a message pipe in byte mode. Zero-byte writes are ignored, since
// they are used to implement CloseWrite().
func (f *win32MessageBytePipe) Write(b []byte) (int, error) {
	if f.writeClosed {
		return 0, errPipeWriteClosed
	}
	if len(b) == 0 {
		return 0, nil
	}
	return f.win32File.Write(b)
}

// Read reads bytes from a message pipe in byte mode. A read of a zero-byte message on a message
// mode pipe will return io.EOF, as will all subsequent reads.
func (f *win32MessageBytePipe) Read(b []byte) (int, error) {
	if f.readEOF {
		return 0, io.EOF
	}
	n, err := f.win32File.Read(b)
	if err == io.EOF { //nolint:errorlint
		// If this was the result of a zero-byte read, then
		// it is possible that the read was due to a zero-size
		// message. Since we are simulating CloseWrite with a
		// zero-byte message, ensure that all future Read() calls
		// also return EOF.
		f.readEOF = true
	} else if err == windows.ERROR_MORE_DATA { //nolint:errorlint // err is Errno
		// ERROR_MORE_DATA indicates that the pipe's read mode is message mode
		// and the message still has more bytes. Treat this as a success, since
		// this package presents all named pipes as byte streams.
		err = nil
	}
	return n, err
}

func (pipeAddress) Network() string {
	return "pipe"
}

func (s pipeAddress) String() string {
	return string(s)
}

// tryDialPipe attempts to dial the pipe at `path` until `ctx` cancellation or timeout.
func tryDialPipe(ctx context.Context, path *string, access fs.AccessMask, impLevel PipeImpLevel) (windows.Handle, error) {
	for {
		select {
		case <-ctx.Done():
			return windows.Handle(0), ctx.Err()
		default:
			h, err := fs.CreateFile(*path,
				access,
				0,   // mode
				nil, // security attributes
				fs.OPEN_EXISTING,
				fs.FILE_FLAG_OVERLAPPED|fs.SECURITY_SQOS_PRESENT|fs.FileSQSFlag(impLevel),
				0, // template file handle
			)
			if err == nil {
				return h, nil
			}
			if err != windows.ERROR_PIPE_BUSY { //nolint:errorlint // err is Errno
				return h, &os.PathError{Err: err, Op: "open", Path: *path}
			}
			// Wait 10 msec and try again. This is a rather simplistic
			// view, as we always retry every 10 milliseconds.
			time.Sleep(10 * time.Millisecond)
		}
	}
}

// DialPipe connects to a named pipe by path, timing out if the connection
// takes longer than the specified duration. If timeout is nil, then we use
// a default timeout of 2 seconds. (We do not use WaitNamedPipe.)
func DialPipe(path string, timeout *time.Duration) (net.Conn, error) {
	var absTimeout time.Time
	if timeout != nil {
		absTimeout = time.Now().Add(*timeout)
	} else {
		absTimeout = time.Now().Add(2 * time.Second)
	}
	ctx, cancel := context.WithDeadline(context.Background(), absTimeout)
	defer cancel()
	conn, err := DialPipeContext(ctx, path)
	if errors.Is(err, context.DeadlineExceeded) {
		return nil, ErrTimeout
	}
	return conn, err
}

// DialPipeContext attempts to connect to a named pipe by `path` until `ctx`
// cancellation or timeout.
func DialPipeContext(ctx context.Context, path string) (net.Conn, error) {
	return DialPipeAccess(ctx, path, uint32(fs.GENERIC_READ|fs.GENERIC_WRITE))
}

// PipeImpLevel is an enumeration of impersonation levels that may be set
// when calling DialPipeAccessImpersonation.
type PipeImpLevel uint32

const (
	PipeImpLevelAnonymous      = PipeImpLevel(fs.SECURITY_ANONYMOUS)
	PipeImpLevelIdentification = PipeImpLevel(fs.SECURITY_IDENTIFICATION)
	PipeImpLevelImpersonation  = PipeImpLevel(fs.SECURITY_IMPERSONATION)
	PipeImpLevelDelegation     = PipeImpLevel(fs.SECURITY_DELEGATION)
)

// DialPipeAccess attempts to connect to a named pipe by `path` with `access` until `ctx`
// cancellation or timeout.
func DialPipeAccess(ctx context.Context, path string, access uint32) (net.Conn, error) {
	return DialPipeAccessImpLevel(ctx, path, access, PipeImpLevelAnonymous)
}

// DialPipeAccessImpLevel attempts to connect to a named pipe by `path` with
// `access` at `impLevel` until `ctx` cancellation or timeout. The other
// DialPipe* implementations use PipeImpLevelAnonymous.
func DialPipeAccessImpLevel(ctx context.Context, path string, access uint32, impLevel PipeImpLevel) (net.Conn, error) {
	var err error
	var h windows.Handle
	h, err = tryDialPipe(ctx, &path, fs.AccessMask(access), impLevel)
	if err != nil {
		return nil, err
	}

	var flags uint32
	err = getNamedPipeInfo(h, &flags, nil, nil, nil)
	if err != nil {
		return nil, err
	}

	f, err := makeWin32File(h)
	if err != nil {
		windows.Close(h)
		return nil, err
	}

	// If the pipe is in message mode, return a message byte pipe, which
	// supports CloseWrite().
	if flags&windows.PIPE_TYPE_MESSAGE != 0 {
		return &win32MessageBytePipe{
			win32Pipe: win32Pipe{win32File: f, path: path},
		}, nil
	}
	return &win32Pipe{win32File: f, path: path}, nil
}

type acceptResponse struct {
	f   *win32File
	err error
}

type win32PipeListener struct {
	firstHandle windows.Handle
	path        string
	config      PipeConfig
	acceptCh    chan (chan acceptResponse)
	closeCh     chan int
	doneCh      chan int
}

func makeServerPipeHandle(path string, sd []byte, c *PipeConfig, first bool) (windows.Handle, error) {
	path16, err := windows.UTF16FromString(path)
	if err != nil {
		return 0, &os.PathError{Op: "open", Path: path, Err: err}
	}

	var oa objectAttributes
	oa.Length = unsafe.Sizeof(oa)

	var ntPath unicodeString
	if err := rtlDosPathNameToNtPathName(&path16[0],
		&ntPath,
		0,
		0,
	).Err(); err != nil {
		return 0, &os.PathError{Op: "open", Path: path, Err: err}
	}
	defer windows.LocalFree(windows.Handle(ntPath.Buffer)) //nolint:errcheck
	oa.ObjectName = &ntPath
	oa.Attributes = windows.OBJ_CASE_INSENSITIVE

	// The security descriptor is only needed for the first pipe.
	if first {
		if sd != nil {
			//todo: does `sdb` need to be allocated on the heap, or can go allocate it?
			l := uint32(len(sd))
			sdb, err := windows.LocalAlloc(0, l)
			if err != nil {
				return 0, fmt.Errorf("LocalAlloc for security descriptor of length %d: %w", l, err)
			}
			defer windows.LocalFree(windows.Handle(sdb)) //nolint:errcheck
			copy((*[0xffff]byte)(unsafe.Pointer(sdb))[:], sd)
			oa.SecurityDescriptor = (*securityDescriptor)(unsafe.Pointer(sdb))
		} else {
			// Construct the default named pipe security descriptor.
			var dacl uintptr
			if err := rtlDefaultNpAcl(&dacl).Err(); err != nil {
				return 0, fmt.Errorf("getting default named pipe ACL: %w", err)
			}
			defer windows.LocalFree(windows.Handle(dacl)) //nolint:errcheck

			sdb := &securityDescriptor{
				Revision: 1,
				Control:  windows.SE_DACL_PRESENT,
				Dacl:     dacl,
			}
			oa.SecurityDescriptor = sdb
		}
	}

	typ := uint32(windows.FILE_PIPE_REJECT_REMOTE_CLIENTS)
	if c.MessageMode {
		typ |= windows.FILE_PIPE_MESSAGE_TYPE
	}

	disposition := fs.FILE_OPEN
	access := fs.GENERIC_READ | fs.GENERIC_WRITE | fs.SYNCHRONIZE
	if first {
		disposition = fs.FILE_CREATE
		// By not asking for read or write access, the named pipe file system
		// will put this pipe into an initially disconnected state, blocking
		// client connections until the next call with first == false.
		access = fs.SYNCHRONIZE
	}

	timeout := int64(-50 * 10000) // 50ms

	var (
		h    windows.Handle
		iosb ioStatusBlock
	)
	err = ntCreateNamedPipeFile(&h,
		access,
		&oa,
		&iosb,
		fs.FILE_SHARE_READ|fs.FILE_SHARE_WRITE,
		disposition,
		0,
		typ,
		0,
		0,
		0xffffffff,
		uint32(c.InputBufferSize),
		uint32(c.OutputBufferSize),
		&timeout).Err()
	if err != nil {
		return 0, &os.PathError{Op: "open", Path: path, Err: err}
	}

	runtime.KeepAlive(ntPath)
	return h, nil
}

func (l *win32PipeListener) makeServerPipe() (*win32File, error) {
	h, err := makeServerPipeHandle(l.path, nil, &l.config, false)
	if err != nil {
		return nil, err
	}
	f, err := makeWin32File(h)
	if err != nil {
		windows.Close(h)
		return nil, err
	}
	return f, nil
}

func (l *win32PipeListener) makeConnectedServerPipe() (*win32File, error) {
	p, err := l.makeServerPipe()
	if err != nil {
		return nil, err
	}

	// Wait for the client to connect.
	ch := make(chan error)
	go func(p *win32File) {
		ch <- connectPipe(p)
	}(p)

	select {
	case err = <-ch:
		if err != nil {
			p.Close()
			p = nil
		}
	case <-l.closeCh:
		// Abort the connect request by closing the handle.
		p.Close()
		p = nil
		err = <-ch
		if err == nil || err == ErrFileClosed { //nolint:errorlint // err is Errno
			err = ErrPipeListenerClosed
		}
	}
	return p, err
}

func (l *win32PipeListener) listenerRoutine() {
	closed := false
	for !closed {
		select {
		case <-l.closeCh:
			closed = true
		case responseCh := <-l.acceptCh:
			var (
				p   *win32File
				err error
			)
			for {
				p, err = l.makeConnectedServerPipe()
				// If the connection was immediately closed by the client, try
				// again.
				if err != windows.ERROR_NO_DATA { //nolint:errorlint // err is Errno
					break
				}
			}
			responseCh <- acceptResponse{p, err}
			closed = err == ErrPipeListenerClosed //nolint:errorlint // err is Errno
		}
	}
	windows.Close(l.firstHandle)
	l.firstHandle = 0
	// Notify Close() and Accept() callers that the handle has been closed.
	close(l.doneCh)
}

// PipeConfig contains configuration for the pipe listener.
type PipeConfig struct {
	// SecurityDescriptor contains a Windows security descriptor in SDDL format.
	SecurityDescriptor string

	// MessageMode determines whether the pipe is in byte or message mode. In either
	// case the pipe is read in byte mode by default. The only practical difference in
	// this implementation is that CloseWrite() is only supported for message mode pipes;
	// CloseWrite() is implemented as a zero-byte write, but zero-byte writes are only
	// transferred to the reader (and returned as io.EOF in this implementation)
	// when the pipe is in message mode.
	MessageMode bool

	// InputBufferSize specifies the size of the input buffer, in bytes.
	InputBufferSize int32

	// OutputBufferSize specifies the size of the output buffer, in bytes.
	OutputBufferSize int32
}

// ListenPipe creates a listener on a Windows named pipe path, e.g. \\.\pipe\mypipe.
// The pipe must not already exist.
func ListenPipe(path string, c *PipeConfig) (net.Listener, error) {
	var (
		sd  []byte
		err error
	)
	if c == nil {
		c = &PipeConfig{}
	}
	if c.SecurityDescriptor != "" {
		sd, err = SddlToSecurityDescriptor(c.SecurityDescriptor)
		if err != nil {
			return nil, err
		}
	}
	h, err := makeServerPipeHandle(path, sd, c, true)
	if err != nil {
		return nil, err
	}
	l := &win32PipeListener{
		firstHandle: h,
		path:        path,
		config:      *c,
		acceptCh:    make(chan (chan acceptResponse)),
		closeCh:     make(chan int),
		doneCh:      make(chan int),
	}
	go l.listenerRoutine()
	return l, nil
}

func connectPipe(p *win32File) error {
	c, err := p.prepareIO()
	if err != nil {
		return err
	}
	defer p.wg.Done()

	err = connectNamedPipe(p.handle, &c.o)
	_, err = p.asyncIO(c, nil, 0, err)
	if err != nil && err != windows.ERROR_PIPE_CONNECTED { //nolint:errorlint // err is Errno
		return err
	}
	return nil
}

func (l *win32PipeListener) Accept() (net.Conn, error) {
	ch := make(chan acceptResponse)
	select {
	case l.acceptCh <- ch:
		response := <-ch
		err := response.err
		if err != nil {
			return nil, err
		}
		if l.config.MessageMode {
			return &win32MessageBytePipe{
				win32Pipe: win32Pipe{win32File: response.f, path: l.path},
			}, nil
		}
		return &win32Pipe{win32File: response.f, path: l.path}, nil
	case <-l.doneCh:
		return nil, ErrPipeListenerClosed
	}
}

func (l *win32PipeListener) Close() error {
	select {
	case l.closeCh <- 1:
		<-l.doneCh
	case <-l.doneCh:
	}
	return nil
}

func (l *win32PipeListener) Addr() net.Addr {
	return pipeAddress(l.path)
}
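A minimal sketch of the public surface defined above: a server that accepts one connection on a named pipe and echoes data back, plus a client dialed with a two-second timeout. Imports and error handling are compressed for brevity, and the pipe name is made up:

l, err := winio.ListenPipe(`\\.\pipe\demo`, &winio.PipeConfig{MessageMode: true})
if err != nil {
	log.Fatal(err)
}
defer l.Close()

go func() {
	conn, err := l.Accept()
	if err != nil {
		return
	}
	defer conn.Close()
	io.Copy(conn, conn) // echo everything back
}()

timeout := 2 * time.Second
c, err := winio.DialPipe(`\\.\pipe\demo`, &timeout)
if err != nil {
	log.Fatal(err)
}
defer c.Close()
c.Write([]byte("ping"))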
232 e2e/vendor/github.com/Microsoft/go-winio/pkg/guid/guid.go generated vendored Normal file
@@ -0,0 +1,232 @@
// Package guid provides a GUID type. The backing structure for a GUID is
// identical to that used by the golang.org/x/sys/windows GUID type.
// There are two main binary encodings used for a GUID, the big-endian encoding,
// and the Windows (mixed-endian) encoding. See here for details:
// https://en.wikipedia.org/wiki/Universally_unique_identifier#Encoding
package guid

import (
	"crypto/rand"
	"crypto/sha1" //nolint:gosec // not used for secure application
	"encoding"
	"encoding/binary"
	"fmt"
	"strconv"
)

//go:generate go run golang.org/x/tools/cmd/stringer -type=Variant -trimprefix=Variant -linecomment

// Variant specifies the variant (or "type") of the GUID. It determines
// how the entirety of the rest of the GUID is interpreted.
type Variant uint8

// The variants specified by RFC 4122 section 4.1.1.
const (
	// VariantUnknown specifies a GUID variant which does not conform to one of
	// the variant encodings specified in RFC 4122.
	VariantUnknown Variant = iota
	VariantNCS
	VariantRFC4122 // RFC 4122
	VariantMicrosoft
	VariantFuture
)

// Version specifies how the bits in the GUID were generated. For instance, a
// version 4 GUID is randomly generated, and a version 5 is generated from the
// hash of an input string.
type Version uint8

func (v Version) String() string {
	return strconv.FormatUint(uint64(v), 10)
}

var _ = (encoding.TextMarshaler)(GUID{})
var _ = (encoding.TextUnmarshaler)(&GUID{})

// NewV4 returns a new version 4 (pseudorandom) GUID, as defined by RFC 4122.
func NewV4() (GUID, error) {
	var b [16]byte
	if _, err := rand.Read(b[:]); err != nil {
		return GUID{}, err
	}

	g := FromArray(b)
	g.setVersion(4) // Version 4 means randomly generated.
	g.setVariant(VariantRFC4122)

	return g, nil
}

// NewV5 returns a new version 5 (generated from a string via SHA-1 hashing)
// GUID, as defined by RFC 4122. The RFC is unclear on the encoding of the name,
// and the sample code treats it as a series of bytes, so we do the same here.
//
// Some implementations, such as those found on Windows, treat the name as a
// big-endian UTF16 stream of bytes. If that is desired, the string can be
// encoded as such before being passed to this function.
func NewV5(namespace GUID, name []byte) (GUID, error) {
	b := sha1.New() //nolint:gosec // not used for secure application
	namespaceBytes := namespace.ToArray()
	b.Write(namespaceBytes[:])
	b.Write(name)

	a := [16]byte{}
	copy(a[:], b.Sum(nil))

	g := FromArray(a)
	g.setVersion(5) // Version 5 means generated from a string.
	g.setVariant(VariantRFC4122)

	return g, nil
}

func fromArray(b [16]byte, order binary.ByteOrder) GUID {
	var g GUID
	g.Data1 = order.Uint32(b[0:4])
	g.Data2 = order.Uint16(b[4:6])
	g.Data3 = order.Uint16(b[6:8])
	copy(g.Data4[:], b[8:16])
	return g
}

func (g GUID) toArray(order binary.ByteOrder) [16]byte {
	b := [16]byte{}
	order.PutUint32(b[0:4], g.Data1)
	order.PutUint16(b[4:6], g.Data2)
	order.PutUint16(b[6:8], g.Data3)
	copy(b[8:16], g.Data4[:])
	return b
}

// FromArray constructs a GUID from a big-endian encoding array of 16 bytes.
func FromArray(b [16]byte) GUID {
	return fromArray(b, binary.BigEndian)
}

// ToArray returns an array of 16 bytes representing the GUID in big-endian
// encoding.
func (g GUID) ToArray() [16]byte {
	return g.toArray(binary.BigEndian)
}

// FromWindowsArray constructs a GUID from a Windows encoding array of bytes.
func FromWindowsArray(b [16]byte) GUID {
	return fromArray(b, binary.LittleEndian)
}

// ToWindowsArray returns an array of 16 bytes representing the GUID in Windows
// encoding.
func (g GUID) ToWindowsArray() [16]byte {
	return g.toArray(binary.LittleEndian)
}

func (g GUID) String() string {
	return fmt.Sprintf(
		"%08x-%04x-%04x-%04x-%012x",
		g.Data1,
		g.Data2,
		g.Data3,
		g.Data4[:2],
		g.Data4[2:])
}

// FromString parses a string containing a GUID and returns the GUID. The only
// format currently supported is the `xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx`
// format.
func FromString(s string) (GUID, error) {
	if len(s) != 36 {
		return GUID{}, fmt.Errorf("invalid GUID %q", s)
	}
	if s[8] != '-' || s[13] != '-' || s[18] != '-' || s[23] != '-' {
		return GUID{}, fmt.Errorf("invalid GUID %q", s)
	}

	var g GUID

	data1, err := strconv.ParseUint(s[0:8], 16, 32)
	if err != nil {
		return GUID{}, fmt.Errorf("invalid GUID %q", s)
	}
	g.Data1 = uint32(data1)

	data2, err := strconv.ParseUint(s[9:13], 16, 16)
	if err != nil {
		return GUID{}, fmt.Errorf("invalid GUID %q", s)
	}
	g.Data2 = uint16(data2)

	data3, err := strconv.ParseUint(s[14:18], 16, 16)
	if err != nil {
		return GUID{}, fmt.Errorf("invalid GUID %q", s)
	}
	g.Data3 = uint16(data3)

	for i, x := range []int{19, 21, 24, 26, 28, 30, 32, 34} {
		v, err := strconv.ParseUint(s[x:x+2], 16, 8)
		if err != nil {
			return GUID{}, fmt.Errorf("invalid GUID %q", s)
		}
		g.Data4[i] = uint8(v)
	}

	return g, nil
}

func (g *GUID) setVariant(v Variant) {
	d := g.Data4[0]
	switch v {
	case VariantNCS:
		d = (d & 0x7f)
	case VariantRFC4122:
		d = (d & 0x3f) | 0x80
	case VariantMicrosoft:
		d = (d & 0x1f) | 0xc0
	case VariantFuture:
		d = (d & 0x0f) | 0xe0
	case VariantUnknown:
		fallthrough
	default:
		panic(fmt.Sprintf("invalid variant: %d", v))
	}
	g.Data4[0] = d
}

// Variant returns the GUID variant, as defined in RFC 4122.
func (g GUID) Variant() Variant {
	b := g.Data4[0]
	if b&0x80 == 0 {
		return VariantNCS
	} else if b&0xc0 == 0x80 {
		return VariantRFC4122
	} else if b&0xe0 == 0xc0 {
		return VariantMicrosoft
	} else if b&0xe0 == 0xe0 {
		return VariantFuture
	}
	return VariantUnknown
}
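// For reference, the checks in Variant and setVariant above encode/decode the
// RFC 4122 section 4.1.1 variant bits stored in the top of Data4[0]
// (clock_seq_hi_and_reserved in RFC terms):
//
//	0xxxxxxx  NCS        (b&0x80 == 0)
//	10xxxxxx  RFC 4122   (b&0xc0 == 0x80)
//	110xxxxx  Microsoft  (b&0xe0 == 0xc0)
//	111xxxxx  Future     (b&0xe0 == 0xe0)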
func (g *GUID) setVersion(v Version) {
	g.Data3 = (g.Data3 & 0x0fff) | (uint16(v) << 12)
}

// Version returns the GUID version, as defined in RFC 4122.
func (g GUID) Version() Version {
	return Version((g.Data3 & 0xF000) >> 12)
}

// MarshalText returns the textual representation of the GUID.
func (g GUID) MarshalText() ([]byte, error) {
	return []byte(g.String()), nil
}

// UnmarshalText takes the textual representation of a GUID, and unmarshals it
// into this GUID.
func (g *GUID) UnmarshalText(text []byte) error {
	g2, err := FromString(string(text))
	if err != nil {
		return err
	}
	*g = g2
	return nil
}
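A short usage sketch of the package above. The namespace GUID comes from RFC 4122 appendix C (the DNS namespace), parsed here from its textual form; error handling is compressed:

g4, err := guid.NewV4()
if err != nil {
	log.Fatal(err)
}
fmt.Println(g4, g4.Version(), g4.Variant()) // random GUID, version 4, RFC 4122 variant

// RFC 4122 appendix C DNS namespace.
ns, err := guid.FromString("6ba7b810-9dad-11d1-80b4-00c04fd430c8")
if err != nil {
	log.Fatal(err)
}
g5, err := guid.NewV5(ns, []byte("example.com")) // deterministic for a given namespace+name
if err != nil {
	log.Fatal(err)
}
fmt.Println(g5.Version()) // 5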
16 e2e/vendor/github.com/Microsoft/go-winio/pkg/guid/guid_nonwindows.go generated vendored Normal file
@@ -0,0 +1,16 @@
//go:build !windows
// +build !windows

package guid

// GUID represents a GUID/UUID. It has the same structure as
// golang.org/x/sys/windows.GUID so that it can be used with functions expecting
// that type. It is defined as its own type because golang.org/x/sys/windows is
// only available in builds targeted at `windows`. The representation matches
// that used by native Windows code.
type GUID struct {
	Data1 uint32
	Data2 uint16
	Data3 uint16
	Data4 [8]byte
}
13 e2e/vendor/github.com/Microsoft/go-winio/pkg/guid/guid_windows.go generated vendored Normal file
@@ -0,0 +1,13 @@
//go:build windows
// +build windows

package guid

import "golang.org/x/sys/windows"

// GUID represents a GUID/UUID. It has the same structure as
// golang.org/x/sys/windows.GUID so that it can be used with functions expecting
// that type. It is defined as its own type so that stringification and
// marshaling can be supported. The representation matches that used by native
// Windows code.
type GUID windows.GUID
27 e2e/vendor/github.com/Microsoft/go-winio/pkg/guid/variant_string.go generated vendored Normal file
@@ -0,0 +1,27 @@
// Code generated by "stringer -type=Variant -trimprefix=Variant -linecomment"; DO NOT EDIT.

package guid

import "strconv"

func _() {
	// An "invalid array index" compiler error signifies that the constant values have changed.
	// Re-run the stringer command to generate them again.
	var x [1]struct{}
	_ = x[VariantUnknown-0]
	_ = x[VariantNCS-1]
	_ = x[VariantRFC4122-2]
	_ = x[VariantMicrosoft-3]
	_ = x[VariantFuture-4]
}

const _Variant_name = "UnknownNCSRFC 4122MicrosoftFuture"

var _Variant_index = [...]uint8{0, 7, 10, 18, 27, 33}

func (i Variant) String() string {
	if i >= Variant(len(_Variant_index)-1) {
		return "Variant(" + strconv.FormatInt(int64(i), 10) + ")"
	}
	return _Variant_name[_Variant_index[i]:_Variant_index[i+1]]
}
196 e2e/vendor/github.com/Microsoft/go-winio/privilege.go generated vendored Normal file
@@ -0,0 +1,196 @@
//go:build windows
// +build windows

package winio

import (
	"bytes"
	"encoding/binary"
	"fmt"
	"runtime"
	"sync"
	"unicode/utf16"

	"golang.org/x/sys/windows"
)

//sys adjustTokenPrivileges(token windows.Token, releaseAll bool, input *byte, outputSize uint32, output *byte, requiredSize *uint32) (success bool, err error) [true] = advapi32.AdjustTokenPrivileges
//sys impersonateSelf(level uint32) (err error) = advapi32.ImpersonateSelf
//sys revertToSelf() (err error) = advapi32.RevertToSelf
//sys openThreadToken(thread windows.Handle, accessMask uint32, openAsSelf bool, token *windows.Token) (err error) = advapi32.OpenThreadToken
//sys getCurrentThread() (h windows.Handle) = GetCurrentThread
//sys lookupPrivilegeValue(systemName string, name string, luid *uint64) (err error) = advapi32.LookupPrivilegeValueW
//sys lookupPrivilegeName(systemName string, luid *uint64, buffer *uint16, size *uint32) (err error) = advapi32.LookupPrivilegeNameW
//sys lookupPrivilegeDisplayName(systemName string, name *uint16, buffer *uint16, size *uint32, languageId *uint32) (err error) = advapi32.LookupPrivilegeDisplayNameW

const (
	//revive:disable-next-line:var-naming ALL_CAPS
	SE_PRIVILEGE_ENABLED = windows.SE_PRIVILEGE_ENABLED

	//revive:disable-next-line:var-naming ALL_CAPS
	ERROR_NOT_ALL_ASSIGNED windows.Errno = windows.ERROR_NOT_ALL_ASSIGNED

	SeBackupPrivilege   = "SeBackupPrivilege"
	SeRestorePrivilege  = "SeRestorePrivilege"
	SeSecurityPrivilege = "SeSecurityPrivilege"
)

var (
	privNames     = make(map[string]uint64)
	privNameMutex sync.Mutex
)

// PrivilegeError represents an error enabling privileges.
type PrivilegeError struct {
	privileges []uint64
}

func (e *PrivilegeError) Error() string {
	s := "Could not enable privilege "
	if len(e.privileges) > 1 {
		s = "Could not enable privileges "
	}
	for i, p := range e.privileges {
		if i != 0 {
			s += ", "
		}
		s += `"`
		s += getPrivilegeName(p)
		s += `"`
	}
	return s
}

// RunWithPrivilege enables a single privilege for a function call.
func RunWithPrivilege(name string, fn func() error) error {
	return RunWithPrivileges([]string{name}, fn)
}

// RunWithPrivileges enables privileges for a function call.
func RunWithPrivileges(names []string, fn func() error) error {
	privileges, err := mapPrivileges(names)
	if err != nil {
		return err
	}
	runtime.LockOSThread()
	defer runtime.UnlockOSThread()
	token, err := newThreadToken()
	if err != nil {
		return err
	}
	defer releaseThreadToken(token)
	err = adjustPrivileges(token, privileges, SE_PRIVILEGE_ENABLED)
	if err != nil {
		return err
	}
	return fn()
}

func mapPrivileges(names []string) ([]uint64, error) {
	privileges := make([]uint64, 0, len(names))
	privNameMutex.Lock()
	defer privNameMutex.Unlock()
	for _, name := range names {
		p, ok := privNames[name]
		if !ok {
			err := lookupPrivilegeValue("", name, &p)
			if err != nil {
				return nil, err
			}
			privNames[name] = p
		}
		privileges = append(privileges, p)
	}
	return privileges, nil
}

// EnableProcessPrivileges enables privileges globally for the process.
func EnableProcessPrivileges(names []string) error {
	return enableDisableProcessPrivilege(names, SE_PRIVILEGE_ENABLED)
}

// DisableProcessPrivileges disables privileges globally for the process.
func DisableProcessPrivileges(names []string) error {
	return enableDisableProcessPrivilege(names, 0)
}

func enableDisableProcessPrivilege(names []string, action uint32) error {
	privileges, err := mapPrivileges(names)
	if err != nil {
		return err
	}

	p := windows.CurrentProcess()
	var token windows.Token
	err = windows.OpenProcessToken(p, windows.TOKEN_ADJUST_PRIVILEGES|windows.TOKEN_QUERY, &token)
	if err != nil {
		return err
	}

	defer token.Close()
	return adjustPrivileges(token, privileges, action)
}

func adjustPrivileges(token windows.Token, privileges []uint64, action uint32) error {
	var b bytes.Buffer
	_ = binary.Write(&b, binary.LittleEndian, uint32(len(privileges)))
	for _, p := range privileges {
		_ = binary.Write(&b, binary.LittleEndian, p)
		_ = binary.Write(&b, binary.LittleEndian, action)
	}
	prevState := make([]byte, b.Len())
	reqSize := uint32(0)
	success, err := adjustTokenPrivileges(token, false, &b.Bytes()[0], uint32(len(prevState)), &prevState[0], &reqSize)
	if !success {
		return err
	}
	if err == ERROR_NOT_ALL_ASSIGNED { //nolint:errorlint // err is Errno
		return &PrivilegeError{privileges}
	}
	return nil
}
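// For reference, the buffer assembled in adjustPrivileges above matches the
// Win32 TOKEN_PRIVILEGES layout that AdjustTokenPrivileges expects, written
// little-endian by binary.Write:
//
//	uint32 PrivilegeCount
//	PrivilegeCount times:
//		uint64 Luid        // 64-bit privilege LUID
//		uint32 Attributes  // SE_PRIVILEGE_ENABLED to enable, 0 to disable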
func getPrivilegeName(luid uint64) string {
	var nameBuffer [256]uint16
	bufSize := uint32(len(nameBuffer))
	err := lookupPrivilegeName("", &luid, &nameBuffer[0], &bufSize)
	if err != nil {
		return fmt.Sprintf("<unknown privilege %d>", luid)
	}

	var displayNameBuffer [256]uint16
	displayBufSize := uint32(len(displayNameBuffer))
	var langID uint32
	err = lookupPrivilegeDisplayName("", &nameBuffer[0], &displayNameBuffer[0], &displayBufSize, &langID)
	if err != nil {
		return fmt.Sprintf("<unknown privilege %s>", string(utf16.Decode(nameBuffer[:bufSize])))
	}

	return string(utf16.Decode(displayNameBuffer[:displayBufSize]))
}

func newThreadToken() (windows.Token, error) {
	err := impersonateSelf(windows.SecurityImpersonation)
	if err != nil {
		return 0, err
	}

	var token windows.Token
	err = openThreadToken(getCurrentThread(), windows.TOKEN_ADJUST_PRIVILEGES|windows.TOKEN_QUERY, false, &token)
	if err != nil {
		rerr := revertToSelf()
		if rerr != nil {
			panic(rerr)
		}
		return 0, err
	}
	return token, nil
}

func releaseThreadToken(h windows.Token) {
	err := revertToSelf()
	if err != nil {
		panic(err)
	}
	h.Close()
}
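A sketch of enabling a privilege just for the duration of a callback with the API above; whether the enable succeeds depends on the privileges actually held by the process token, and imports are elided:

err := winio.RunWithPrivilege(winio.SeBackupPrivilege, func() error {
	// The privilege is enabled only on this locked OS thread, only while
	// this callback runs (e.g. for backup-semantics file opens).
	return nil
})
if err != nil {
	var perr *winio.PrivilegeError
	if errors.As(err, &perr) {
		log.Fatal("token lacks the privilege: ", perr)
	}
	log.Fatal(err)
}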
131 e2e/vendor/github.com/Microsoft/go-winio/reparse.go generated vendored Normal file
@@ -0,0 +1,131 @@
//go:build windows
// +build windows

package winio

import (
	"bytes"
	"encoding/binary"
	"fmt"
	"strings"
	"unicode/utf16"
	"unsafe"
)

const (
	reparseTagMountPoint = 0xA0000003
	reparseTagSymlink    = 0xA000000C
)

type reparseDataBuffer struct {
	ReparseTag           uint32
	ReparseDataLength    uint16
	Reserved             uint16
	SubstituteNameOffset uint16
	SubstituteNameLength uint16
	PrintNameOffset      uint16
	PrintNameLength      uint16
}

// ReparsePoint describes a Win32 symlink or mount point.
type ReparsePoint struct {
	Target       string
	IsMountPoint bool
}

// UnsupportedReparsePointError is returned when trying to decode a non-symlink or
// mount point reparse point.
type UnsupportedReparsePointError struct {
	Tag uint32
}

func (e *UnsupportedReparsePointError) Error() string {
	return fmt.Sprintf("unsupported reparse point %x", e.Tag)
}

// DecodeReparsePoint decodes a Win32 REPARSE_DATA_BUFFER structure containing either a symlink
// or a mount point.
func DecodeReparsePoint(b []byte) (*ReparsePoint, error) {
	tag := binary.LittleEndian.Uint32(b[0:4])
	return DecodeReparsePointData(tag, b[8:])
}

func DecodeReparsePointData(tag uint32, b []byte) (*ReparsePoint, error) {
	isMountPoint := false
	switch tag {
	case reparseTagMountPoint:
		isMountPoint = true
	case reparseTagSymlink:
	default:
		return nil, &UnsupportedReparsePointError{tag}
	}
	nameOffset := 8 + binary.LittleEndian.Uint16(b[4:6])
	if !isMountPoint {
		nameOffset += 4
	}
	nameLength := binary.LittleEndian.Uint16(b[6:8])
	name := make([]uint16, nameLength/2)
	err := binary.Read(bytes.NewReader(b[nameOffset:nameOffset+nameLength]), binary.LittleEndian, &name)
	if err != nil {
		return nil, err
	}
	return &ReparsePoint{string(utf16.Decode(name)), isMountPoint}, nil
}

func isDriveLetter(c byte) bool {
	return (c >= 'a' && c <= 'z') || (c >= 'A' && c <= 'Z')
}

// EncodeReparsePoint encodes a Win32 REPARSE_DATA_BUFFER structure describing a symlink or
// mount point.
func EncodeReparsePoint(rp *ReparsePoint) []byte {
	// Generate an NT path and determine if this is a relative path.
	var ntTarget string
	relative := false
	if strings.HasPrefix(rp.Target, `\\?\`) {
		ntTarget = `\??\` + rp.Target[4:]
	} else if strings.HasPrefix(rp.Target, `\\`) {
		ntTarget = `\??\UNC\` + rp.Target[2:]
	} else if len(rp.Target) >= 2 && isDriveLetter(rp.Target[0]) && rp.Target[1] == ':' {
		ntTarget = `\??\` + rp.Target
	} else {
		ntTarget = rp.Target
		relative = true
	}

	// The paths must be NUL-terminated even though they are counted strings.
	target16 := utf16.Encode([]rune(rp.Target + "\x00"))
	ntTarget16 := utf16.Encode([]rune(ntTarget + "\x00"))

	size := int(unsafe.Sizeof(reparseDataBuffer{})) - 8
	size += len(ntTarget16)*2 + len(target16)*2

	tag := uint32(reparseTagMountPoint)
	if !rp.IsMountPoint {
		tag = reparseTagSymlink
		size += 4 // Add room for symlink flags
	}

	data := reparseDataBuffer{
		ReparseTag:           tag,
		ReparseDataLength:    uint16(size),
		SubstituteNameOffset: 0,
		SubstituteNameLength: uint16((len(ntTarget16) - 1) * 2),
		PrintNameOffset:      uint16(len(ntTarget16) * 2),
		PrintNameLength:      uint16((len(target16) - 1) * 2),
	}

	var b bytes.Buffer
	_ = binary.Write(&b, binary.LittleEndian, &data)
	if !rp.IsMountPoint {
		flags := uint32(0)
		if relative {
			flags |= 1
		}
		_ = binary.Write(&b, binary.LittleEndian, flags)
	}

	_ = binary.Write(&b, binary.LittleEndian, ntTarget16)
	_ = binary.Write(&b, binary.LittleEndian, target16)
	return b.Bytes()
}
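A round-trip sketch of the two functions above, encoding a symlink reparse buffer and decoding it back. Decoding reads the print name, so the original Target comes back unchanged; imports and error handling are compressed:

rp := &winio.ReparsePoint{Target: `C:\some\target`, IsMountPoint: false}
buf := winio.EncodeReparsePoint(rp)

back, err := winio.DecodeReparsePoint(buf)
if err != nil {
	log.Fatal(err)
}
fmt.Println(back.Target, back.IsMountPoint) // C:\some\target false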
133 e2e/vendor/github.com/Microsoft/go-winio/sd.go generated vendored Normal file
@@ -0,0 +1,133 @@
//go:build windows
// +build windows

package winio

import (
	"errors"
	"fmt"
	"unsafe"

	"golang.org/x/sys/windows"
)

//sys lookupAccountName(systemName *uint16, accountName string, sid *byte, sidSize *uint32, refDomain *uint16, refDomainSize *uint32, sidNameUse *uint32) (err error) = advapi32.LookupAccountNameW
//sys lookupAccountSid(systemName *uint16, sid *byte, name *uint16, nameSize *uint32, refDomain *uint16, refDomainSize *uint32, sidNameUse *uint32) (err error) = advapi32.LookupAccountSidW
//sys convertSidToStringSid(sid *byte, str **uint16) (err error) = advapi32.ConvertSidToStringSidW
//sys convertStringSidToSid(str *uint16, sid **byte) (err error) = advapi32.ConvertStringSidToSidW

type AccountLookupError struct {
	Name string
	Err  error
}

func (e *AccountLookupError) Error() string {
	if e.Name == "" {
		return "lookup account: empty account name specified"
	}
	var s string
	switch {
	case errors.Is(e.Err, windows.ERROR_INVALID_SID):
		s = "the security ID structure is invalid"
	case errors.Is(e.Err, windows.ERROR_NONE_MAPPED):
		s = "not found"
	default:
		s = e.Err.Error()
	}
	return "lookup account " + e.Name + ": " + s
}

func (e *AccountLookupError) Unwrap() error { return e.Err }

type SddlConversionError struct {
	Sddl string
	Err  error
}

func (e *SddlConversionError) Error() string {
	return "convert " + e.Sddl + ": " + e.Err.Error()
}

func (e *SddlConversionError) Unwrap() error { return e.Err }

// LookupSidByName looks up the SID of an account by name
//
//revive:disable-next-line:var-naming SID, not Sid
func LookupSidByName(name string) (sid string, err error) {
	if name == "" {
		return "", &AccountLookupError{name, windows.ERROR_NONE_MAPPED}
	}

	var sidSize, sidNameUse, refDomainSize uint32
	err = lookupAccountName(nil, name, nil, &sidSize, nil, &refDomainSize, &sidNameUse)
	if err != nil && err != windows.ERROR_INSUFFICIENT_BUFFER { //nolint:errorlint // err is Errno
		return "", &AccountLookupError{name, err}
	}
	sidBuffer := make([]byte, sidSize)
	refDomainBuffer := make([]uint16, refDomainSize)
	err = lookupAccountName(nil, name, &sidBuffer[0], &sidSize, &refDomainBuffer[0], &refDomainSize, &sidNameUse)
	if err != nil {
		return "", &AccountLookupError{name, err}
	}
	var strBuffer *uint16
	err = convertSidToStringSid(&sidBuffer[0], &strBuffer)
	if err != nil {
		return "", &AccountLookupError{name, err}
	}
	sid = windows.UTF16ToString((*[0xffff]uint16)(unsafe.Pointer(strBuffer))[:])
	_, _ = windows.LocalFree(windows.Handle(unsafe.Pointer(strBuffer)))
	return sid, nil
}

// LookupNameBySid looks up the name of an account by SID
//
//revive:disable-next-line:var-naming SID, not Sid
func LookupNameBySid(sid string) (name string, err error) {
	if sid == "" {
		return "", &AccountLookupError{sid, windows.ERROR_NONE_MAPPED}
	}

	sidBuffer, err := windows.UTF16PtrFromString(sid)
	if err != nil {
		return "", &AccountLookupError{sid, err}
	}

	var sidPtr *byte
	if err = convertStringSidToSid(sidBuffer, &sidPtr); err != nil {
		return "", &AccountLookupError{sid, err}
	}
	defer windows.LocalFree(windows.Handle(unsafe.Pointer(sidPtr))) //nolint:errcheck

	var nameSize, refDomainSize, sidNameUse uint32
	err = lookupAccountSid(nil, sidPtr, nil, &nameSize, nil, &refDomainSize, &sidNameUse)
	if err != nil && err != windows.ERROR_INSUFFICIENT_BUFFER { //nolint:errorlint // err is Errno
		return "", &AccountLookupError{sid, err}
	}

	nameBuffer := make([]uint16, nameSize)
	refDomainBuffer := make([]uint16, refDomainSize)
	err = lookupAccountSid(nil, sidPtr, &nameBuffer[0], &nameSize, &refDomainBuffer[0], &refDomainSize, &sidNameUse)
	if err != nil {
		return "", &AccountLookupError{sid, err}
	}

	name = windows.UTF16ToString(nameBuffer)
	return name, nil
}

func SddlToSecurityDescriptor(sddl string) ([]byte, error) {
	sd, err := windows.SecurityDescriptorFromString(sddl)
	if err != nil {
		return nil, &SddlConversionError{Sddl: sddl, Err: err}
	}
	b := unsafe.Slice((*byte)(unsafe.Pointer(sd)), sd.Length())
	return b, nil
}

func SecurityDescriptorToSddl(sd []byte) (string, error) {
	if l := int(unsafe.Sizeof(windows.SECURITY_DESCRIPTOR{})); len(sd) < l {
		return "", fmt.Errorf("SecurityDescriptor (%d) smaller than expected (%d): %w", len(sd), l, windows.ERROR_INCORRECT_SIZE)
	}
	s := (*windows.SECURITY_DESCRIPTOR)(unsafe.Pointer(&sd[0]))
	return s.String(), nil
}
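A small sketch of the SDDL helpers above (Windows-only), converting an SDDL string for owner SYSTEM (SY) with a DACL granting generic-all access to built-in administrators (BA) into its binary self-relative form and back; imports are elided:

sd, err := winio.SddlToSecurityDescriptor("O:SYD:(A;;GA;;;BA)")
if err != nil {
	log.Fatal(err)
}
sddl, err := winio.SecurityDescriptorToSddl(sd)
if err != nil {
	log.Fatal(err)
}
fmt.Println(sddl) // normalized SDDL for the same descriptor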
5 e2e/vendor/github.com/Microsoft/go-winio/syscall.go generated vendored Normal file
@@ -0,0 +1,5 @@
//go:build windows

package winio

//go:generate go run github.com/Microsoft/go-winio/tools/mkwinsyscall -output zsyscall_windows.go ./*.go
378 e2e/vendor/github.com/Microsoft/go-winio/zsyscall_windows.go generated vendored Normal file
@@ -0,0 +1,378 @@
//go:build windows

// Code generated by 'go generate' using "github.com/Microsoft/go-winio/tools/mkwinsyscall"; DO NOT EDIT.

package winio

import (
	"syscall"
	"unsafe"

	"golang.org/x/sys/windows"
)

var _ unsafe.Pointer

// Do the interface allocations only once for common
// Errno values.
const (
	errnoERROR_IO_PENDING = 997
)

var (
	errERROR_IO_PENDING error = syscall.Errno(errnoERROR_IO_PENDING)
	errERROR_EINVAL     error = syscall.EINVAL
)

// errnoErr returns common boxed Errno values, to prevent
// allocations at runtime.
func errnoErr(e syscall.Errno) error {
	switch e {
	case 0:
		return errERROR_EINVAL
	case errnoERROR_IO_PENDING:
		return errERROR_IO_PENDING
	}
	return e
}

var (
	modadvapi32 = windows.NewLazySystemDLL("advapi32.dll")
	modkernel32 = windows.NewLazySystemDLL("kernel32.dll")
	modntdll    = windows.NewLazySystemDLL("ntdll.dll")
	modws2_32   = windows.NewLazySystemDLL("ws2_32.dll")

	procAdjustTokenPrivileges              = modadvapi32.NewProc("AdjustTokenPrivileges")
	procConvertSidToStringSidW             = modadvapi32.NewProc("ConvertSidToStringSidW")
	procConvertStringSidToSidW             = modadvapi32.NewProc("ConvertStringSidToSidW")
	procImpersonateSelf                    = modadvapi32.NewProc("ImpersonateSelf")
	procLookupAccountNameW                 = modadvapi32.NewProc("LookupAccountNameW")
	procLookupAccountSidW                  = modadvapi32.NewProc("LookupAccountSidW")
	procLookupPrivilegeDisplayNameW        = modadvapi32.NewProc("LookupPrivilegeDisplayNameW")
	procLookupPrivilegeNameW               = modadvapi32.NewProc("LookupPrivilegeNameW")
	procLookupPrivilegeValueW              = modadvapi32.NewProc("LookupPrivilegeValueW")
	procOpenThreadToken                    = modadvapi32.NewProc("OpenThreadToken")
	procRevertToSelf                       = modadvapi32.NewProc("RevertToSelf")
	procBackupRead                         = modkernel32.NewProc("BackupRead")
	procBackupWrite                        = modkernel32.NewProc("BackupWrite")
	procCancelIoEx                         = modkernel32.NewProc("CancelIoEx")
	procConnectNamedPipe                   = modkernel32.NewProc("ConnectNamedPipe")
	procCreateIoCompletionPort             = modkernel32.NewProc("CreateIoCompletionPort")
	procCreateNamedPipeW                   = modkernel32.NewProc("CreateNamedPipeW")
	procDisconnectNamedPipe                = modkernel32.NewProc("DisconnectNamedPipe")
	procGetCurrentThread                   = modkernel32.NewProc("GetCurrentThread")
	procGetNamedPipeHandleStateW           = modkernel32.NewProc("GetNamedPipeHandleStateW")
	procGetNamedPipeInfo                   = modkernel32.NewProc("GetNamedPipeInfo")
	procGetQueuedCompletionStatus          = modkernel32.NewProc("GetQueuedCompletionStatus")
	procSetFileCompletionNotificationModes = modkernel32.NewProc("SetFileCompletionNotificationModes")
	procNtCreateNamedPipeFile              = modntdll.NewProc("NtCreateNamedPipeFile")
	procRtlDefaultNpAcl                    = modntdll.NewProc("RtlDefaultNpAcl")
	procRtlDosPathNameToNtPathName_U       = modntdll.NewProc("RtlDosPathNameToNtPathName_U")
	procRtlNtStatusToDosErrorNoTeb         = modntdll.NewProc("RtlNtStatusToDosErrorNoTeb")
	procWSAGetOverlappedResult             = modws2_32.NewProc("WSAGetOverlappedResult")
)

func adjustTokenPrivileges(token windows.Token, releaseAll bool, input *byte, outputSize uint32, output *byte, requiredSize *uint32) (success bool, err error) {
	var _p0 uint32
	if releaseAll {
		_p0 = 1
	}
	r0, _, e1 := syscall.SyscallN(procAdjustTokenPrivileges.Addr(), uintptr(token), uintptr(_p0), uintptr(unsafe.Pointer(input)), uintptr(outputSize), uintptr(unsafe.Pointer(output)), uintptr(unsafe.Pointer(requiredSize)))
	success = r0 != 0
	if true {
		err = errnoErr(e1)
	}
	return
}

func convertSidToStringSid(sid *byte, str **uint16) (err error) {
	r1, _, e1 := syscall.SyscallN(procConvertSidToStringSidW.Addr(), uintptr(unsafe.Pointer(sid)), uintptr(unsafe.Pointer(str)))
	if r1 == 0 {
		err = errnoErr(e1)
	}
	return
}

func convertStringSidToSid(str *uint16, sid **byte) (err error) {
	r1, _, e1 := syscall.SyscallN(procConvertStringSidToSidW.Addr(), uintptr(unsafe.Pointer(str)), uintptr(unsafe.Pointer(sid)))
	if r1 == 0 {
		err = errnoErr(e1)
	}
	return
}

func impersonateSelf(level uint32) (err error) {
	r1, _, e1 := syscall.SyscallN(procImpersonateSelf.Addr(), uintptr(level))
	if r1 == 0 {
		err = errnoErr(e1)
	}
	return
}

func lookupAccountName(systemName *uint16, accountName string, sid *byte, sidSize *uint32, refDomain *uint16, refDomainSize *uint32, sidNameUse *uint32) (err error) {
	var _p0 *uint16
	_p0, err = syscall.UTF16PtrFromString(accountName)
	if err != nil {
		return
	}
	return _lookupAccountName(systemName, _p0, sid, sidSize, refDomain, refDomainSize, sidNameUse)
}

func _lookupAccountName(systemName *uint16, accountName *uint16, sid *byte, sidSize *uint32, refDomain *uint16, refDomainSize *uint32, sidNameUse *uint32) (err error) {
	r1, _, e1 := syscall.SyscallN(procLookupAccountNameW.Addr(), uintptr(unsafe.Pointer(systemName)), uintptr(unsafe.Pointer(accountName)), uintptr(unsafe.Pointer(sid)), uintptr(unsafe.Pointer(sidSize)), uintptr(unsafe.Pointer(refDomain)), uintptr(unsafe.Pointer(refDomainSize)), uintptr(unsafe.Pointer(sidNameUse)))
	if r1 == 0 {
		err = errnoErr(e1)
	}
	return
}

func lookupAccountSid(systemName *uint16, sid *byte, name *uint16, nameSize *uint32, refDomain *uint16, refDomainSize *uint32, sidNameUse *uint32) (err error) {
	r1, _, e1 := syscall.SyscallN(procLookupAccountSidW.Addr(), uintptr(unsafe.Pointer(systemName)), uintptr(unsafe.Pointer(sid)), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(nameSize)), uintptr(unsafe.Pointer(refDomain)), uintptr(unsafe.Pointer(refDomainSize)), uintptr(unsafe.Pointer(sidNameUse)))
	if r1 == 0 {
		err = errnoErr(e1)
	}
	return
}

func lookupPrivilegeDisplayName(systemName string, name *uint16, buffer *uint16, size *uint32, languageId *uint32) (err error) {
	var _p0 *uint16
	_p0, err = syscall.UTF16PtrFromString(systemName)
	if err != nil {
		return
	}
	return _lookupPrivilegeDisplayName(_p0, name, buffer, size, languageId)
}

func _lookupPrivilegeDisplayName(systemName *uint16, name *uint16, buffer *uint16, size *uint32, languageId *uint32) (err error) {
	r1, _, e1 := syscall.SyscallN(procLookupPrivilegeDisplayNameW.Addr(), uintptr(unsafe.Pointer(systemName)), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(buffer)), uintptr(unsafe.Pointer(size)), uintptr(unsafe.Pointer(languageId)))
	if r1 == 0 {
		err = errnoErr(e1)
	}
	return
}

func lookupPrivilegeName(systemName string, luid *uint64, buffer *uint16, size *uint32) (err error) {
	var _p0 *uint16
	_p0, err = syscall.UTF16PtrFromString(systemName)
	if err != nil {
		return
	}
	return _lookupPrivilegeName(_p0, luid, buffer, size)
}

func _lookupPrivilegeName(systemName *uint16, luid *uint64, buffer *uint16, size *uint32) (err error) {
	r1, _, e1 := syscall.SyscallN(procLookupPrivilegeNameW.Addr(), uintptr(unsafe.Pointer(systemName)), uintptr(unsafe.Pointer(luid)), uintptr(unsafe.Pointer(buffer)), uintptr(unsafe.Pointer(size)))
	if r1 == 0 {
		err = errnoErr(e1)
	}
	return
}

func lookupPrivilegeValue(systemName string, name string, luid *uint64) (err error) {
	var _p0 *uint16
	_p0, err = syscall.UTF16PtrFromString(systemName)
	if err != nil {
		return
	}
	var _p1 *uint16
	_p1, err = syscall.UTF16PtrFromString(name)
	if err != nil {
		return
	}
	return _lookupPrivilegeValue(_p0, _p1, luid)
}

func _lookupPrivilegeValue(systemName *uint16, name *uint16, luid *uint64) (err error) {
	r1, _, e1 := syscall.SyscallN(procLookupPrivilegeValueW.Addr(), uintptr(unsafe.Pointer(systemName)), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(luid)))
	if r1 == 0 {
		err = errnoErr(e1)
	}
	return
}

func openThreadToken(thread windows.Handle, accessMask uint32, openAsSelf bool, token *windows.Token) (err error) {
|
||||
var _p0 uint32
|
||||
if openAsSelf {
|
||||
_p0 = 1
|
||||
}
|
||||
r1, _, e1 := syscall.SyscallN(procOpenThreadToken.Addr(), uintptr(thread), uintptr(accessMask), uintptr(_p0), uintptr(unsafe.Pointer(token)))
|
||||
if r1 == 0 {
|
||||
err = errnoErr(e1)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func revertToSelf() (err error) {
|
||||
r1, _, e1 := syscall.SyscallN(procRevertToSelf.Addr())
|
||||
if r1 == 0 {
|
||||
err = errnoErr(e1)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func backupRead(h windows.Handle, b []byte, bytesRead *uint32, abort bool, processSecurity bool, context *uintptr) (err error) {
|
||||
var _p0 *byte
|
||||
if len(b) > 0 {
|
||||
_p0 = &b[0]
|
||||
}
|
||||
var _p1 uint32
|
||||
if abort {
|
||||
_p1 = 1
|
||||
}
|
||||
var _p2 uint32
|
||||
if processSecurity {
|
||||
_p2 = 1
|
||||
}
|
||||
r1, _, e1 := syscall.SyscallN(procBackupRead.Addr(), uintptr(h), uintptr(unsafe.Pointer(_p0)), uintptr(len(b)), uintptr(unsafe.Pointer(bytesRead)), uintptr(_p1), uintptr(_p2), uintptr(unsafe.Pointer(context)))
|
||||
if r1 == 0 {
|
||||
err = errnoErr(e1)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func backupWrite(h windows.Handle, b []byte, bytesWritten *uint32, abort bool, processSecurity bool, context *uintptr) (err error) {
|
||||
var _p0 *byte
|
||||
if len(b) > 0 {
|
||||
_p0 = &b[0]
|
||||
}
|
||||
var _p1 uint32
|
||||
if abort {
|
||||
_p1 = 1
|
||||
}
|
||||
var _p2 uint32
|
||||
if processSecurity {
|
||||
_p2 = 1
|
||||
}
|
||||
r1, _, e1 := syscall.SyscallN(procBackupWrite.Addr(), uintptr(h), uintptr(unsafe.Pointer(_p0)), uintptr(len(b)), uintptr(unsafe.Pointer(bytesWritten)), uintptr(_p1), uintptr(_p2), uintptr(unsafe.Pointer(context)))
|
||||
if r1 == 0 {
|
||||
err = errnoErr(e1)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func cancelIoEx(file windows.Handle, o *windows.Overlapped) (err error) {
|
||||
r1, _, e1 := syscall.SyscallN(procCancelIoEx.Addr(), uintptr(file), uintptr(unsafe.Pointer(o)))
|
||||
if r1 == 0 {
|
||||
err = errnoErr(e1)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func connectNamedPipe(pipe windows.Handle, o *windows.Overlapped) (err error) {
|
||||
r1, _, e1 := syscall.SyscallN(procConnectNamedPipe.Addr(), uintptr(pipe), uintptr(unsafe.Pointer(o)))
|
||||
if r1 == 0 {
|
||||
err = errnoErr(e1)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func createIoCompletionPort(file windows.Handle, port windows.Handle, key uintptr, threadCount uint32) (newport windows.Handle, err error) {
|
||||
r0, _, e1 := syscall.SyscallN(procCreateIoCompletionPort.Addr(), uintptr(file), uintptr(port), uintptr(key), uintptr(threadCount))
|
||||
newport = windows.Handle(r0)
|
||||
if newport == 0 {
|
||||
err = errnoErr(e1)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func createNamedPipe(name string, flags uint32, pipeMode uint32, maxInstances uint32, outSize uint32, inSize uint32, defaultTimeout uint32, sa *windows.SecurityAttributes) (handle windows.Handle, err error) {
|
||||
var _p0 *uint16
|
||||
_p0, err = syscall.UTF16PtrFromString(name)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
return _createNamedPipe(_p0, flags, pipeMode, maxInstances, outSize, inSize, defaultTimeout, sa)
|
||||
}
|
||||
|
||||
func _createNamedPipe(name *uint16, flags uint32, pipeMode uint32, maxInstances uint32, outSize uint32, inSize uint32, defaultTimeout uint32, sa *windows.SecurityAttributes) (handle windows.Handle, err error) {
|
||||
r0, _, e1 := syscall.SyscallN(procCreateNamedPipeW.Addr(), uintptr(unsafe.Pointer(name)), uintptr(flags), uintptr(pipeMode), uintptr(maxInstances), uintptr(outSize), uintptr(inSize), uintptr(defaultTimeout), uintptr(unsafe.Pointer(sa)))
|
||||
handle = windows.Handle(r0)
|
||||
if handle == windows.InvalidHandle {
|
||||
err = errnoErr(e1)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func disconnectNamedPipe(pipe windows.Handle) (err error) {
|
||||
r1, _, e1 := syscall.SyscallN(procDisconnectNamedPipe.Addr(), uintptr(pipe))
|
||||
if r1 == 0 {
|
||||
err = errnoErr(e1)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func getCurrentThread() (h windows.Handle) {
|
||||
r0, _, _ := syscall.SyscallN(procGetCurrentThread.Addr())
|
||||
h = windows.Handle(r0)
|
||||
return
|
||||
}
|
||||
|
||||
func getNamedPipeHandleState(pipe windows.Handle, state *uint32, curInstances *uint32, maxCollectionCount *uint32, collectDataTimeout *uint32, userName *uint16, maxUserNameSize uint32) (err error) {
|
||||
r1, _, e1 := syscall.SyscallN(procGetNamedPipeHandleStateW.Addr(), uintptr(pipe), uintptr(unsafe.Pointer(state)), uintptr(unsafe.Pointer(curInstances)), uintptr(unsafe.Pointer(maxCollectionCount)), uintptr(unsafe.Pointer(collectDataTimeout)), uintptr(unsafe.Pointer(userName)), uintptr(maxUserNameSize))
|
||||
if r1 == 0 {
|
||||
err = errnoErr(e1)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func getNamedPipeInfo(pipe windows.Handle, flags *uint32, outSize *uint32, inSize *uint32, maxInstances *uint32) (err error) {
|
||||
r1, _, e1 := syscall.SyscallN(procGetNamedPipeInfo.Addr(), uintptr(pipe), uintptr(unsafe.Pointer(flags)), uintptr(unsafe.Pointer(outSize)), uintptr(unsafe.Pointer(inSize)), uintptr(unsafe.Pointer(maxInstances)))
|
||||
if r1 == 0 {
|
||||
err = errnoErr(e1)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func getQueuedCompletionStatus(port windows.Handle, bytes *uint32, key *uintptr, o **ioOperation, timeout uint32) (err error) {
|
||||
r1, _, e1 := syscall.SyscallN(procGetQueuedCompletionStatus.Addr(), uintptr(port), uintptr(unsafe.Pointer(bytes)), uintptr(unsafe.Pointer(key)), uintptr(unsafe.Pointer(o)), uintptr(timeout))
|
||||
if r1 == 0 {
|
||||
err = errnoErr(e1)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func setFileCompletionNotificationModes(h windows.Handle, flags uint8) (err error) {
|
||||
r1, _, e1 := syscall.SyscallN(procSetFileCompletionNotificationModes.Addr(), uintptr(h), uintptr(flags))
|
||||
if r1 == 0 {
|
||||
err = errnoErr(e1)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func ntCreateNamedPipeFile(pipe *windows.Handle, access ntAccessMask, oa *objectAttributes, iosb *ioStatusBlock, share ntFileShareMode, disposition ntFileCreationDisposition, options ntFileOptions, typ uint32, readMode uint32, completionMode uint32, maxInstances uint32, inboundQuota uint32, outputQuota uint32, timeout *int64) (status ntStatus) {
|
||||
r0, _, _ := syscall.SyscallN(procNtCreateNamedPipeFile.Addr(), uintptr(unsafe.Pointer(pipe)), uintptr(access), uintptr(unsafe.Pointer(oa)), uintptr(unsafe.Pointer(iosb)), uintptr(share), uintptr(disposition), uintptr(options), uintptr(typ), uintptr(readMode), uintptr(completionMode), uintptr(maxInstances), uintptr(inboundQuota), uintptr(outputQuota), uintptr(unsafe.Pointer(timeout)))
|
||||
status = ntStatus(r0)
|
||||
return
|
||||
}
|
||||
|
||||
func rtlDefaultNpAcl(dacl *uintptr) (status ntStatus) {
|
||||
r0, _, _ := syscall.SyscallN(procRtlDefaultNpAcl.Addr(), uintptr(unsafe.Pointer(dacl)))
|
||||
status = ntStatus(r0)
|
||||
return
|
||||
}
|
||||
|
||||
func rtlDosPathNameToNtPathName(name *uint16, ntName *unicodeString, filePart uintptr, reserved uintptr) (status ntStatus) {
|
||||
r0, _, _ := syscall.SyscallN(procRtlDosPathNameToNtPathName_U.Addr(), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(ntName)), uintptr(filePart), uintptr(reserved))
|
||||
status = ntStatus(r0)
|
||||
return
|
||||
}
|
||||
|
||||
func rtlNtStatusToDosError(status ntStatus) (winerr error) {
|
||||
r0, _, _ := syscall.SyscallN(procRtlNtStatusToDosErrorNoTeb.Addr(), uintptr(status))
|
||||
if r0 != 0 {
|
||||
winerr = syscall.Errno(r0)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func wsaGetOverlappedResult(h windows.Handle, o *windows.Overlapped, bytes *uint32, wait bool, flags *uint32) (err error) {
|
||||
var _p0 uint32
|
||||
if wait {
|
||||
_p0 = 1
|
||||
}
|
||||
r1, _, e1 := syscall.SyscallN(procWSAGetOverlappedResult.Addr(), uintptr(h), uintptr(unsafe.Pointer(o)), uintptr(unsafe.Pointer(bytes)), uintptr(_p0), uintptr(unsafe.Pointer(flags)))
|
||||
if r1 == 0 {
|
||||
err = errnoErr(e1)
|
||||
}
|
||||
return
|
||||
}
|
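Every wrapper in the generated file above follows the same convention: resolve the Win32 export lazily, invoke it with `syscall.SyscallN`, marshal Go `bool` arguments to 0/1 machine words, and treat a zero return value as failure, converting the accompanying `Errno` into an `error` (the generated code funnels this through its `errnoErr` helper). A minimal hand-written sketch of that convention, assuming a Windows build and the `golang.org/x/sys/windows` package; the standalone program below is hypothetical and not part of the vendored file:

```go
//go:build windows

package main

import (
	"fmt"
	"syscall"

	"golang.org/x/sys/windows"
)

var (
	modadvapi32      = windows.NewLazySystemDLL("advapi32.dll")
	procRevertToSelf = modadvapi32.NewProc("RevertToSelf")
)

// revertToSelf mirrors the generated wrapper shape: a zero return value from
// the Win32 call signals failure, and e1 carries the reason.
func revertToSelf() error {
	r1, _, e1 := syscall.SyscallN(procRevertToSelf.Addr())
	if r1 == 0 {
		// The generated code routes e1 through errnoErr, which interns
		// common values; returning the Errno directly is the simple form.
		return e1
	}
	return nil
}

func main() {
	if err := revertToSelf(); err != nil {
		fmt.Println("RevertToSelf failed:", err)
	}
}
```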
1
e2e/vendor/github.com/NYTimes/gziphandler/.gitignore
generated
vendored
Normal file
@ -0,0 +1 @@
*.swp
10
e2e/vendor/github.com/NYTimes/gziphandler/.travis.yml
generated
vendored
Normal file
@ -0,0 +1,10 @@
language: go
go:
  - 1.x
  - tip
env:
  - GO111MODULE=on
install:
  - go mod download
script:
  - go test -race -v
75
e2e/vendor/github.com/NYTimes/gziphandler/CODE_OF_CONDUCT.md
generated
vendored
Normal file
@ -0,0 +1,75 @@
---
layout: code-of-conduct
version: v1.0
---

This code of conduct outlines our expectations for participants within the **NYTimes/gziphandler** community, as well as steps for reporting unacceptable behavior. We are committed to providing a welcoming and inspiring community for all and expect our code of conduct to be honored. Anyone who violates this code of conduct may be banned from the community.

Our open source community strives to:

* **Be friendly and patient.**
* **Be welcoming**: We strive to be a community that welcomes and supports people of all backgrounds and identities. This includes, but is not limited to, members of any race, ethnicity, culture, national origin, colour, immigration status, social and economic class, educational level, sex, sexual orientation, gender identity and expression, age, size, family status, political belief, religion, and mental and physical ability.
* **Be considerate**: Your work will be used by other people, and you in turn will depend on the work of others. Any decision you take will affect users and colleagues, and you should take those consequences into account when making decisions. Remember that we're a world-wide community, so you might not be communicating in someone else's primary language.
* **Be respectful**: Not all of us will agree all the time, but disagreement is no excuse for poor behavior and poor manners. We might all experience some frustration now and then, but we cannot allow that frustration to turn into a personal attack. It’s important to remember that a community where people feel uncomfortable or threatened is not a productive one.
* **Be careful in the words that we choose**: We are a community of professionals, and we conduct ourselves professionally. Be kind to others. Do not insult or put down other participants. Harassment and other exclusionary behavior aren't acceptable.
* **Try to understand why we disagree**: Disagreements, both social and technical, happen all the time. It is important that we resolve disagreements and differing views constructively. Remember that we’re different. The strength of our community comes from its diversity, people from a wide range of backgrounds. Different people have different perspectives on issues. Being unable to understand why someone holds a viewpoint doesn’t mean that they’re wrong. Don’t forget that it is human to err and blaming each other doesn’t get us anywhere. Instead, focus on helping to resolve issues and learning from mistakes.

## Definitions

Harassment includes, but is not limited to:

- Offensive comments related to gender, gender identity and expression, sexual orientation, disability, mental illness, neuro(a)typicality, physical appearance, body size, race, age, regional discrimination, political or religious affiliation
- Unwelcome comments regarding a person’s lifestyle choices and practices, including those related to food, health, parenting, drugs, and employment
- Deliberate misgendering. This includes deadnaming or persistently using a pronoun that does not correctly reflect a person's gender identity. You must address people by the name they give you when not addressing them by their username or handle
- Physical contact and simulated physical contact (eg, textual descriptions like “*hug*” or “*backrub*”) without consent or after a request to stop
- Threats of violence, both physical and psychological
- Incitement of violence towards any individual, including encouraging a person to commit suicide or to engage in self-harm
- Deliberate intimidation
- Stalking or following
- Harassing photography or recording, including logging online activity for harassment purposes
- Sustained disruption of discussion
- Unwelcome sexual attention, including gratuitous or off-topic sexual images or behaviour
- Pattern of inappropriate social contact, such as requesting/assuming inappropriate levels of intimacy with others
- Continued one-on-one communication after requests to cease
- Deliberate “outing” of any aspect of a person’s identity without their consent except as necessary to protect others from intentional abuse
- Publication of non-harassing private communication

Our open source community prioritizes marginalized people’s safety over privileged people’s comfort. We will not act on complaints regarding:

- ‘Reverse’ -isms, including ‘reverse racism,’ ‘reverse sexism,’ and ‘cisphobia’
- Reasonable communication of boundaries, such as “leave me alone,” “go away,” or “I’m not discussing this with you”
- Refusal to explain or debate social justice concepts
- Communicating in a ‘tone’ you don’t find congenial
- Criticizing racist, sexist, cissexist, or otherwise oppressive behavior or assumptions

### Diversity Statement

We encourage everyone to participate and are committed to building a community for all. Although we will fail at times, we seek to treat everyone both as fairly and equally as possible. Whenever a participant has made a mistake, we expect them to take responsibility for it. If someone has been harmed or offended, it is our responsibility to listen carefully and respectfully, and do our best to right the wrong.

Although this list cannot be exhaustive, we explicitly honor diversity in age, gender, gender identity or expression, culture, ethnicity, language, national origin, political beliefs, profession, race, religion, sexual orientation, socioeconomic status, and technical ability. We will not tolerate discrimination based on any of the protected characteristics above, including participants with disabilities.

### Reporting Issues

If you experience or witness unacceptable behavior—or have any other concerns—please report it by contacting us via **code@nytimes.com**. All reports will be handled with discretion. In your report please include:

- Your contact information.
- Names (real, nicknames, or pseudonyms) of any individuals involved. If there are additional witnesses, please include them as well.
- Your account of what occurred, and if you believe the incident is ongoing. If there is a publicly available record (e.g. a mailing list archive or a public IRC logger), please include a link.
- Any additional information that may be helpful.

After filing a report, a representative will contact you personally, review the incident, follow up with any additional questions, and make a decision as to how to respond. If the person who is harassing you is part of the response team, they will recuse themselves from handling your incident. If the complaint originates from a member of the response team, it will be handled by a different member of the response team. We will respect confidentiality requests for the purpose of protecting victims of abuse.

### Attribution & Acknowledgements

We all stand on the shoulders of giants across many open source communities. We'd like to thank the communities and projects that established codes of conduct and diversity statements as our inspiration:

* [Django](https://www.djangoproject.com/conduct/reporting/)
* [Python](https://www.python.org/community/diversity/)
* [Ubuntu](http://www.ubuntu.com/about/about-ubuntu/conduct)
* [Contributor Covenant](http://contributor-covenant.org/)
* [Geek Feminism](http://geekfeminism.org/about/code-of-conduct/)
* [Citizen Code of Conduct](http://citizencodeofconduct.org/)

This Code of Conduct was based on https://github.com/todogroup/opencodeofconduct
30
e2e/vendor/github.com/NYTimes/gziphandler/CONTRIBUTING.md
generated
vendored
Normal file
@ -0,0 +1,30 @@
# Contributing to NYTimes/gziphandler

This is an open source project started by a handful of developers at The New York Times and open to the entire Go community.

We really appreciate your help!

## Filing issues

When filing an issue, make sure to answer these five questions:

1. What version of Go are you using (`go version`)?
2. What operating system and processor architecture are you using?
3. What did you do?
4. What did you expect to see?
5. What did you see instead?

## Contributing code

Before submitting changes, please follow these guidelines:

1. Check the open issues and pull requests for existing discussions.
2. Open an issue to discuss a new feature.
3. Write tests.
4. Make sure code follows the ['Go Code Review Comments'](https://github.com/golang/go/wiki/CodeReviewComments).
5. Make sure your changes pass `go test`.
6. Make sure the entire test suite passes locally and on Travis CI.
7. Open a Pull Request.
8. [Squash your commits](http://gitready.com/advanced/2009/02/10/squashing-commits-with-rebase.html) after receiving feedback and add a [great commit message](http://tbaggery.com/2008/04/19/a-note-about-git-commit-messages.html).

Unless otherwise noted, the gziphandler source files are distributed under the Apache 2.0-style license found in the LICENSE.md file.
201
e2e/vendor/github.com/NYTimes/gziphandler/LICENSE
generated
vendored
Normal file
@ -0,0 +1,201 @@
                                 Apache License
                           Version 2.0, January 2004
                        http://www.apache.org/licenses/

   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION

   1. Definitions.

      "License" shall mean the terms and conditions for use, reproduction,
      and distribution as defined by Sections 1 through 9 of this document.

      "Licensor" shall mean the copyright owner or entity authorized by
      the copyright owner that is granting the License.

      "Legal Entity" shall mean the union of the acting entity and all
      other entities that control, are controlled by, or are under common
      control with that entity. For the purposes of this definition,
      "control" means (i) the power, direct or indirect, to cause the
      direction or management of such entity, whether by contract or
      otherwise, or (ii) ownership of fifty percent (50%) or more of the
      outstanding shares, or (iii) beneficial ownership of such entity.

      "You" (or "Your") shall mean an individual or Legal Entity
      exercising permissions granted by this License.

      "Source" form shall mean the preferred form for making modifications,
      including but not limited to software source code, documentation
      source, and configuration files.

      "Object" form shall mean any form resulting from mechanical
      transformation or translation of a Source form, including but
      not limited to compiled object code, generated documentation,
      and conversions to other media types.

      "Work" shall mean the work of authorship, whether in Source or
      Object form, made available under the License, as indicated by a
      copyright notice that is included in or attached to the work
      (an example is provided in the Appendix below).

      "Derivative Works" shall mean any work, whether in Source or Object
      form, that is based on (or derived from) the Work and for which the
      editorial revisions, annotations, elaborations, or other modifications
      represent, as a whole, an original work of authorship. For the purposes
      of this License, Derivative Works shall not include works that remain
      separable from, or merely link (or bind by name) to the interfaces of,
      the Work and Derivative Works thereof.

      "Contribution" shall mean any work of authorship, including
      the original version of the Work and any modifications or additions
      to that Work or Derivative Works thereof, that is intentionally
      submitted to Licensor for inclusion in the Work by the copyright owner
      or by an individual or Legal Entity authorized to submit on behalf of
      the copyright owner. For the purposes of this definition, "submitted"
      means any form of electronic, verbal, or written communication sent
      to the Licensor or its representatives, including but not limited to
      communication on electronic mailing lists, source code control systems,
      and issue tracking systems that are managed by, or on behalf of, the
      Licensor for the purpose of discussing and improving the Work, but
      excluding communication that is conspicuously marked or otherwise
      designated in writing by the copyright owner as "Not a Contribution."

      "Contributor" shall mean Licensor and any individual or Legal Entity
      on behalf of whom a Contribution has been received by Licensor and
      subsequently incorporated within the Work.

   2. Grant of Copyright License. Subject to the terms and conditions of
      this License, each Contributor hereby grants to You a perpetual,
      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
      copyright license to reproduce, prepare Derivative Works of,
      publicly display, publicly perform, sublicense, and distribute the
      Work and such Derivative Works in Source or Object form.

   3. Grant of Patent License. Subject to the terms and conditions of
      this License, each Contributor hereby grants to You a perpetual,
      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
      (except as stated in this section) patent license to make, have made,
      use, offer to sell, sell, import, and otherwise transfer the Work,
      where such license applies only to those patent claims licensable
      by such Contributor that are necessarily infringed by their
      Contribution(s) alone or by combination of their Contribution(s)
      with the Work to which such Contribution(s) was submitted. If You
      institute patent litigation against any entity (including a
      cross-claim or counterclaim in a lawsuit) alleging that the Work
      or a Contribution incorporated within the Work constitutes direct
      or contributory patent infringement, then any patent licenses
      granted to You under this License for that Work shall terminate
      as of the date such litigation is filed.

   4. Redistribution. You may reproduce and distribute copies of the
      Work or Derivative Works thereof in any medium, with or without
      modifications, and in Source or Object form, provided that You
      meet the following conditions:

      (a) You must give any other recipients of the Work or
          Derivative Works a copy of this License; and

      (b) You must cause any modified files to carry prominent notices
          stating that You changed the files; and

      (c) You must retain, in the Source form of any Derivative Works
          that You distribute, all copyright, patent, trademark, and
          attribution notices from the Source form of the Work,
          excluding those notices that do not pertain to any part of
          the Derivative Works; and

      (d) If the Work includes a "NOTICE" text file as part of its
          distribution, then any Derivative Works that You distribute must
          include a readable copy of the attribution notices contained
          within such NOTICE file, excluding those notices that do not
          pertain to any part of the Derivative Works, in at least one
          of the following places: within a NOTICE text file distributed
          as part of the Derivative Works; within the Source form or
          documentation, if provided along with the Derivative Works; or,
          within a display generated by the Derivative Works, if and
          wherever such third-party notices normally appear. The contents
          of the NOTICE file are for informational purposes only and
          do not modify the License. You may add Your own attribution
          notices within Derivative Works that You distribute, alongside
          or as an addendum to the NOTICE text from the Work, provided
          that such additional attribution notices cannot be construed
          as modifying the License.

      You may add Your own copyright statement to Your modifications and
      may provide additional or different license terms and conditions
      for use, reproduction, or distribution of Your modifications, or
      for any such Derivative Works as a whole, provided Your use,
      reproduction, and distribution of the Work otherwise complies with
      the conditions stated in this License.

   5. Submission of Contributions. Unless You explicitly state otherwise,
      any Contribution intentionally submitted for inclusion in the Work
      by You to the Licensor shall be under the terms and conditions of
      this License, without any additional terms or conditions.
      Notwithstanding the above, nothing herein shall supersede or modify
      the terms of any separate license agreement you may have executed
      with Licensor regarding such Contributions.

   6. Trademarks. This License does not grant permission to use the trade
      names, trademarks, service marks, or product names of the Licensor,
      except as required for reasonable and customary use in describing the
      origin of the Work and reproducing the content of the NOTICE file.

   7. Disclaimer of Warranty. Unless required by applicable law or
      agreed to in writing, Licensor provides the Work (and each
      Contributor provides its Contributions) on an "AS IS" BASIS,
      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
      implied, including, without limitation, any warranties or conditions
      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
      PARTICULAR PURPOSE. You are solely responsible for determining the
      appropriateness of using or redistributing the Work and assume any
      risks associated with Your exercise of permissions under this License.

   8. Limitation of Liability. In no event and under no legal theory,
      whether in tort (including negligence), contract, or otherwise,
      unless required by applicable law (such as deliberate and grossly
      negligent acts) or agreed to in writing, shall any Contributor be
      liable to You for damages, including any direct, indirect, special,
      incidental, or consequential damages of any character arising as a
      result of this License or out of the use or inability to use the
      Work (including but not limited to damages for loss of goodwill,
      work stoppage, computer failure or malfunction, or any and all
      other commercial damages or losses), even if such Contributor
      has been advised of the possibility of such damages.

   9. Accepting Warranty or Additional Liability. While redistributing
      the Work or Derivative Works thereof, You may choose to offer,
      and charge a fee for, acceptance of support, warranty, indemnity,
      or other liability obligations and/or rights consistent with this
      License. However, in accepting such obligations, You may act only
      on Your own behalf and on Your sole responsibility, not on behalf
      of any other Contributor, and only if You agree to indemnify,
      defend, and hold each Contributor harmless for any liability
      incurred by, or claims asserted against, such Contributor by reason
      of your accepting any such warranty or additional liability.

   END OF TERMS AND CONDITIONS

   APPENDIX: How to apply the Apache License to your work.

      To apply the Apache License to your work, attach the following
      boilerplate notice, with the fields enclosed by brackets "[]"
      replaced with your own identifying information. (Don't include
      the brackets!) The text should be enclosed in the appropriate
      comment syntax for the file format. We also recommend that a
      file or class name and description of purpose be included on the
      same "printed page" as the copyright notice for easier
      identification within third-party archives.

   Copyright 2016-2017 The New York Times Company

   Licensed under the Apache License, Version 2.0 (the "License");
   you may not use this file except in compliance with the License.
   You may obtain a copy of the License at

       http://www.apache.org/licenses/LICENSE-2.0

   Unless required by applicable law or agreed to in writing, software
   distributed under the License is distributed on an "AS IS" BASIS,
   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
   See the License for the specific language governing permissions and
   limitations under the License.
56
e2e/vendor/github.com/NYTimes/gziphandler/README.md
generated
vendored
Normal file
@ -0,0 +1,56 @@
Gzip Handler
============

This is a tiny Go package which wraps HTTP handlers to transparently gzip the
response body, for clients which support it. Although it's usually simpler to
leave that to a reverse proxy (like nginx or Varnish), this package is useful
when that's undesirable.

## Install
```bash
go get -u github.com/NYTimes/gziphandler
```

## Usage

Call `GzipHandler` with any handler (an object which implements the
`http.Handler` interface), and it'll return a new handler which gzips the
response. For example:

```go
package main

import (
	"io"
	"net/http"
	"github.com/NYTimes/gziphandler"
)

func main() {
	withoutGz := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		w.Header().Set("Content-Type", "text/plain")
		io.WriteString(w, "Hello, World")
	})

	withGz := gziphandler.GzipHandler(withoutGz)

	http.Handle("/", withGz)
	http.ListenAndServe("0.0.0.0:8000", nil)
}
```
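
The options API in `gzip.go` (added later in this same commit) can tune the wrapper's compression level, minimum size, and content-type filter. A minimal sketch of that usage; the thresholds and content types below are illustrative, not part of the upstream README:

```go
package main

import (
	"compress/gzip"
	"io"
	"net/http"

	"github.com/NYTimes/gziphandler"
)

func main() {
	// Only compress HTML and JSON responses of at least 2 KB, at the
	// fastest compression level; see CompressionLevel, MinSize and
	// ContentTypes in gzip.go.
	wrap, err := gziphandler.GzipHandlerWithOpts(
		gziphandler.CompressionLevel(gzip.BestSpeed),
		gziphandler.MinSize(2048),
		gziphandler.ContentTypes([]string{"text/html", "application/json"}),
	)
	if err != nil {
		panic(err) // only an invalid level or a negative min size fails validation
	}

	handler := wrap(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		w.Header().Set("Content-Type", "text/html")
		io.WriteString(w, "<h1>Hello, World</h1>")
	}))

	http.ListenAndServe("0.0.0.0:8000", handler)
}
```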

## Documentation

The docs can be found at [godoc.org][docs], as usual.

## License

[Apache 2.0][license].

[docs]: https://godoc.org/github.com/NYTimes/gziphandler
[license]: https://github.com/NYTimes/gziphandler/blob/master/LICENSE
532
e2e/vendor/github.com/NYTimes/gziphandler/gzip.go
generated
vendored
Normal file
@ -0,0 +1,532 @@
package gziphandler // import "github.com/NYTimes/gziphandler"

import (
	"bufio"
	"compress/gzip"
	"fmt"
	"io"
	"mime"
	"net"
	"net/http"
	"strconv"
	"strings"
	"sync"
)

const (
	vary            = "Vary"
	acceptEncoding  = "Accept-Encoding"
	contentEncoding = "Content-Encoding"
	contentType     = "Content-Type"
	contentLength   = "Content-Length"
)

type codings map[string]float64

const (
	// DefaultQValue is the default qvalue to assign to an encoding if no explicit qvalue is set.
	// This is actually kind of ambiguous in RFC 2616, so hopefully it's correct.
	// The examples seem to indicate that it is.
	DefaultQValue = 1.0

	// DefaultMinSize is the default minimum size until we enable gzip compression.
	// 1500 bytes is the MTU size for the internet since that is the largest size allowed at the network layer.
	// If you take a file that is 1300 bytes and compress it to 800 bytes, it’s still transmitted in that same 1500 byte packet regardless, so you’ve gained nothing.
	// That being the case, you should restrict the gzip compression to files with a size greater than a single packet, 1400 bytes (1.4KB) is a safe value.
	DefaultMinSize = 1400
)

// gzipWriterPools stores a sync.Pool for each compression level for reuse of
// gzip.Writers. Use poolIndex to convert a compression level to an index into
// gzipWriterPools.
var gzipWriterPools [gzip.BestCompression - gzip.BestSpeed + 2]*sync.Pool

func init() {
	for i := gzip.BestSpeed; i <= gzip.BestCompression; i++ {
		addLevelPool(i)
	}
	addLevelPool(gzip.DefaultCompression)
}

// poolIndex maps a compression level to its index into gzipWriterPools. It
// assumes that level is a valid gzip compression level.
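// For example, gzip.BestSpeed (1) maps to index 0, gzip.BestCompression (9)
// maps to index 8, and gzip.DefaultCompression (-1) takes the extra slot at
// index gzip.BestCompression-gzip.BestSpeed+1 == 9, the last element of
// gzipWriterPools.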
func poolIndex(level int) int {
	// gzip.DefaultCompression == -1, so we need to treat it special.
	if level == gzip.DefaultCompression {
		return gzip.BestCompression - gzip.BestSpeed + 1
	}
	return level - gzip.BestSpeed
}

func addLevelPool(level int) {
	gzipWriterPools[poolIndex(level)] = &sync.Pool{
		New: func() interface{} {
			// NewWriterLevel only returns error on a bad level, we are guaranteeing
			// that this will be a valid level so it is okay to ignore the returned
			// error.
			w, _ := gzip.NewWriterLevel(nil, level)
			return w
		},
	}
}

// GzipResponseWriter provides an http.ResponseWriter interface, which gzips
// bytes before writing them to the underlying response. This doesn't close the
// writers, so don't forget to do that.
// It can be configured to skip responses smaller than minSize.
type GzipResponseWriter struct {
	http.ResponseWriter
	index int // Index for gzipWriterPools.
	gw    *gzip.Writer

	code int // Saves the WriteHeader value.

	minSize int    // Specifies the minimum response size to gzip. If the response length is bigger than this value, it is compressed.
	buf     []byte // Holds the first part of the write before reaching the minSize or the end of the write.
	ignore  bool   // If true, then we immediately passthru writes to the underlying ResponseWriter.

	contentTypes []parsedContentType // Only compress if the response is one of these content-types. All are accepted if empty.
}

type GzipResponseWriterWithCloseNotify struct {
	*GzipResponseWriter
}

func (w GzipResponseWriterWithCloseNotify) CloseNotify() <-chan bool {
	return w.ResponseWriter.(http.CloseNotifier).CloseNotify()
}

// Write appends data to the gzip writer.
func (w *GzipResponseWriter) Write(b []byte) (int, error) {
	// GZIP responseWriter is initialized. Use the GZIP responseWriter.
	if w.gw != nil {
		return w.gw.Write(b)
	}

	// If we have already decided not to use GZIP, immediately passthrough.
	if w.ignore {
		return w.ResponseWriter.Write(b)
	}

	// Save the write into a buffer for later use in GZIP responseWriter (if content is long enough) or at close with regular responseWriter.
	// On the first write, w.buf changes from nil to a valid slice
	w.buf = append(w.buf, b...)

	var (
		cl, _ = strconv.Atoi(w.Header().Get(contentLength))
		ct    = w.Header().Get(contentType)
		ce    = w.Header().Get(contentEncoding)
	)
	// Only continue if they didn't already choose an encoding or a known unhandled content length or type.
	if ce == "" && (cl == 0 || cl >= w.minSize) && (ct == "" || handleContentType(w.contentTypes, ct)) {
		// If the current buffer is less than minSize and a Content-Length isn't set, then wait until we have more data.
		if len(w.buf) < w.minSize && cl == 0 {
			return len(b), nil
		}
		// If the Content-Length is larger than minSize or the current buffer is larger than minSize, then continue.
		if cl >= w.minSize || len(w.buf) >= w.minSize {
			// If a Content-Type wasn't specified, infer it from the current buffer.
			if ct == "" {
				ct = http.DetectContentType(w.buf)
				w.Header().Set(contentType, ct)
			}
			// If the Content-Type is acceptable to GZIP, initialize the GZIP writer.
			if handleContentType(w.contentTypes, ct) {
				if err := w.startGzip(); err != nil {
					return 0, err
				}
				return len(b), nil
			}
		}
	}
	// If we got here, we should not GZIP this response.
	if err := w.startPlain(); err != nil {
		return 0, err
	}
	return len(b), nil
}

// startGzip initializes a GZIP writer and writes the buffer.
func (w *GzipResponseWriter) startGzip() error {
	// Set the GZIP header.
	w.Header().Set(contentEncoding, "gzip")

	// If the Content-Length is already set, then calls to Write on gzip
	// will fail to set the Content-Length header since it's already set.
	// See: https://github.com/golang/go/issues/14975.
	w.Header().Del(contentLength)

	// Write the header to gzip response.
	if w.code != 0 {
		w.ResponseWriter.WriteHeader(w.code)
		// Ensure that no other WriteHeader calls happen.
		w.code = 0
	}

	// Initialize and flush the buffer into the gzip response if there are any bytes.
	// If there aren't any, we shouldn't initialize it yet because on Close it will
	// write the gzip header even if nothing was ever written.
	if len(w.buf) > 0 {
		// Initialize the GZIP response.
		w.init()
		n, err := w.gw.Write(w.buf)

		// This should never happen (per io.Writer docs), but if the write didn't
		// accept the entire buffer but returned no specific error, we have no clue
		// what's going on, so abort just to be safe.
		if err == nil && n < len(w.buf) {
			err = io.ErrShortWrite
		}
		return err
	}
	return nil
}

// startPlain writes the buffered bytes to the underlying ResponseWriter without gzip.
func (w *GzipResponseWriter) startPlain() error {
	if w.code != 0 {
		w.ResponseWriter.WriteHeader(w.code)
		// Ensure that no other WriteHeader calls happen.
		w.code = 0
	}
	w.ignore = true
	// If Write was never called then don't call Write on the underlying ResponseWriter.
	if w.buf == nil {
		return nil
	}
	n, err := w.ResponseWriter.Write(w.buf)
	// This should never happen (per io.Writer docs), but if the write didn't
	// accept the entire buffer but returned no specific error, we have no clue
	// what's going on, so abort just to be safe.
	if err == nil && n < len(w.buf) {
		err = io.ErrShortWrite
	}
	w.buf = nil
	return err
}

// WriteHeader just saves the response code until Close or the first effective write.
func (w *GzipResponseWriter) WriteHeader(code int) {
	if w.code == 0 {
		w.code = code
	}
}

// init grabs a new gzip writer from the gzipWriterPool and writes the correct
// content encoding header.
func (w *GzipResponseWriter) init() {
	// Bytes written during ServeHTTP are redirected to this gzip writer
	// before being written to the underlying response.
	gzw := gzipWriterPools[w.index].Get().(*gzip.Writer)
	gzw.Reset(w.ResponseWriter)
	w.gw = gzw
}

// Close will close the gzip.Writer and will put it back in the gzipWriterPool.
func (w *GzipResponseWriter) Close() error {
	if w.ignore {
		return nil
	}

	if w.gw == nil {
		// GZIP not triggered yet, write out regular response.
		err := w.startPlain()
		// Returns the error if any at write.
		if err != nil {
			err = fmt.Errorf("gziphandler: write to regular responseWriter at close gets error: %q", err.Error())
		}
		return err
	}

	err := w.gw.Close()
	gzipWriterPools[w.index].Put(w.gw)
	w.gw = nil
	return err
}

// Flush flushes the underlying *gzip.Writer and then the underlying
// http.ResponseWriter if it is an http.Flusher. This makes GzipResponseWriter
// an http.Flusher.
func (w *GzipResponseWriter) Flush() {
	if w.gw == nil && !w.ignore {
		// Only flush once startGzip or startPlain has been called.
		//
		// Flush is thus a no-op until we're certain whether a plain
		// or gzipped response will be served.
		return
	}

	if w.gw != nil {
		w.gw.Flush()
	}

	if fw, ok := w.ResponseWriter.(http.Flusher); ok {
		fw.Flush()
	}
}

// Hijack implements http.Hijacker. If the underlying ResponseWriter is a
// Hijacker, its Hijack method is returned. Otherwise an error is returned.
func (w *GzipResponseWriter) Hijack() (net.Conn, *bufio.ReadWriter, error) {
	if hj, ok := w.ResponseWriter.(http.Hijacker); ok {
		return hj.Hijack()
	}
	return nil, nil, fmt.Errorf("http.Hijacker interface is not supported")
}

// verify Hijacker interface implementation
var _ http.Hijacker = &GzipResponseWriter{}

// MustNewGzipLevelHandler behaves just like NewGzipLevelHandler except that in
// an error case it panics rather than returning an error.
func MustNewGzipLevelHandler(level int) func(http.Handler) http.Handler {
	wrap, err := NewGzipLevelHandler(level)
	if err != nil {
		panic(err)
	}
	return wrap
}

// NewGzipLevelHandler returns a wrapper function (often known as middleware)
// which can be used to wrap an HTTP handler to transparently gzip the response
// body if the client supports it (via the Accept-Encoding header). Responses will
// be encoded at the given gzip compression level. An error will be returned only
// if an invalid gzip compression level is given, so if one can ensure the level
// is valid, the returned error can be safely ignored.
func NewGzipLevelHandler(level int) (func(http.Handler) http.Handler, error) {
	return NewGzipLevelAndMinSize(level, DefaultMinSize)
}

// NewGzipLevelAndMinSize behaves like NewGzipLevelHandler except it lets the
// caller specify the minimum size before compression.
func NewGzipLevelAndMinSize(level, minSize int) (func(http.Handler) http.Handler, error) {
	return GzipHandlerWithOpts(CompressionLevel(level), MinSize(minSize))
}

func GzipHandlerWithOpts(opts ...option) (func(http.Handler) http.Handler, error) {
	c := &config{
		level:   gzip.DefaultCompression,
		minSize: DefaultMinSize,
	}

	for _, o := range opts {
		o(c)
	}

	if err := c.validate(); err != nil {
		return nil, err
	}

	return func(h http.Handler) http.Handler {
		index := poolIndex(c.level)

		return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
			w.Header().Add(vary, acceptEncoding)
			if acceptsGzip(r) {
				gw := &GzipResponseWriter{
					ResponseWriter: w,
					index:          index,
					minSize:        c.minSize,
					contentTypes:   c.contentTypes,
				}
				defer gw.Close()

				if _, ok := w.(http.CloseNotifier); ok {
					gwcn := GzipResponseWriterWithCloseNotify{gw}
					h.ServeHTTP(gwcn, r)
				} else {
					h.ServeHTTP(gw, r)
				}
			} else {
				h.ServeHTTP(w, r)
			}
		})
	}, nil
}

// Parsed representation of one of the inputs to ContentTypes.
// See https://golang.org/pkg/mime/#ParseMediaType
type parsedContentType struct {
	mediaType string
	params    map[string]string
}

// equals returns whether this content type matches another content type.
func (pct parsedContentType) equals(mediaType string, params map[string]string) bool {
	if pct.mediaType != mediaType {
		return false
	}
	// if pct has no params, don't care about other's params
	if len(pct.params) == 0 {
		return true
	}

	// if pct has any params, they must be identical to other's.
	if len(pct.params) != len(params) {
		return false
	}
	for k, v := range pct.params {
		if w, ok := params[k]; !ok || v != w {
			return false
		}
	}
	return true
}

// Used for functional configuration.
type config struct {
	minSize      int
	level        int
	contentTypes []parsedContentType
}

func (c *config) validate() error {
	if c.level != gzip.DefaultCompression && (c.level < gzip.BestSpeed || c.level > gzip.BestCompression) {
		return fmt.Errorf("invalid compression level requested: %d", c.level)
	}

	if c.minSize < 0 {
		return fmt.Errorf("minimum size must not be negative")
	}

	return nil
}

type option func(c *config)

func MinSize(size int) option {
	return func(c *config) {
		c.minSize = size
	}
}

func CompressionLevel(level int) option {
	return func(c *config) {
		c.level = level
	}
}

// ContentTypes specifies a list of content types to compare
// the Content-Type header to before compressing. If none
// match, the response will be returned as-is.
//
// Content types are compared in a case-insensitive, whitespace-ignored
// manner.
//
// A MIME type without any other directive will match a content type
// that has the same MIME type, regardless of that content type's other
// directives. I.e., "text/html" will match both "text/html" and
// "text/html; charset=utf-8".
//
// A MIME type with any other directive will only match a content type
// that has the same MIME type and other directives. I.e.,
// "text/html; charset=utf-8" will only match "text/html; charset=utf-8".
//
// By default, responses are gzipped regardless of
// Content-Type.
func ContentTypes(types []string) option {
	return func(c *config) {
		c.contentTypes = []parsedContentType{}
		for _, v := range types {
			mediaType, params, err := mime.ParseMediaType(v)
			if err == nil {
				c.contentTypes = append(c.contentTypes, parsedContentType{mediaType, params})
			}
		}
	}
}

// GzipHandler wraps an HTTP handler, to transparently gzip the response body if
// the client supports it (via the Accept-Encoding header). This will compress at
// the default compression level.
func GzipHandler(h http.Handler) http.Handler {
	wrapper, _ := NewGzipLevelHandler(gzip.DefaultCompression)
	return wrapper(h)
}

// acceptsGzip returns true if the given HTTP request indicates that it will
// accept a gzipped response.
func acceptsGzip(r *http.Request) bool {
	acceptedEncodings, _ := parseEncodings(r.Header.Get(acceptEncoding))
	return acceptedEncodings["gzip"] > 0.0
}

// returns true if we've been configured to compress the specific content type.
func handleContentType(contentTypes []parsedContentType, ct string) bool {
	// If contentTypes is empty we handle all content types.
	if len(contentTypes) == 0 {
		return true
	}

	mediaType, params, err := mime.ParseMediaType(ct)
	if err != nil {
		return false
	}

	for _, c := range contentTypes {
		if c.equals(mediaType, params) {
			return true
		}
	}

	return false
}

// parseEncodings attempts to parse a list of codings, per RFC 2616, as might
// appear in an Accept-Encoding header. It returns a map of content-codings to
// quality values, and an error containing the errors encountered. It's probably
// safe to ignore those, because silently ignoring errors is how the internet
// works.
//
// See: http://tools.ietf.org/html/rfc2616#section-14.3.
func parseEncodings(s string) (codings, error) {
	c := make(codings)
	var e []string

	for _, ss := range strings.Split(s, ",") {
		coding, qvalue, err := parseCoding(ss)

		if err != nil {
			e = append(e, err.Error())
		} else {
			c[coding] = qvalue
		}
	}

	// TODO (adammck): Use a proper multi-error struct, so the individual errors
	// can be extracted if anyone cares.
	if len(e) > 0 {
		return c, fmt.Errorf("errors while parsing encodings: %s", strings.Join(e, ", "))
	}

	return c, nil
}

// parseCoding parses a single coding (content-coding with an optional qvalue),
// as might appear in an Accept-Encoding header. It attempts to forgive minor
// formatting errors.
func parseCoding(s string) (coding string, qvalue float64, err error) {
	for n, part := range strings.Split(s, ";") {
		part = strings.TrimSpace(part)
		qvalue = DefaultQValue

		if n == 0 {
			coding = strings.ToLower(part)
		} else if strings.HasPrefix(part, "q=") {
			qvalue, err = strconv.ParseFloat(strings.TrimPrefix(part, "q="), 64)

			if qvalue < 0.0 {
				qvalue = 0.0
			} else if qvalue > 1.0 {
				qvalue = 1.0
			}
		}
	}

	if coding == "" {
		err = fmt.Errorf("empty content-coding")
	}

	return
}
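Because `acceptsGzip` above only reports true when the parsed quality value for `gzip` is greater than zero, a client can opt out of compression with `Accept-Encoding: gzip;q=0`. A small self-contained demonstration of that behavior using `net/http/httptest`; this is a sketch, not part of the vendored file, and the 2000-byte body is arbitrary but above DefaultMinSize:

```go
package main

import (
	"fmt"
	"io"
	"net/http"
	"net/http/httptest"
	"strings"

	"github.com/NYTimes/gziphandler"
)

func main() {
	h := gziphandler.GzipHandler(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		w.Header().Set("Content-Type", "text/plain")
		io.WriteString(w, strings.Repeat("x", 2000)) // comfortably above DefaultMinSize (1400)
	}))

	for _, accept := range []string{"gzip", "gzip;q=0.8", "gzip;q=0"} {
		req := httptest.NewRequest("GET", "/", nil)
		req.Header.Set("Accept-Encoding", accept)
		rec := httptest.NewRecorder()
		h.ServeHTTP(rec, req)
		// q=0 yields an empty Content-Encoding: the response stays uncompressed.
		fmt.Printf("%-11s -> Content-Encoding: %q\n", accept, rec.Header().Get("Content-Encoding"))
	}
}
```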
43
e2e/vendor/github.com/NYTimes/gziphandler/gzip_go18.go
generated
vendored
Normal file
@ -0,0 +1,43 @@
// +build go1.8

package gziphandler

import "net/http"

// Push initiates an HTTP/2 server push.
// Push returns ErrNotSupported if the client has disabled push or if push
// is not supported on the underlying connection.
func (w *GzipResponseWriter) Push(target string, opts *http.PushOptions) error {
	pusher, ok := w.ResponseWriter.(http.Pusher)
	if ok && pusher != nil {
		return pusher.Push(target, setAcceptEncodingForPushOptions(opts))
	}
	return http.ErrNotSupported
}

// setAcceptEncodingForPushOptions sets "Accept-Encoding" : "gzip" for PushOptions without overriding existing headers.
func setAcceptEncodingForPushOptions(opts *http.PushOptions) *http.PushOptions {
	if opts == nil {
		opts = &http.PushOptions{
			Header: http.Header{
				acceptEncoding: []string{"gzip"},
			},
		}
		return opts
	}

	if opts.Header == nil {
		opts.Header = http.Header{
			acceptEncoding: []string{"gzip"},
		}
		return opts
	}

	if encoding := opts.Header.Get(acceptEncoding); encoding == "" {
		opts.Header.Add(acceptEncoding, "gzip")
		return opts
	}

	return opts
}
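Since the wrapped writer gains this `Push` method on Go 1.8+, a handler running behind `GzipHandler` can initiate HTTP/2 server push through the standard `http.Pusher` assertion, and the pushed request carries `Accept-Encoding: gzip` thanks to `setAcceptEncodingForPushOptions`. A hedged sketch; the asset path and certificate files are placeholders:

```go
package main

import (
	"io"
	"net/http"

	"github.com/NYTimes/gziphandler"
)

func main() {
	page := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		// Behind GzipHandler, w wraps a *GzipResponseWriter, which implements
		// http.Pusher via the Push method above; the assertion succeeds only
		// on an HTTP/2 connection that permits push.
		if pusher, ok := w.(http.Pusher); ok {
			_ = pusher.Push("/static/site.css", nil) // placeholder asset path
		}
		w.Header().Set("Content-Type", "text/html")
		io.WriteString(w, `<link rel="stylesheet" href="/static/site.css">`)
	})

	http.Handle("/", gziphandler.GzipHandler(page))
	// HTTP/2 (and therefore push) requires TLS; cert.pem and key.pem are placeholders.
	http.ListenAndServeTLS(":8443", "cert.pem", "key.pem", nil)
}
```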
18
e2e/vendor/github.com/antlr4-go/antlr/v4/.gitignore
generated
vendored
Normal file
@ -0,0 +1,18 @@
### Go template

# Binaries for programs and plugins
*.exe
*.exe~
*.dll
*.so
*.dylib

# Test binary, built with `go test -c`
*.test

# Go workspace file
go.work

# No Goland stuff in this repo
.idea
28
e2e/vendor/github.com/antlr4-go/antlr/v4/LICENSE
generated
vendored
Normal file
@ -0,0 +1,28 @@
Copyright (c) 2012-2023 The ANTLR Project. All rights reserved.

Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:

1. Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.

2. Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.

3. Neither name of copyright holders nor the names of its contributors
may be used to endorse or promote products derived from this software
without specific prior written permission.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR
CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
54
e2e/vendor/github.com/antlr4-go/antlr/v4/README.md
generated
vendored
Normal file
@ -0,0 +1,54 @@
[Go Report Card](https://goreportcard.com/report/github.com/antlr4-go/antlr)
[Go Reference](https://pkg.go.dev/github.com/antlr4-go/antlr)
[Release](https://github.com/antlr4-go/antlr/releases/latest)
[Release Date](https://github.com/antlr4-go/antlr/releases/latest)
[Maintenance](https://github.com/antlr4-go/antlr/commit-activity)
[License: BSD-3-Clause](https://opensource.org/licenses/BSD-3-Clause)
[GitHub Stars](https://GitHub.com/Naereen/StrapDown.js/stargazers/)

# ANTLR4 Go Runtime Module Repo

IMPORTANT: Please submit PRs via a clone of the https://github.com/antlr/antlr4 repo, and not here.

- Do not submit PRs or any change requests to this repo
- This repo is read only and is updated by the ANTLR team to create a new release of the Go Runtime for ANTLR
- This repo contains the Go runtime that your generated projects should import

## Introduction

This repo contains the official modules for the Go Runtime for ANTLR. It is a copy of the runtime maintained
at: https://github.com/antlr/antlr4/tree/master/runtime/Go/antlr and is automatically updated by the ANTLR team to create
the official Go runtime release only. No development work is carried out in this repo and PRs are not accepted here.

The dev branch of this repo is kept in sync with the dev branch of the main ANTLR repo and is updated periodically.

### Why?

The `go get` command is unable to retrieve the Go runtime when it is embedded so
deeply in the main repo. A `go get` against the `antlr/antlr4` repo, while retrieving the correct source code for the runtime,
does not correctly resolve tags and will create a reference in your `go.mod` file that is unclear, will not upgrade smoothly and
causes confusion.

For instance, the current Go runtime release, which is tagged with v4.13.0 in `antlr/antlr4`, is retrieved by `go get` as:

```sh
require (
    github.com/antlr/antlr4/runtime/Go/antlr/v4 v4.0.0-20230219212500-1f9a474cc2dc
)
```

Where you would expect to see:

```sh
require (
    github.com/antlr/antlr4/runtime/Go/antlr/v4 v4.13.0
)
```

The decision was taken to create a separate org in a separate repo to hold the official Go runtime for ANTLR,
from which users can expect `go get` to behave as expected.

# Documentation

Please read the official documentation at: https://github.com/antlr/antlr4/blob/master/doc/index.md for tips on
migrating existing projects to use the new module location and for information on how to use the Go runtime in
general.
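Concretely, fetching the runtime from this repo resolves to a real release tag; a minimal illustration (not text from the README itself):

```sh
go get github.com/antlr4-go/antlr/v4
```

which records a requirement of the form `github.com/antlr4-go/antlr/v4 v4.x.y` in your `go.mod`.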
102
e2e/vendor/github.com/antlr4-go/antlr/v4/antlrdoc.go
generated
vendored
Normal file
@ -0,0 +1,102 @@
/*
Package antlr implements the Go version of the ANTLR 4 runtime.

# The ANTLR Tool

ANTLR (ANother Tool for Language Recognition) is a powerful parser generator for reading, processing, executing,
or translating structured text or binary files. It's widely used to build languages, tools, and frameworks.
From a grammar, ANTLR generates a parser that can build parse trees and also generates a listener interface
(or visitor) that makes it easy to respond to the recognition of phrases of interest.

# Go Runtime

At version 4.11.x and prior, the Go runtime was not properly versioned for go modules. After this point, the runtime
source code to be imported was held in the `runtime/Go/antlr/v4` directory, and the go.mod file was updated to reflect the version of
ANTLR4 that it is compatible with (i.e. it uses the /v4 path).

However, this was found to be problematic, as it meant that with the runtime embedded so far underneath the root
of the repo, the `go get` and related commands could not properly resolve the location of the go runtime source code.
This meant that the reference to the runtime in your `go.mod` file would refer to the correct source code, but would not
list the release tag such as @4.12.0 - this was confusing, to say the least.

As of 4.12.1, the runtime is now available as a go module in its own repo, and can be imported as `github.com/antlr4-go/antlr`
(the go get command should also be used with this path). See the main documentation for the ANTLR4 project for more information,
which is available at [ANTLR docs]. The documentation for using the Go runtime is available at [Go runtime docs].

This means that if you are using the source code without modules, you should also use the source code in the [new repo],
though we highly recommend that you use go modules, as they are now idiomatic for Go.

I am aware that this change will prove Hyrum's Law, but am prepared to live with it for the common good.

Go runtime author: [Jim Idle] jimi@idle.ws

# Code Generation

ANTLR supports the generation of code in a number of [target languages], and the generated code is supported by a
runtime library, written specifically to support the generated code in the target language. This library is the
runtime for the Go target.

To generate code for the go target, it is generally recommended to place the source grammar files in a package of
their own, and use the `.sh` script method of generating code, using the go generate directive. In that same directory
it is usual, though not required, to place the antlr tool that should be used to generate the code. That does mean
that the antlr tool JAR file will be checked in to your source code control though, so you are, of course, free to use any other
way of specifying the version of the ANTLR tool to use, such as aliasing in `.zshrc` or equivalent, or a profile in
your IDE, or configuration in your CI system. Checking in the jar does mean that it is easy to reproduce the build as
it was at any point in its history.

Here is a general/recommended template for an ANTLR based recognizer in Go:

	.
	├── parser
	│     ├── mygrammar.g4
	│     ├── antlr-4.12.1-complete.jar
	│     ├── generate.go
	│     └── generate.sh
	├── parsing  - generated code goes here
	│     └── error_listeners.go
	├── go.mod
	├── go.sum
	├── main.go
	└── main_test.go

Make sure that the package statement in your grammar file(s) reflects the go package the generated code will exist in.

The generate.go file then looks like this:

	package parser

	//go:generate ./generate.sh

And the generate.sh file will look similar to this:

	#!/bin/sh

	alias antlr4='java -Xmx500M -cp "./antlr4-4.12.1-complete.jar:$CLASSPATH" org.antlr.v4.Tool'
	antlr4 -Dlanguage=Go -no-visitor -package parsing *.g4

depending on whether you want visitors or listeners or any other ANTLR options. Note that another option here
is to generate the code into a package or directory of your own choosing.

From the command line at the root of your source package (location of go.mod) you can then simply issue the command:

	go generate ./...

This will generate the code for the parser and place it in the parsing package. You can then use the generated code
by importing the parsing package.

There are no hard and fast rules on this. It is just a recommendation. You can generate the code in any way and to anywhere you like.

# Copyright Notice

Copyright (c) 2012-2023 The ANTLR Project. All rights reserved.

Use of this file is governed by the BSD 3-clause license, which can be found in the [LICENSE.txt] file in the project root.

[target languages]: https://github.com/antlr/antlr4/tree/master/runtime
[LICENSE.txt]: https://github.com/antlr/antlr4/blob/master/LICENSE.txt
[ANTLR docs]: https://github.com/antlr/antlr4/blob/master/doc/index.md
[new repo]: https://github.com/antlr4-go/antlr
[Jim Idle]: https://github.com/jimidle
[Go runtime docs]: https://github.com/antlr/antlr4/blob/master/doc/go-target.md
*/
package antlr
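To make the workflow above concrete, a minimal driver might look like the sketch below. `MyGrammar`, the `example.com/myproject/parsing` package path, `NewMyGrammarLexer`/`NewMyGrammarParser` and the `Expr` start rule are all hypothetical names derived from your own grammar; only the `antlr.*` calls come from this runtime:

```go
package main

import (
	"fmt"

	"github.com/antlr4-go/antlr/v4"

	"example.com/myproject/parsing" // hypothetical generated package
)

func main() {
	input := antlr.NewInputStream("1 + 2 * 3")
	lexer := parsing.NewMyGrammarLexer(input) // hypothetical generated lexer
	tokens := antlr.NewCommonTokenStream(lexer, antlr.TokenDefaultChannel)
	parser := parsing.NewMyGrammarParser(tokens) // hypothetical generated parser

	tree := parser.Expr() // hypothetical start rule
	fmt.Println(tree.ToStringTree(nil, parser))
}
```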
179
e2e/vendor/github.com/antlr4-go/antlr/v4/atn.go
generated
vendored
Normal file
@ -0,0 +1,179 @@
// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
// Use of this file is governed by the BSD 3-clause license that
// can be found in the LICENSE.txt file in the project root.

package antlr

import "sync"

// ATNInvalidAltNumber is used to represent an ALT number that has yet to be calculated or
// which is invalid for a particular struct such as [*antlr.BaseRuleContext].
var ATNInvalidAltNumber int

// ATN represents an “[Augmented Transition Network]”, though in ANTLR the term
// “Augmented Recursive Transition Network” is generally used; some descriptions of a
// “[Recursive Transition Network]” also exist.
//
// ATNs represent the main networks in the system and are serialized by the code generator and support [ALL(*)].
//
// [Augmented Transition Network]: https://en.wikipedia.org/wiki/Augmented_transition_network
// [ALL(*)]: https://www.antlr.org/papers/allstar-techreport.pdf
// [Recursive Transition Network]: https://en.wikipedia.org/wiki/Recursive_transition_network
type ATN struct {

	// DecisionToState is the decision points for all rules, sub-rules, optional
	// blocks, ()+, ()*, etc. Each sub-rule/rule is a decision point, and we must
	// track them so we can go back later and build DFA predictors for them.
	DecisionToState []DecisionState

	// grammarType is the ATN type and is used for deserializing ATNs from strings.
	grammarType int

	// lexerActions is referenced by action transitions in the ATN for lexer ATNs.
	lexerActions []LexerAction

	// maxTokenType is the maximum value for any symbol recognized by a transition in the ATN.
	maxTokenType int

	modeNameToStartState map[string]*TokensStartState

	modeToStartState []*TokensStartState

	// ruleToStartState maps from rule index to starting state number.
	ruleToStartState []*RuleStartState

	// ruleToStopState maps from rule index to stop state number.
	ruleToStopState []*RuleStopState

	// ruleToTokenType maps the rule index to the resulting token type for lexer
	// ATNs. For parser ATNs, it maps the rule index to the generated bypass token
	// type if ATNDeserializationOptions.isGenerateRuleBypassTransitions was
	// specified, and otherwise is nil.
	ruleToTokenType []int

	// states is a list of all states in the ATN, ordered by state number.
	states []ATNState

	mu      sync.Mutex
	stateMu sync.RWMutex
	edgeMu  sync.RWMutex
}

// NewATN returns a new ATN struct representing the given grammarType and is used
// for runtime deserialization of ATNs from the code generated by the ANTLR tool.
func NewATN(grammarType int, maxTokenType int) *ATN {
	return &ATN{
		grammarType:          grammarType,
		maxTokenType:         maxTokenType,
		modeNameToStartState: make(map[string]*TokensStartState),
	}
}

// NextTokensInContext computes and returns the set of valid tokens that can occur starting
// in state s. If ctx is nil, the set of tokens will not include what can follow
// the rule surrounding s. In other words, the set will be restricted to tokens
// reachable staying within the rule of s.
func (a *ATN) NextTokensInContext(s ATNState, ctx RuleContext) *IntervalSet {
	return NewLL1Analyzer(a).Look(s, nil, ctx)
}

// NextTokensNoContext computes and returns the set of valid tokens that can occur starting
// in state s and staying in the same rule. [antlr.Token.EPSILON] is in the set if we reach
// the end of the rule.
func (a *ATN) NextTokensNoContext(s ATNState) *IntervalSet {
	a.mu.Lock()
	defer a.mu.Unlock()
	iset := s.GetNextTokenWithinRule()
	if iset == nil {
		iset = a.NextTokensInContext(s, nil)
		iset.readOnly = true
		s.SetNextTokenWithinRule(iset)
	}
	return iset
}

// NextTokens computes and returns the set of valid tokens starting in state s, by
// calling either [NextTokensNoContext] (ctx == nil) or [NextTokensInContext] (ctx != nil).
func (a *ATN) NextTokens(s ATNState, ctx RuleContext) *IntervalSet {
	if ctx == nil {
		return a.NextTokensNoContext(s)
	}

	return a.NextTokensInContext(s, ctx)
}

func (a *ATN) addState(state ATNState) {
	if state != nil {
		state.SetATN(a)
		state.SetStateNumber(len(a.states))
	}

	a.states = append(a.states, state)
}

func (a *ATN) removeState(state ATNState) {
	a.states[state.GetStateNumber()] = nil // Just free the memory; don't shift states in the slice
}

func (a *ATN) defineDecisionState(s DecisionState) int {
	a.DecisionToState = append(a.DecisionToState, s)
	s.setDecision(len(a.DecisionToState) - 1)

	return s.getDecision()
}

func (a *ATN) getDecisionState(decision int) DecisionState {
	if len(a.DecisionToState) == 0 {
		return nil
	}

	return a.DecisionToState[decision]
}

// getExpectedTokens computes the set of input symbols which could follow ATN
// state number stateNumber in the specified full parse context ctx and returns
// the set of potentially valid input symbols which could follow the specified
// state in the specified context. This method considers the complete parser
// context, but does not evaluate semantic predicates (i.e. all predicates
// encountered during the calculation are assumed true). If a path in the ATN
// exists from the starting state to the RuleStopState of the outermost context
// without Matching any symbols, Token.EOF is added to the returned set.
//
// A nil ctx defaults to ParserRuleContext.EMPTY.
//
// It panics if the ATN does not contain state stateNumber.
func (a *ATN) getExpectedTokens(stateNumber int, ctx RuleContext) *IntervalSet {
	if stateNumber < 0 || stateNumber >= len(a.states) {
		panic("Invalid state number.")
	}

	s := a.states[stateNumber]
	following := a.NextTokens(s, nil)

	if !following.contains(TokenEpsilon) {
		return following
	}

	expected := NewIntervalSet()

	expected.addSet(following)
	expected.removeOne(TokenEpsilon)

	for ctx != nil && ctx.GetInvokingState() >= 0 && following.contains(TokenEpsilon) {
		invokingState := a.states[ctx.GetInvokingState()]
		rt := invokingState.GetTransitions()[0]

		following = a.NextTokens(rt.(*RuleTransition).followState, nil)
		expected.addSet(following)
		expected.removeOne(TokenEpsilon)
		ctx = ctx.GetParent().(RuleContext)
	}

	if following.contains(TokenEpsilon) {
		expected.addOne(TokenEOF)
	}

	return expected
}
335
e2e/vendor/github.com/antlr4-go/antlr/v4/atn_config.go
generated
vendored
Normal file
@ -0,0 +1,335 @@
// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
// Use of this file is governed by the BSD 3-clause license that
// can be found in the LICENSE.txt file in the project root.

package antlr

import (
	"fmt"
)

const (
	lexerConfig  = iota // Indicates that this ATNConfig is for a lexer
	parserConfig        // Indicates that this ATNConfig is for a parser
)

// ATNConfig is a tuple: (ATN state, predicted alt, syntactic, semantic
// context). The syntactic context is a graph-structured stack node whose
// path(s) to the root is the rule invocation(s) chain used to arrive in the
// state. The semantic context is the tree of semantic predicates encountered
// before reaching an ATN state.
type ATNConfig struct {
	precedenceFilterSuppressed     bool
	state                          ATNState
	alt                            int
	context                        *PredictionContext
	semanticContext                SemanticContext
	reachesIntoOuterContext        int
	cType                          int // lexerConfig or parserConfig
	lexerActionExecutor            *LexerActionExecutor
	passedThroughNonGreedyDecision bool
}

// NewATNConfig6 creates a new ATNConfig instance given a state, alt and context only
func NewATNConfig6(state ATNState, alt int, context *PredictionContext) *ATNConfig {
	return NewATNConfig5(state, alt, context, SemanticContextNone)
}

// NewATNConfig5 creates a new ATNConfig instance given a state, alt, context and semantic context
func NewATNConfig5(state ATNState, alt int, context *PredictionContext, semanticContext SemanticContext) *ATNConfig {
	if semanticContext == nil {
		panic("semanticContext cannot be nil") // TODO: Necessary?
	}

	pac := &ATNConfig{}
	pac.state = state
	pac.alt = alt
	pac.context = context
	pac.semanticContext = semanticContext
	pac.cType = parserConfig
	return pac
}

// NewATNConfig4 creates a new ATNConfig instance given an existing config, and a state only
func NewATNConfig4(c *ATNConfig, state ATNState) *ATNConfig {
	return NewATNConfig(c, state, c.GetContext(), c.GetSemanticContext())
}

// NewATNConfig3 creates a new ATNConfig instance given an existing config, a state and a semantic context
func NewATNConfig3(c *ATNConfig, state ATNState, semanticContext SemanticContext) *ATNConfig {
	return NewATNConfig(c, state, c.GetContext(), semanticContext)
}

// NewATNConfig2 creates a new ATNConfig instance given an existing config, and a semantic context only
func NewATNConfig2(c *ATNConfig, semanticContext SemanticContext) *ATNConfig {
	return NewATNConfig(c, c.GetState(), c.GetContext(), semanticContext)
}

// NewATNConfig1 creates a new ATNConfig instance given an existing config, a state, and a context only
func NewATNConfig1(c *ATNConfig, state ATNState, context *PredictionContext) *ATNConfig {
	return NewATNConfig(c, state, context, c.GetSemanticContext())
}

// NewATNConfig creates a new ATNConfig instance given an existing config, a state, a context and a semantic context;
// the other 'constructors' are just wrappers around this one.
func NewATNConfig(c *ATNConfig, state ATNState, context *PredictionContext, semanticContext SemanticContext) *ATNConfig {
	if semanticContext == nil {
		panic("semanticContext cannot be nil") // TODO: Remove this - probably put here for some bug that is now fixed
	}
	b := &ATNConfig{}
	b.InitATNConfig(c, state, c.GetAlt(), context, semanticContext)
	b.cType = parserConfig
	return b
}

// InitATNConfig initializes this config from an existing config c, with the
// given state, alt, context and semantic context.
func (a *ATNConfig) InitATNConfig(c *ATNConfig, state ATNState, alt int, context *PredictionContext, semanticContext SemanticContext) {
	a.state = state
	a.alt = alt
	a.context = context
	a.semanticContext = semanticContext
	a.reachesIntoOuterContext = c.GetReachesIntoOuterContext()
	a.precedenceFilterSuppressed = c.getPrecedenceFilterSuppressed()
}

func (a *ATNConfig) getPrecedenceFilterSuppressed() bool {
	return a.precedenceFilterSuppressed
}

func (a *ATNConfig) setPrecedenceFilterSuppressed(v bool) {
	a.precedenceFilterSuppressed = v
}

// GetState returns the ATN state associated with this configuration
func (a *ATNConfig) GetState() ATNState {
	return a.state
}

// GetAlt returns the alternative associated with this configuration
func (a *ATNConfig) GetAlt() int {
	return a.alt
}

// SetContext sets the rule invocation stack associated with this configuration
func (a *ATNConfig) SetContext(v *PredictionContext) {
	a.context = v
}

// GetContext returns the rule invocation stack associated with this configuration
func (a *ATNConfig) GetContext() *PredictionContext {
	return a.context
}

// GetSemanticContext returns the semantic context associated with this configuration
func (a *ATNConfig) GetSemanticContext() SemanticContext {
	return a.semanticContext
}

// GetReachesIntoOuterContext returns the count of references to an outer context from this configuration
func (a *ATNConfig) GetReachesIntoOuterContext() int {
	return a.reachesIntoOuterContext
}

// SetReachesIntoOuterContext sets the count of references to an outer context from this configuration
func (a *ATNConfig) SetReachesIntoOuterContext(v int) {
	a.reachesIntoOuterContext = v
}

// Equals is the default comparison function for an ATNConfig when no specialist implementation is required
// for a collection.
//
// An ATN configuration is equal to another if both have the same state, they
// predict the same alternative, and syntactic/semantic contexts are the same.
func (a *ATNConfig) Equals(o Collectable[*ATNConfig]) bool {
	switch a.cType {
	case lexerConfig:
		return a.LEquals(o)
	case parserConfig:
		return a.PEquals(o)
	default:
		panic("Invalid ATNConfig type")
	}
}

// PEquals is the default comparison function for a Parser ATNConfig when no specialist implementation is required
// for a collection.
//
// An ATN configuration is equal to another if both have the same state, they
// predict the same alternative, and syntactic/semantic contexts are the same.
func (a *ATNConfig) PEquals(o Collectable[*ATNConfig]) bool {
	var other, ok = o.(*ATNConfig)

	if !ok {
		return false
	}
	if a == other {
		return true
	} else if other == nil {
		return false
	}

	var equal bool

	if a.context == nil {
		equal = other.context == nil
	} else {
		equal = a.context.Equals(other.context)
	}

	var (
		nums = a.state.GetStateNumber() == other.state.GetStateNumber()
		alts = a.alt == other.alt
		cons = a.semanticContext.Equals(other.semanticContext)
		sups = a.precedenceFilterSuppressed == other.precedenceFilterSuppressed
	)

	return nums && alts && cons && sups && equal
}

// Hash is the default hash function for an ATNConfig, when no specialist hash function
// is required for a collection
func (a *ATNConfig) Hash() int {
	switch a.cType {
	case lexerConfig:
		return a.LHash()
	case parserConfig:
		return a.PHash()
	default:
		panic("Invalid ATNConfig type")
	}
}

// PHash is the default hash function for a parser ATNConfig, when no specialist hash function
// is required for a collection
func (a *ATNConfig) PHash() int {
	var c int
	if a.context != nil {
		c = a.context.Hash()
	}

	h := murmurInit(7)
	h = murmurUpdate(h, a.state.GetStateNumber())
	h = murmurUpdate(h, a.alt)
	h = murmurUpdate(h, c)
	h = murmurUpdate(h, a.semanticContext.Hash())
	return murmurFinish(h, 4)
}

// String returns a string representation of the ATNConfig, usually used for debugging purposes
func (a *ATNConfig) String() string {
	var s1, s2, s3 string

	if a.context != nil {
		s1 = ",[" + fmt.Sprint(a.context) + "]"
	}

	if a.semanticContext != SemanticContextNone {
		s2 = "," + fmt.Sprint(a.semanticContext)
	}

	if a.reachesIntoOuterContext > 0 {
		s3 = ",up=" + fmt.Sprint(a.reachesIntoOuterContext)
	}

	return fmt.Sprintf("(%v,%v%v%v%v)", a.state, a.alt, s1, s2, s3)
}

// NewLexerATNConfig6 creates a lexer ATNConfig from a state, alt and context.
func NewLexerATNConfig6(state ATNState, alt int, context *PredictionContext) *ATNConfig {
	lac := &ATNConfig{}
	lac.state = state
	lac.alt = alt
	lac.context = context
	lac.semanticContext = SemanticContextNone
	lac.cType = lexerConfig
	return lac
}

// NewLexerATNConfig4 creates a lexer ATNConfig from an existing config and a new state.
func NewLexerATNConfig4(c *ATNConfig, state ATNState) *ATNConfig {
	lac := &ATNConfig{}
	lac.lexerActionExecutor = c.lexerActionExecutor
	lac.passedThroughNonGreedyDecision = checkNonGreedyDecision(c, state)
	lac.InitATNConfig(c, state, c.GetAlt(), c.GetContext(), c.GetSemanticContext())
	lac.cType = lexerConfig
	return lac
}

// NewLexerATNConfig3 creates a lexer ATNConfig from an existing config, a new state and a lexer action executor.
func NewLexerATNConfig3(c *ATNConfig, state ATNState, lexerActionExecutor *LexerActionExecutor) *ATNConfig {
	lac := &ATNConfig{}
	lac.lexerActionExecutor = lexerActionExecutor
	lac.passedThroughNonGreedyDecision = checkNonGreedyDecision(c, state)
	lac.InitATNConfig(c, state, c.GetAlt(), c.GetContext(), c.GetSemanticContext())
	lac.cType = lexerConfig
	return lac
}

// NewLexerATNConfig2 creates a lexer ATNConfig from an existing config, a new state and a context.
func NewLexerATNConfig2(c *ATNConfig, state ATNState, context *PredictionContext) *ATNConfig {
	lac := &ATNConfig{}
	lac.lexerActionExecutor = c.lexerActionExecutor
	lac.passedThroughNonGreedyDecision = checkNonGreedyDecision(c, state)
	lac.InitATNConfig(c, state, c.GetAlt(), context, c.GetSemanticContext())
	lac.cType = lexerConfig
	return lac
}

// NewLexerATNConfig1 creates a lexer ATNConfig from a state, alt and context.
//
//goland:noinspection GoUnusedExportedFunction
func NewLexerATNConfig1(state ATNState, alt int, context *PredictionContext) *ATNConfig {
	lac := &ATNConfig{}
	lac.state = state
	lac.alt = alt
	lac.context = context
	lac.semanticContext = SemanticContextNone
	lac.cType = lexerConfig
	return lac
}

// LHash is the default hash function for Lexer ATNConfig objects, it can be used directly or via
// the default comparator [ObjEqComparator].
func (a *ATNConfig) LHash() int {
	var f int
	if a.passedThroughNonGreedyDecision {
		f = 1
	} else {
		f = 0
	}
	h := murmurInit(7)
	h = murmurUpdate(h, a.state.GetStateNumber())
	h = murmurUpdate(h, a.alt)
	h = murmurUpdate(h, a.context.Hash())
	h = murmurUpdate(h, a.semanticContext.Hash())
	h = murmurUpdate(h, f)
	h = murmurUpdate(h, a.lexerActionExecutor.Hash())
	h = murmurFinish(h, 6)
	return h
}

// LEquals is the default comparison function for Lexer ATNConfig objects, it can be used directly or via
// the default comparator [ObjEqComparator].
func (a *ATNConfig) LEquals(other Collectable[*ATNConfig]) bool {
	var otherT, ok = other.(*ATNConfig)
	if !ok {
		return false
	} else if a == otherT {
		return true
	} else if a.passedThroughNonGreedyDecision != otherT.passedThroughNonGreedyDecision {
		return false
	}

	switch {
	case a.lexerActionExecutor == nil && otherT.lexerActionExecutor == nil:
		return true
	case a.lexerActionExecutor != nil && otherT.lexerActionExecutor != nil:
		if !a.lexerActionExecutor.Equals(otherT.lexerActionExecutor) {
			return false
		}
	default:
		return false // One, but not both, is nil
	}

	return a.PEquals(otherT)
}

func checkNonGreedyDecision(source *ATNConfig, target ATNState) bool {
	var ds, ok = target.(DecisionState)

	return source.passedThroughNonGreedyDecision || (ok && ds.getNonGreedy())
}
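An in-package sketch of the Equals/Hash contract that the JStore-backed collections rely on (purely illustrative; the state number is arbitrary and a nil context keeps the sketch minimal, which both PEquals and PHash tolerate):

```go
// Illustrative sketch, e.g. in a test inside package antlr (fmt imported):
// configs that agree on (state, alt, context, semantic context) must compare
// equal and hash identically, which is what lets config sets deduplicate them.
func sketchConfigEquality() {
	s := NewBasicState()
	s.SetStateNumber(7) // arbitrary state number for the illustration

	c1 := NewATNConfig6(s, 1, nil)
	c2 := NewATNConfig6(s, 1, nil)

	fmt.Println(c1.PEquals(c2))           // true
	fmt.Println(c1.PHash() == c2.PHash()) // true
}
```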
301
e2e/vendor/github.com/antlr4-go/antlr/v4/atn_config_set.go
generated
vendored
Normal file
@ -0,0 +1,301 @@
// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
// Use of this file is governed by the BSD 3-clause license that
// can be found in the LICENSE.txt file in the project root.

package antlr

import (
	"fmt"
)

// ATNConfigSet is a specialized set of ATNConfig that tracks information
// about its elements and can combine similar configurations using a
// graph-structured stack.
type ATNConfigSet struct {
	cachedHash int

	// configLookup is used to determine whether two ATNConfigSets are equal. We
	// need all configurations with the same (s, i, _, semctx) to be equal. A key
	// effectively doubles the number of objects associated with ATNConfigs. All
	// keys are hashed by (s, i, _, pi), not including the context. Wiped out when
	// read-only because a set becomes a DFA state.
	configLookup *JStore[*ATNConfig, Comparator[*ATNConfig]]

	// configs is the added elements that did not match an existing key in configLookup
	configs []*ATNConfig

	// TODO: These fields make me pretty uncomfortable, but it is nice to pack up
	// info together because it saves re-computation. Can we track conflicts as they
	// are added to save scanning configs later?
	conflictingAlts *BitSet

	// dipsIntoOuterContext is used by parsers and lexers. In a lexer, it indicates
	// we hit a pred while computing a closure operation. Do not make a DFA state
	// from the ATNConfigSet in this case. TODO: How is this used by parsers?
	dipsIntoOuterContext bool

	// fullCtx is whether it is part of a full context LL prediction. Used to
	// determine how to merge $. It is a wildcard with SLL, but not for an LL
	// context merge.
	fullCtx bool

	// Used in parser and lexer. In lexer, it indicates we hit a pred
	// while computing a closure operation. Don't make a DFA state from this set.
	hasSemanticContext bool

	// readOnly is whether it is read-only. Do not
	// allow any code to manipulate the set if true because DFA states will point at
	// sets and those must not change. If not, protect other fields; conflictingAlts
	// in particular, which is assigned after readOnly.
	readOnly bool

	// TODO: These fields make me pretty uncomfortable, but it is nice to pack up
	// info together because it saves re-computation. Can we track conflicts as they
	// are added to save scanning configs later?
	uniqueAlt int
}

// Alts returns the combined set of alts for all the configurations in this set.
func (b *ATNConfigSet) Alts() *BitSet {
	alts := NewBitSet()
	for _, it := range b.configs {
		alts.add(it.GetAlt())
	}
	return alts
}

// NewATNConfigSet creates a new ATNConfigSet instance.
func NewATNConfigSet(fullCtx bool) *ATNConfigSet {
	return &ATNConfigSet{
		cachedHash:   -1,
		configLookup: NewJStore[*ATNConfig, Comparator[*ATNConfig]](aConfCompInst, ATNConfigLookupCollection, "NewATNConfigSet()"),
		fullCtx:      fullCtx,
	}
}

// Add merges contexts with existing configs for (s, i, pi, _),
// where 's' is the ATNConfig.state, 'i' is the ATNConfig.alt, and
// 'pi' is the [ATNConfig].semanticContext.
//
// We use (s,i,pi) as the key.
// Updates dipsIntoOuterContext and hasSemanticContext when necessary.
func (b *ATNConfigSet) Add(config *ATNConfig, mergeCache *JPCMap) bool {
	if b.readOnly {
		panic("set is read-only")
	}

	if config.GetSemanticContext() != SemanticContextNone {
		b.hasSemanticContext = true
	}

	if config.GetReachesIntoOuterContext() > 0 {
		b.dipsIntoOuterContext = true
	}

	existing, present := b.configLookup.Put(config)

	// The config was not already in the set
	if !present {
		b.cachedHash = -1
		b.configs = append(b.configs, config) // Track order here
		return true
	}

	// Merge a previous (s, i, pi, _) with it and save the result
	rootIsWildcard := !b.fullCtx
	merged := merge(existing.GetContext(), config.GetContext(), rootIsWildcard, mergeCache)

	// No need to check for existing.context because config.context is in the cache,
	// since the only way to create new graphs is the "call rule" and here. We cache
	// at both places.
	existing.SetReachesIntoOuterContext(intMax(existing.GetReachesIntoOuterContext(), config.GetReachesIntoOuterContext()))

	// Preserve the precedence filter suppression during the merge
	if config.getPrecedenceFilterSuppressed() {
		existing.setPrecedenceFilterSuppressed(true)
	}

	// Replace the context because there is no need to do alt mapping
	existing.SetContext(merged)

	return true
}

// GetStates returns the set of states represented by all configurations in this config set
func (b *ATNConfigSet) GetStates() *JStore[ATNState, Comparator[ATNState]] {

	// states uses the standard comparator and Hash() provided by the ATNState instance
	states := NewJStore[ATNState, Comparator[ATNState]](aStateEqInst, ATNStateCollection, "ATNConfigSet.GetStates()")

	for i := 0; i < len(b.configs); i++ {
		states.Put(b.configs[i].GetState())
	}

	return states
}

// GetPredicates returns the semantic contexts of the configurations in this set,
// skipping SemanticContextNone.
func (b *ATNConfigSet) GetPredicates() []SemanticContext {
	predicates := make([]SemanticContext, 0)

	for i := 0; i < len(b.configs); i++ {
		c := b.configs[i].GetSemanticContext()

		if c != SemanticContextNone {
			predicates = append(predicates, c)
		}
	}

	return predicates
}

// OptimizeConfigs replaces the context of each config in this set with its
// cached equivalent from the given simulator.
func (b *ATNConfigSet) OptimizeConfigs(interpreter *BaseATNSimulator) {
	if b.readOnly {
		panic("set is read-only")
	}

	// Empty indicates no optimization is possible
	if b.configLookup == nil || b.configLookup.Len() == 0 {
		return
	}

	for i := 0; i < len(b.configs); i++ {
		config := b.configs[i]
		config.SetContext(interpreter.getCachedContext(config.GetContext()))
	}
}

// AddAll adds every config in coll to this set via Add.
func (b *ATNConfigSet) AddAll(coll []*ATNConfig) bool {
	for i := 0; i < len(coll); i++ {
		b.Add(coll[i], nil)
	}

	return false
}

// Compare reports whether the config lists are equal: they are only equal if they
// are in the same order and their Equals function returns true.
// Java uses ArrayList.equals(), which requires the same order.
func (b *ATNConfigSet) Compare(bs *ATNConfigSet) bool {
	if len(b.configs) != len(bs.configs) {
		return false
	}
	for i := 0; i < len(b.configs); i++ {
		if !b.configs[i].Equals(bs.configs[i]) {
			return false
		}
	}

	return true
}

func (b *ATNConfigSet) Equals(other Collectable[ATNConfig]) bool {
	if b == other {
		return true
	} else if _, ok := other.(*ATNConfigSet); !ok {
		return false
	}

	other2 := other.(*ATNConfigSet)
	var eca bool
	switch {
	case b.conflictingAlts == nil && other2.conflictingAlts == nil:
		eca = true
	case b.conflictingAlts != nil && other2.conflictingAlts != nil:
		eca = b.conflictingAlts.equals(other2.conflictingAlts)
	}
	return b.configs != nil &&
		b.fullCtx == other2.fullCtx &&
		b.uniqueAlt == other2.uniqueAlt &&
		eca &&
		b.hasSemanticContext == other2.hasSemanticContext &&
		b.dipsIntoOuterContext == other2.dipsIntoOuterContext &&
		b.Compare(other2)
}

func (b *ATNConfigSet) Hash() int {
	if b.readOnly {
		if b.cachedHash == -1 {
			b.cachedHash = b.hashCodeConfigs()
		}

		return b.cachedHash
	}

	return b.hashCodeConfigs()
}

func (b *ATNConfigSet) hashCodeConfigs() int {
	h := 1
	for _, config := range b.configs {
		h = 31*h + config.Hash()
	}
	return h
}

func (b *ATNConfigSet) Contains(item *ATNConfig) bool {
	if b.readOnly {
		panic("not implemented for read-only sets")
	}
	if b.configLookup == nil {
		return false
	}
	return b.configLookup.Contains(item)
}

func (b *ATNConfigSet) ContainsFast(item *ATNConfig) bool {
	return b.Contains(item)
}

func (b *ATNConfigSet) Clear() {
	if b.readOnly {
		panic("set is read-only")
	}
	b.configs = make([]*ATNConfig, 0)
	b.cachedHash = -1
	b.configLookup = NewJStore[*ATNConfig, Comparator[*ATNConfig]](aConfCompInst, ATNConfigLookupCollection, "NewATNConfigSet()")
}

func (b *ATNConfigSet) String() string {

	s := "["

	for i, c := range b.configs {
		s += c.String()

		if i != len(b.configs)-1 {
			s += ", "
		}
	}

	s += "]"

	if b.hasSemanticContext {
		s += ",hasSemanticContext=" + fmt.Sprint(b.hasSemanticContext)
	}

	if b.uniqueAlt != ATNInvalidAltNumber {
		s += ",uniqueAlt=" + fmt.Sprint(b.uniqueAlt)
	}

	if b.conflictingAlts != nil {
		s += ",conflictingAlts=" + b.conflictingAlts.String()
	}

	if b.dipsIntoOuterContext {
		s += ",dipsIntoOuterContext"
	}

	return s
}

// NewOrderedATNConfigSet creates a config set with a slightly different Hash/Equal pair
// for use in lexers.
func NewOrderedATNConfigSet() *ATNConfigSet {
	return &ATNConfigSet{
		cachedHash: -1,
		// This set uses the standard Hash() and Equals() from ATNConfig
		configLookup: NewJStore[*ATNConfig, Comparator[*ATNConfig]](aConfEqInst, ATNConfigCollection, "ATNConfigSet.NewOrderedATNConfigSet()"),
		fullCtx:      false,
	}
}
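Continuing the earlier config sketch, a rough in-package illustration of Add's merge behaviour: a second config with the same (state, alt, semantic context) key does not grow the set, it is merged into the existing entry (assumptions as before; nil contexts keep the sketch minimal):

```go
// Illustrative sketch inside package antlr (fmt imported):
func sketchConfigSetAdd() {
	s := NewBasicState()
	s.SetStateNumber(7)

	set := NewATNConfigSet(false) // fullCtx == false, i.e. SLL-style merging
	set.Add(NewATNConfig6(s, 1, nil), nil)
	set.Add(NewATNConfig6(s, 1, nil), nil) // same key: merged, not appended

	fmt.Println(len(set.configs)) // 1
}
```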
62
e2e/vendor/github.com/antlr4-go/antlr/v4/atn_deserialization_options.go
generated
vendored
Normal file
@ -0,0 +1,62 @@
// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
// Use of this file is governed by the BSD 3-clause license that
// can be found in the LICENSE.txt file in the project root.

package antlr

import "errors"

var defaultATNDeserializationOptions = ATNDeserializationOptions{true, true, false}

// ATNDeserializationOptions holds the options that control ATN deserialization:
// whether the options themselves may still be mutated, whether the deserialized
// ATN is verified, and whether rule bypass transitions are generated.
type ATNDeserializationOptions struct {
	readOnly                      bool
	verifyATN                     bool
	generateRuleBypassTransitions bool
}

func (opts *ATNDeserializationOptions) ReadOnly() bool {
	return opts.readOnly
}

func (opts *ATNDeserializationOptions) SetReadOnly(readOnly bool) {
	if opts.readOnly {
		panic(errors.New("cannot mutate read only ATNDeserializationOptions"))
	}
	opts.readOnly = readOnly
}

func (opts *ATNDeserializationOptions) VerifyATN() bool {
	return opts.verifyATN
}

func (opts *ATNDeserializationOptions) SetVerifyATN(verifyATN bool) {
	if opts.readOnly {
		panic(errors.New("cannot mutate read only ATNDeserializationOptions"))
	}
	opts.verifyATN = verifyATN
}

func (opts *ATNDeserializationOptions) GenerateRuleBypassTransitions() bool {
	return opts.generateRuleBypassTransitions
}

func (opts *ATNDeserializationOptions) SetGenerateRuleBypassTransitions(generateRuleBypassTransitions bool) {
	if opts.readOnly {
		panic(errors.New("cannot mutate read only ATNDeserializationOptions"))
	}
	opts.generateRuleBypassTransitions = generateRuleBypassTransitions
}

// DefaultATNDeserializationOptions returns a writable copy of the default options.
//
//goland:noinspection GoUnusedExportedFunction
func DefaultATNDeserializationOptions() *ATNDeserializationOptions {
	return NewATNDeserializationOptions(&defaultATNDeserializationOptions)
}

// NewATNDeserializationOptions copies other (if non-nil) into a new, writable
// options value.
func NewATNDeserializationOptions(other *ATNDeserializationOptions) *ATNDeserializationOptions {
	o := new(ATNDeserializationOptions)
	if other != nil {
		*o = *other
		o.readOnly = false
	}
	return o
}
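A small sketch of how these options are typically used together with the deserializer defined in the next file; `serializedATN` is a stand-in name for the []int32 blob that ANTLR-generated code embeds:

```go
// Sketch from outside the package: enable bypass transitions, then deserialize.
opts := antlr.DefaultATNDeserializationOptions() // writable copy of the defaults
opts.SetGenerateRuleBypassTransitions(true)

atn := antlr.NewATNDeserializer(opts).Deserialize(serializedATN)
_ = atn
```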
684
e2e/vendor/github.com/antlr4-go/antlr/v4/atn_deserializer.go
generated
vendored
Normal file
@ -0,0 +1,684 @@
|
||||
// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
|
||||
// Use of this file is governed by the BSD 3-clause license that
|
||||
// can be found in the LICENSE.txt file in the project root.
|
||||
|
||||
package antlr
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strconv"
|
||||
)
|
||||
|
||||
const serializedVersion = 4
|
||||
|
||||
type loopEndStateIntPair struct {
|
||||
item0 *LoopEndState
|
||||
item1 int
|
||||
}
|
||||
|
||||
type blockStartStateIntPair struct {
|
||||
item0 BlockStartState
|
||||
item1 int
|
||||
}
|
||||
|
||||
type ATNDeserializer struct {
|
||||
options *ATNDeserializationOptions
|
||||
data []int32
|
||||
pos int
|
||||
}
|
||||
|
||||
func NewATNDeserializer(options *ATNDeserializationOptions) *ATNDeserializer {
|
||||
if options == nil {
|
||||
options = &defaultATNDeserializationOptions
|
||||
}
|
||||
|
||||
return &ATNDeserializer{options: options}
|
||||
}
|
||||
|
||||
//goland:noinspection GoUnusedFunction
|
||||
func stringInSlice(a string, list []string) int {
|
||||
for i, b := range list {
|
||||
if b == a {
|
||||
return i
|
||||
}
|
||||
}
|
||||
|
||||
return -1
|
||||
}
|
||||
|
||||
func (a *ATNDeserializer) Deserialize(data []int32) *ATN {
|
||||
a.data = data
|
||||
a.pos = 0
|
||||
a.checkVersion()
|
||||
|
||||
atn := a.readATN()
|
||||
|
||||
a.readStates(atn)
|
||||
a.readRules(atn)
|
||||
a.readModes(atn)
|
||||
|
||||
sets := a.readSets(atn, nil)
|
||||
|
||||
a.readEdges(atn, sets)
|
||||
a.readDecisions(atn)
|
||||
a.readLexerActions(atn)
|
||||
a.markPrecedenceDecisions(atn)
|
||||
a.verifyATN(atn)
|
||||
|
||||
if a.options.GenerateRuleBypassTransitions() && atn.grammarType == ATNTypeParser {
|
||||
a.generateRuleBypassTransitions(atn)
|
||||
// Re-verify after modification
|
||||
a.verifyATN(atn)
|
||||
}
|
||||
|
||||
return atn
|
||||
|
||||
}
|
||||
|
||||
func (a *ATNDeserializer) checkVersion() {
|
||||
version := a.readInt()
|
||||
|
||||
if version != serializedVersion {
|
||||
panic("Could not deserialize ATN with version " + strconv.Itoa(version) + " (expected " + strconv.Itoa(serializedVersion) + ").")
|
||||
}
|
||||
}
|
||||
|
||||
func (a *ATNDeserializer) readATN() *ATN {
|
||||
grammarType := a.readInt()
|
||||
maxTokenType := a.readInt()
|
||||
|
||||
return NewATN(grammarType, maxTokenType)
|
||||
}
|
||||
|
||||
func (a *ATNDeserializer) readStates(atn *ATN) {
|
||||
nstates := a.readInt()
|
||||
|
||||
// Allocate worst case size.
|
||||
loopBackStateNumbers := make([]loopEndStateIntPair, 0, nstates)
|
||||
endStateNumbers := make([]blockStartStateIntPair, 0, nstates)
|
||||
|
||||
// Preallocate states slice.
|
||||
atn.states = make([]ATNState, 0, nstates)
|
||||
|
||||
for i := 0; i < nstates; i++ {
|
||||
stype := a.readInt()
|
||||
|
||||
// Ignore bad types of states
|
||||
if stype == ATNStateInvalidType {
|
||||
atn.addState(nil)
|
||||
continue
|
||||
}
|
||||
|
||||
ruleIndex := a.readInt()
|
||||
|
||||
s := a.stateFactory(stype, ruleIndex)
|
||||
|
||||
if stype == ATNStateLoopEnd {
|
||||
loopBackStateNumber := a.readInt()
|
||||
|
||||
loopBackStateNumbers = append(loopBackStateNumbers, loopEndStateIntPair{s.(*LoopEndState), loopBackStateNumber})
|
||||
} else if s2, ok := s.(BlockStartState); ok {
|
||||
endStateNumber := a.readInt()
|
||||
|
||||
endStateNumbers = append(endStateNumbers, blockStartStateIntPair{s2, endStateNumber})
|
||||
}
|
||||
|
||||
atn.addState(s)
|
||||
}
|
||||
|
||||
// Delay the assignment of loop back and end states until we know all the state
|
||||
// instances have been initialized
|
||||
for _, pair := range loopBackStateNumbers {
|
||||
pair.item0.loopBackState = atn.states[pair.item1]
|
||||
}
|
||||
|
||||
for _, pair := range endStateNumbers {
|
||||
pair.item0.setEndState(atn.states[pair.item1].(*BlockEndState))
|
||||
}
|
||||
|
||||
numNonGreedyStates := a.readInt()
|
||||
for j := 0; j < numNonGreedyStates; j++ {
|
||||
stateNumber := a.readInt()
|
||||
|
||||
atn.states[stateNumber].(DecisionState).setNonGreedy(true)
|
||||
}
|
||||
|
||||
numPrecedenceStates := a.readInt()
|
||||
for j := 0; j < numPrecedenceStates; j++ {
|
||||
stateNumber := a.readInt()
|
||||
|
||||
atn.states[stateNumber].(*RuleStartState).isPrecedenceRule = true
|
||||
}
|
||||
}
|
||||
|
||||
func (a *ATNDeserializer) readRules(atn *ATN) {
|
||||
nrules := a.readInt()
|
||||
|
||||
if atn.grammarType == ATNTypeLexer {
|
||||
atn.ruleToTokenType = make([]int, nrules)
|
||||
}
|
||||
|
||||
atn.ruleToStartState = make([]*RuleStartState, nrules)
|
||||
|
||||
for i := range atn.ruleToStartState {
|
||||
s := a.readInt()
|
||||
startState := atn.states[s].(*RuleStartState)
|
||||
|
||||
atn.ruleToStartState[i] = startState
|
||||
|
||||
if atn.grammarType == ATNTypeLexer {
|
||||
tokenType := a.readInt()
|
||||
|
||||
atn.ruleToTokenType[i] = tokenType
|
||||
}
|
||||
}
|
||||
|
||||
atn.ruleToStopState = make([]*RuleStopState, nrules)
|
||||
|
||||
for _, state := range atn.states {
|
||||
if s2, ok := state.(*RuleStopState); ok {
|
||||
atn.ruleToStopState[s2.ruleIndex] = s2
|
||||
atn.ruleToStartState[s2.ruleIndex].stopState = s2
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (a *ATNDeserializer) readModes(atn *ATN) {
|
||||
nmodes := a.readInt()
|
||||
atn.modeToStartState = make([]*TokensStartState, nmodes)
|
||||
|
||||
for i := range atn.modeToStartState {
|
||||
s := a.readInt()
|
||||
|
||||
atn.modeToStartState[i] = atn.states[s].(*TokensStartState)
|
||||
}
|
||||
}
|
||||
|
||||
func (a *ATNDeserializer) readSets(_ *ATN, sets []*IntervalSet) []*IntervalSet {
|
||||
m := a.readInt()
|
||||
|
||||
// Preallocate the needed capacity.
|
||||
if cap(sets)-len(sets) < m {
|
||||
isets := make([]*IntervalSet, len(sets), len(sets)+m)
|
||||
copy(isets, sets)
|
||||
sets = isets
|
||||
}
|
||||
|
||||
for i := 0; i < m; i++ {
|
||||
iset := NewIntervalSet()
|
||||
|
||||
sets = append(sets, iset)
|
||||
|
||||
n := a.readInt()
|
||||
containsEOF := a.readInt()
|
||||
|
||||
if containsEOF != 0 {
|
||||
iset.addOne(-1)
|
||||
}
|
||||
|
||||
for j := 0; j < n; j++ {
|
||||
i1 := a.readInt()
|
||||
i2 := a.readInt()
|
||||
|
||||
iset.addRange(i1, i2)
|
||||
}
|
||||
}
|
||||
|
||||
return sets
|
||||
}
|
||||
|
||||
func (a *ATNDeserializer) readEdges(atn *ATN, sets []*IntervalSet) {
|
||||
nedges := a.readInt()
|
||||
|
||||
for i := 0; i < nedges; i++ {
|
||||
var (
|
||||
src = a.readInt()
|
||||
trg = a.readInt()
|
||||
ttype = a.readInt()
|
||||
arg1 = a.readInt()
|
||||
arg2 = a.readInt()
|
||||
arg3 = a.readInt()
|
||||
trans = a.edgeFactory(atn, ttype, src, trg, arg1, arg2, arg3, sets)
|
||||
srcState = atn.states[src]
|
||||
)
|
||||
|
||||
srcState.AddTransition(trans, -1)
|
||||
}
|
||||
|
||||
// Edges for rule stop states can be derived, so they are not serialized
|
||||
for _, state := range atn.states {
|
||||
for _, t := range state.GetTransitions() {
|
||||
var rt, ok = t.(*RuleTransition)
|
||||
|
||||
if !ok {
|
||||
continue
|
||||
}
|
||||
|
||||
outermostPrecedenceReturn := -1
|
||||
|
||||
if atn.ruleToStartState[rt.getTarget().GetRuleIndex()].isPrecedenceRule {
|
||||
if rt.precedence == 0 {
|
||||
outermostPrecedenceReturn = rt.getTarget().GetRuleIndex()
|
||||
}
|
||||
}
|
||||
|
||||
trans := NewEpsilonTransition(rt.followState, outermostPrecedenceReturn)
|
||||
|
||||
atn.ruleToStopState[rt.getTarget().GetRuleIndex()].AddTransition(trans, -1)
|
||||
}
|
||||
}
|
||||
|
||||
for _, state := range atn.states {
|
||||
if s2, ok := state.(BlockStartState); ok {
|
||||
// We need to know the end state to set its start state
|
||||
if s2.getEndState() == nil {
|
||||
panic("IllegalState")
|
||||
}
|
||||
|
||||
// Block end states can only be associated to a single block start state
|
||||
if s2.getEndState().startState != nil {
|
||||
panic("IllegalState")
|
||||
}
|
||||
|
||||
s2.getEndState().startState = state
|
||||
}
|
||||
|
||||
if s2, ok := state.(*PlusLoopbackState); ok {
|
||||
for _, t := range s2.GetTransitions() {
|
||||
if t2, ok := t.getTarget().(*PlusBlockStartState); ok {
|
||||
t2.loopBackState = state
|
||||
}
|
||||
}
|
||||
} else if s2, ok := state.(*StarLoopbackState); ok {
|
||||
for _, t := range s2.GetTransitions() {
|
||||
if t2, ok := t.getTarget().(*StarLoopEntryState); ok {
|
||||
t2.loopBackState = state
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (a *ATNDeserializer) readDecisions(atn *ATN) {
|
||||
ndecisions := a.readInt()
|
||||
|
||||
for i := 0; i < ndecisions; i++ {
|
||||
s := a.readInt()
|
||||
decState := atn.states[s].(DecisionState)
|
||||
|
||||
atn.DecisionToState = append(atn.DecisionToState, decState)
|
||||
decState.setDecision(i)
|
||||
}
|
||||
}
|
||||
|
||||
func (a *ATNDeserializer) readLexerActions(atn *ATN) {
|
||||
if atn.grammarType == ATNTypeLexer {
|
||||
count := a.readInt()
|
||||
|
||||
atn.lexerActions = make([]LexerAction, count)
|
||||
|
||||
for i := range atn.lexerActions {
|
||||
actionType := a.readInt()
|
||||
data1 := a.readInt()
|
||||
data2 := a.readInt()
|
||||
atn.lexerActions[i] = a.lexerActionFactory(actionType, data1, data2)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (a *ATNDeserializer) generateRuleBypassTransitions(atn *ATN) {
	count := len(atn.ruleToStartState)

	for i := 0; i < count; i++ {
		atn.ruleToTokenType[i] = atn.maxTokenType + i + 1
	}

	for i := 0; i < count; i++ {
		a.generateRuleBypassTransition(atn, i)
	}
}

func (a *ATNDeserializer) generateRuleBypassTransition(atn *ATN, idx int) {
	bypassStart := NewBasicBlockStartState()

	bypassStart.ruleIndex = idx
	atn.addState(bypassStart)

	bypassStop := NewBlockEndState()

	bypassStop.ruleIndex = idx
	atn.addState(bypassStop)

	bypassStart.endState = bypassStop

	atn.defineDecisionState(&bypassStart.BaseDecisionState)

	bypassStop.startState = bypassStart

	var excludeTransition Transition
	var endState ATNState

	if atn.ruleToStartState[idx].isPrecedenceRule {
		// Wrap from the beginning of the rule to the StarLoopEntryState
		endState = nil

		for i := 0; i < len(atn.states); i++ {
			state := atn.states[i]

			if a.stateIsEndStateFor(state, idx) != nil {
				endState = state
				excludeTransition = state.(*StarLoopEntryState).loopBackState.GetTransitions()[0]

				break
			}
		}

		if excludeTransition == nil {
			panic("Couldn't identify final state of the precedence rule prefix section.")
		}
	} else {
		endState = atn.ruleToStopState[idx]
	}

	// All non-excluded transitions that currently target the end state need to
	// target blockEnd instead
	for i := 0; i < len(atn.states); i++ {
		state := atn.states[i]

		for j := 0; j < len(state.GetTransitions()); j++ {
			transition := state.GetTransitions()[j]

			if transition == excludeTransition {
				continue
			}

			if transition.getTarget() == endState {
				transition.setTarget(bypassStop)
			}
		}
	}

	// All transitions leaving the rule start state need to leave blockStart instead
	ruleToStartState := atn.ruleToStartState[idx]
	count := len(ruleToStartState.GetTransitions())

	for count > 0 {
		// Move the last remaining transition onto bypassStart and shrink the
		// source slice, so the loop terminates once every transition has moved.
		bypassStart.AddTransition(ruleToStartState.GetTransitions()[count-1], -1)
		ruleToStartState.SetTransitions(ruleToStartState.GetTransitions()[:count-1])
		count--
	}

	// Link the new states
	atn.ruleToStartState[idx].AddTransition(NewEpsilonTransition(bypassStart, -1), -1)
	bypassStop.AddTransition(NewEpsilonTransition(endState, -1), -1)

	MatchState := NewBasicState()

	atn.addState(MatchState)
	MatchState.AddTransition(NewAtomTransition(bypassStop, atn.ruleToTokenType[idx]), -1)
	bypassStart.AddTransition(NewEpsilonTransition(MatchState, -1), -1)
}

func (a *ATNDeserializer) stateIsEndStateFor(state ATNState, idx int) ATNState {
	if state.GetRuleIndex() != idx {
		return nil
	}

	if _, ok := state.(*StarLoopEntryState); !ok {
		return nil
	}

	maybeLoopEndState := state.GetTransitions()[len(state.GetTransitions())-1].getTarget()

	if _, ok := maybeLoopEndState.(*LoopEndState); !ok {
		return nil
	}

	_, ok := maybeLoopEndState.GetTransitions()[0].getTarget().(*RuleStopState)

	if maybeLoopEndState.(*LoopEndState).epsilonOnlyTransitions && ok {
		return state
	}

	return nil
}

// markPrecedenceDecisions analyzes the StarLoopEntryState states in the
// specified ATN to set the StarLoopEntryState.precedenceRuleDecision field to
// the correct value.
func (a *ATNDeserializer) markPrecedenceDecisions(atn *ATN) {
	for _, state := range atn.states {
		if _, ok := state.(*StarLoopEntryState); !ok {
			continue
		}

		// We analyze the [ATN] to determine if an ATN decision state is the
		// decision for the closure block that determines whether a
		// precedence rule should continue or complete.
		if atn.ruleToStartState[state.GetRuleIndex()].isPrecedenceRule {
			maybeLoopEndState := state.GetTransitions()[len(state.GetTransitions())-1].getTarget()

			if s3, ok := maybeLoopEndState.(*LoopEndState); ok {
				_, ok2 := maybeLoopEndState.GetTransitions()[0].getTarget().(*RuleStopState)

				if s3.epsilonOnlyTransitions && ok2 {
					state.(*StarLoopEntryState).precedenceRuleDecision = true
				}
			}
		}
	}
}

func (a *ATNDeserializer) verifyATN(atn *ATN) {
	if !a.options.VerifyATN() {
		return
	}

	// Verify assumptions
	for _, state := range atn.states {
		if state == nil {
			continue
		}

		a.checkCondition(state.GetEpsilonOnlyTransitions() || len(state.GetTransitions()) <= 1, "")

		switch s2 := state.(type) {
		case *PlusBlockStartState:
			a.checkCondition(s2.loopBackState != nil, "")

		case *StarLoopEntryState:
			a.checkCondition(s2.loopBackState != nil, "")
			a.checkCondition(len(s2.GetTransitions()) == 2, "")

			switch s2.transitions[0].getTarget().(type) {
			case *StarBlockStartState:
				_, ok := s2.transitions[1].getTarget().(*LoopEndState)

				a.checkCondition(ok, "")
				a.checkCondition(!s2.nonGreedy, "")

			case *LoopEndState:
				_, ok := s2.transitions[1].getTarget().(*StarBlockStartState)

				a.checkCondition(ok, "")
				a.checkCondition(s2.nonGreedy, "")

			default:
				panic("IllegalState")
			}

		case *StarLoopbackState:
			a.checkCondition(len(state.GetTransitions()) == 1, "")

			_, ok := state.GetTransitions()[0].getTarget().(*StarLoopEntryState)

			a.checkCondition(ok, "")

		case *LoopEndState:
			a.checkCondition(s2.loopBackState != nil, "")

		case *RuleStartState:
			a.checkCondition(s2.stopState != nil, "")

		case BlockStartState:
			a.checkCondition(s2.getEndState() != nil, "")

		case *BlockEndState:
			a.checkCondition(s2.startState != nil, "")

		case DecisionState:
			a.checkCondition(len(s2.GetTransitions()) <= 1 || s2.getDecision() >= 0, "")

		default:
			_, ok := s2.(*RuleStopState)

			a.checkCondition(len(s2.GetTransitions()) <= 1 || ok, "")
		}
	}
}

func (a *ATNDeserializer) checkCondition(condition bool, message string) {
	if !condition {
		if message == "" {
			message = "IllegalState"
		}

		panic(message)
	}
}

func (a *ATNDeserializer) readInt() int {
	v := a.data[a.pos]

	a.pos++

	return int(v) // data is 32 bits but int is at least that big
}

func (a *ATNDeserializer) edgeFactory(atn *ATN, typeIndex, _, trg, arg1, arg2, arg3 int, sets []*IntervalSet) Transition {
	target := atn.states[trg]

	switch typeIndex {
	case TransitionEPSILON:
		return NewEpsilonTransition(target, -1)

	case TransitionRANGE:
		if arg3 != 0 {
			return NewRangeTransition(target, TokenEOF, arg2)
		}

		return NewRangeTransition(target, arg1, arg2)

	case TransitionRULE:
		return NewRuleTransition(atn.states[arg1], arg2, arg3, target)

	case TransitionPREDICATE:
		return NewPredicateTransition(target, arg1, arg2, arg3 != 0)

	case TransitionPRECEDENCE:
		return NewPrecedencePredicateTransition(target, arg1)

	case TransitionATOM:
		if arg3 != 0 {
			return NewAtomTransition(target, TokenEOF)
		}

		return NewAtomTransition(target, arg1)

	case TransitionACTION:
		return NewActionTransition(target, arg1, arg2, arg3 != 0)

	case TransitionSET:
		return NewSetTransition(target, sets[arg1])

	case TransitionNOTSET:
		return NewNotSetTransition(target, sets[arg1])

	case TransitionWILDCARD:
		return NewWildcardTransition(target)
	}

	panic("The specified transition type is not valid.")
}

func (a *ATNDeserializer) stateFactory(typeIndex, ruleIndex int) ATNState {
	var s ATNState

	switch typeIndex {
	case ATNStateInvalidType:
		return nil

	case ATNStateBasic:
		s = NewBasicState()

	case ATNStateRuleStart:
		s = NewRuleStartState()

	case ATNStateBlockStart:
		s = NewBasicBlockStartState()

	case ATNStatePlusBlockStart:
		s = NewPlusBlockStartState()

	case ATNStateStarBlockStart:
		s = NewStarBlockStartState()

	case ATNStateTokenStart:
		s = NewTokensStartState()

	case ATNStateRuleStop:
		s = NewRuleStopState()

	case ATNStateBlockEnd:
		s = NewBlockEndState()

	case ATNStateStarLoopBack:
		s = NewStarLoopbackState()

	case ATNStateStarLoopEntry:
		s = NewStarLoopEntryState()

	case ATNStatePlusLoopBack:
		s = NewPlusLoopbackState()

	case ATNStateLoopEnd:
		s = NewLoopEndState()

	default:
		panic(fmt.Sprintf("state type %d is invalid", typeIndex))
	}

	s.SetRuleIndex(ruleIndex)

	return s
}

func (a *ATNDeserializer) lexerActionFactory(typeIndex, data1, data2 int) LexerAction {
	switch typeIndex {
	case LexerActionTypeChannel:
		return NewLexerChannelAction(data1)

	case LexerActionTypeCustom:
		return NewLexerCustomAction(data1, data2)

	case LexerActionTypeMode:
		return NewLexerModeAction(data1)

	case LexerActionTypeMore:
		return LexerMoreActionINSTANCE

	case LexerActionTypePopMode:
		return LexerPopModeActionINSTANCE

	case LexerActionTypePushMode:
		return NewLexerPushModeAction(data1)

	case LexerActionTypeSkip:
		return LexerSkipActionINSTANCE

	case LexerActionTypeType:
		return NewLexerTypeAction(data1)

	default:
		panic(fmt.Sprintf("lexer action %d is invalid", typeIndex))
	}
}
41
e2e/vendor/github.com/antlr4-go/antlr/v4/atn_simulator.go
generated
vendored
Normal file
@ -0,0 +1,41 @@
// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
// Use of this file is governed by the BSD 3-clause license that
// can be found in the LICENSE.txt file in the project root.

package antlr

var ATNSimulatorError = NewDFAState(0x7FFFFFFF, NewATNConfigSet(false))

type IATNSimulator interface {
	SharedContextCache() *PredictionContextCache
	ATN() *ATN
	DecisionToDFA() []*DFA
}

type BaseATNSimulator struct {
	atn                *ATN
	sharedContextCache *PredictionContextCache
	decisionToDFA      []*DFA
}

func (b *BaseATNSimulator) getCachedContext(context *PredictionContext) *PredictionContext {
	if b.sharedContextCache == nil {
		return context
	}

	//visited := NewJMap[*PredictionContext, *PredictionContext, Comparator[*PredictionContext]](pContextEqInst, PredictionVisitedCollection, "Visit map in getCachedContext()")
	visited := NewVisitRecord()
	return getCachedBasePredictionContext(context, b.sharedContextCache, visited)
}

func (b *BaseATNSimulator) SharedContextCache() *PredictionContextCache {
	return b.sharedContextCache
}

func (b *BaseATNSimulator) ATN() *ATN {
	return b.atn
}

func (b *BaseATNSimulator) DecisionToDFA() []*DFA {
	return b.decisionToDFA
}
461
e2e/vendor/github.com/antlr4-go/antlr/v4/atn_state.go
generated
vendored
Normal file
@ -0,0 +1,461 @@
// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
// Use of this file is governed by the BSD 3-clause license that
// can be found in the LICENSE.txt file in the project root.

package antlr

import (
	"fmt"
	"os"
	"strconv"
)

// Constants for serialization.
const (
	ATNStateInvalidType    = 0
	ATNStateBasic          = 1
	ATNStateRuleStart      = 2
	ATNStateBlockStart     = 3
	ATNStatePlusBlockStart = 4
	ATNStateStarBlockStart = 5
	ATNStateTokenStart     = 6
	ATNStateRuleStop       = 7
	ATNStateBlockEnd       = 8
	ATNStateStarLoopBack   = 9
	ATNStateStarLoopEntry  = 10
	ATNStatePlusLoopBack   = 11
	ATNStateLoopEnd        = 12

	ATNStateInvalidStateNumber = -1
)

//goland:noinspection GoUnusedGlobalVariable
var ATNStateInitialNumTransitions = 4

type ATNState interface {
	GetEpsilonOnlyTransitions() bool

	GetRuleIndex() int
	SetRuleIndex(int)

	GetNextTokenWithinRule() *IntervalSet
	SetNextTokenWithinRule(*IntervalSet)

	GetATN() *ATN
	SetATN(*ATN)

	GetStateType() int

	GetStateNumber() int
	SetStateNumber(int)

	GetTransitions() []Transition
	SetTransitions([]Transition)
	AddTransition(Transition, int)

	String() string
	Hash() int
	Equals(Collectable[ATNState]) bool
}

type BaseATNState struct {
	// NextTokenWithinRule caches lookahead during parsing. Not used during construction.
	NextTokenWithinRule *IntervalSet

	// atn is the current ATN.
	atn *ATN

	epsilonOnlyTransitions bool

	// ruleIndex tracks the Rule index because there are no Rule objects at runtime.
	ruleIndex int

	stateNumber int

	stateType int

	// Track the transitions emanating from this ATN state.
	transitions []Transition
}

func NewATNState() *BaseATNState {
	return &BaseATNState{stateNumber: ATNStateInvalidStateNumber, stateType: ATNStateInvalidType}
}

func (as *BaseATNState) GetRuleIndex() int {
	return as.ruleIndex
}

func (as *BaseATNState) SetRuleIndex(v int) {
	as.ruleIndex = v
}

func (as *BaseATNState) GetEpsilonOnlyTransitions() bool {
	return as.epsilonOnlyTransitions
}

func (as *BaseATNState) GetATN() *ATN {
	return as.atn
}

func (as *BaseATNState) SetATN(atn *ATN) {
	as.atn = atn
}

func (as *BaseATNState) GetTransitions() []Transition {
	return as.transitions
}

func (as *BaseATNState) SetTransitions(t []Transition) {
	as.transitions = t
}

func (as *BaseATNState) GetStateType() int {
	return as.stateType
}

func (as *BaseATNState) GetStateNumber() int {
	return as.stateNumber
}

func (as *BaseATNState) SetStateNumber(stateNumber int) {
	as.stateNumber = stateNumber
}

func (as *BaseATNState) GetNextTokenWithinRule() *IntervalSet {
	return as.NextTokenWithinRule
}

func (as *BaseATNState) SetNextTokenWithinRule(v *IntervalSet) {
	as.NextTokenWithinRule = v
}

func (as *BaseATNState) Hash() int {
	return as.stateNumber
}

func (as *BaseATNState) String() string {
	return strconv.Itoa(as.stateNumber)
}

func (as *BaseATNState) Equals(other Collectable[ATNState]) bool {
	if ot, ok := other.(ATNState); ok {
		return as.stateNumber == ot.GetStateNumber()
	}

	return false
}

func (as *BaseATNState) isNonGreedyExitState() bool {
	return false
}

func (as *BaseATNState) AddTransition(trans Transition, index int) {
	if len(as.transitions) == 0 {
		as.epsilonOnlyTransitions = trans.getIsEpsilon()
	} else if as.epsilonOnlyTransitions != trans.getIsEpsilon() {
		// Warn on stderr when a state mixes epsilon and non-epsilon transitions.
		_, _ = fmt.Fprintf(os.Stderr, "ATN state %d has both epsilon and non-epsilon transitions.\n", as.stateNumber)
		as.epsilonOnlyTransitions = false
	}

	// TODO: Check code for already present compared to the Java equivalent
	//alreadyPresent := false
	//for _, t := range as.transitions {
	//	if t.getTarget().GetStateNumber() == trans.getTarget().GetStateNumber() {
	//		if t.getLabel() != nil && trans.getLabel() != nil && trans.getLabel().Equals(t.getLabel()) {
	//			alreadyPresent = true
	//			break
	//		}
	//	} else if t.getIsEpsilon() && trans.getIsEpsilon() {
	//		alreadyPresent = true
	//		break
	//	}
	//}
	//if !alreadyPresent {
	if index == -1 {
		as.transitions = append(as.transitions, trans)
	} else {
		as.transitions = append(as.transitions[:index], append([]Transition{trans}, as.transitions[index:]...)...)
		// TODO: as.transitions.splice(index, 1, trans)
	}
	//} else {
	//	_, _ = fmt.Fprintf(os.Stderr, "Transition already present in state %d\n", as.stateNumber)
	//}
}

type BasicState struct {
	BaseATNState
}

func NewBasicState() *BasicState {
	return &BasicState{
		BaseATNState: BaseATNState{
			stateNumber: ATNStateInvalidStateNumber,
			stateType:   ATNStateBasic,
		},
	}
}

type DecisionState interface {
	ATNState

	getDecision() int
	setDecision(int)

	getNonGreedy() bool
	setNonGreedy(bool)
}

type BaseDecisionState struct {
	BaseATNState
	decision  int
	nonGreedy bool
}

func NewBaseDecisionState() *BaseDecisionState {
	return &BaseDecisionState{
		BaseATNState: BaseATNState{
			stateNumber: ATNStateInvalidStateNumber,
			stateType:   ATNStateBasic,
		},
		decision: -1,
	}
}

func (s *BaseDecisionState) getDecision() int {
	return s.decision
}

func (s *BaseDecisionState) setDecision(b int) {
	s.decision = b
}

func (s *BaseDecisionState) getNonGreedy() bool {
	return s.nonGreedy
}

func (s *BaseDecisionState) setNonGreedy(b bool) {
	s.nonGreedy = b
}

type BlockStartState interface {
	DecisionState

	getEndState() *BlockEndState
	setEndState(*BlockEndState)
}

// BaseBlockStartState is the start of a regular (...) block.
type BaseBlockStartState struct {
	BaseDecisionState
	endState *BlockEndState
}

func NewBlockStartState() *BaseBlockStartState {
	return &BaseBlockStartState{
		BaseDecisionState: BaseDecisionState{
			BaseATNState: BaseATNState{
				stateNumber: ATNStateInvalidStateNumber,
				stateType:   ATNStateBasic,
			},
			decision: -1,
		},
	}
}

func (s *BaseBlockStartState) getEndState() *BlockEndState {
	return s.endState
}

func (s *BaseBlockStartState) setEndState(b *BlockEndState) {
	s.endState = b
}

type BasicBlockStartState struct {
	BaseBlockStartState
}

func NewBasicBlockStartState() *BasicBlockStartState {
	return &BasicBlockStartState{
		BaseBlockStartState: BaseBlockStartState{
			BaseDecisionState: BaseDecisionState{
				BaseATNState: BaseATNState{
					stateNumber: ATNStateInvalidStateNumber,
					stateType:   ATNStateBlockStart,
				},
			},
		},
	}
}

var _ BlockStartState = &BasicBlockStartState{}

// BlockEndState is a terminal node of a simple (a|b|c) block.
type BlockEndState struct {
	BaseATNState
	startState ATNState
}

func NewBlockEndState() *BlockEndState {
	return &BlockEndState{
		BaseATNState: BaseATNState{
			stateNumber: ATNStateInvalidStateNumber,
			stateType:   ATNStateBlockEnd,
		},
		startState: nil,
	}
}

// RuleStopState is the last node in the ATN for a rule, unless that rule is the
// start symbol. In that case, there is one transition to EOF. Later, we might
// encode references to all calls to this rule to compute FOLLOW sets for error
// handling.
type RuleStopState struct {
	BaseATNState
}

func NewRuleStopState() *RuleStopState {
	return &RuleStopState{
		BaseATNState: BaseATNState{
			stateNumber: ATNStateInvalidStateNumber,
			stateType:   ATNStateRuleStop,
		},
	}
}

type RuleStartState struct {
	BaseATNState
	stopState        ATNState
	isPrecedenceRule bool
}

func NewRuleStartState() *RuleStartState {
	return &RuleStartState{
		BaseATNState: BaseATNState{
			stateNumber: ATNStateInvalidStateNumber,
			stateType:   ATNStateRuleStart,
		},
	}
}

// PlusLoopbackState is a decision state for A+ and (A|B)+. It has two
// transitions: one to the loop back to start of the block, and one to exit.
type PlusLoopbackState struct {
	BaseDecisionState
}

func NewPlusLoopbackState() *PlusLoopbackState {
	return &PlusLoopbackState{
		BaseDecisionState: BaseDecisionState{
			BaseATNState: BaseATNState{
				stateNumber: ATNStateInvalidStateNumber,
				stateType:   ATNStatePlusLoopBack,
			},
		},
	}
}

// PlusBlockStartState is the start of a (A|B|...)+ loop. Technically it is a
// decision state; we don't use it for code generation. Somebody might need it,
// so it is included for completeness. In reality, PlusLoopbackState is the real
// decision-making node for A+.
type PlusBlockStartState struct {
	BaseBlockStartState
	loopBackState ATNState
}

func NewPlusBlockStartState() *PlusBlockStartState {
	return &PlusBlockStartState{
		BaseBlockStartState: BaseBlockStartState{
			BaseDecisionState: BaseDecisionState{
				BaseATNState: BaseATNState{
					stateNumber: ATNStateInvalidStateNumber,
					stateType:   ATNStatePlusBlockStart,
				},
			},
		},
	}
}

var _ BlockStartState = &PlusBlockStartState{}

// StarBlockStartState is the block that begins a closure loop.
type StarBlockStartState struct {
	BaseBlockStartState
}

func NewStarBlockStartState() *StarBlockStartState {
	return &StarBlockStartState{
		BaseBlockStartState: BaseBlockStartState{
			BaseDecisionState: BaseDecisionState{
				BaseATNState: BaseATNState{
					stateNumber: ATNStateInvalidStateNumber,
					stateType:   ATNStateStarBlockStart,
				},
			},
		},
	}
}

var _ BlockStartState = &StarBlockStartState{}

type StarLoopbackState struct {
	BaseATNState
}

func NewStarLoopbackState() *StarLoopbackState {
	return &StarLoopbackState{
		BaseATNState: BaseATNState{
			stateNumber: ATNStateInvalidStateNumber,
			stateType:   ATNStateStarLoopBack,
		},
	}
}

type StarLoopEntryState struct {
	BaseDecisionState
	loopBackState          ATNState
	precedenceRuleDecision bool
}

func NewStarLoopEntryState() *StarLoopEntryState {
	// precedenceRuleDecision starts false and indicates whether this state can
	// benefit from a precedence DFA during SLL decision making.
	return &StarLoopEntryState{
		BaseDecisionState: BaseDecisionState{
			BaseATNState: BaseATNState{
				stateNumber: ATNStateInvalidStateNumber,
				stateType:   ATNStateStarLoopEntry,
			},
		},
	}
}

// LoopEndState marks the end of a * or + loop.
type LoopEndState struct {
	BaseATNState
	loopBackState ATNState
}

func NewLoopEndState() *LoopEndState {
	return &LoopEndState{
		BaseATNState: BaseATNState{
			stateNumber: ATNStateInvalidStateNumber,
			stateType:   ATNStateLoopEnd,
		},
	}
}

// TokensStartState is the Tokens rule start state linking to each lexer rule start state.
type TokensStartState struct {
	BaseDecisionState
}

func NewTokensStartState() *TokensStartState {
	return &TokensStartState{
		BaseDecisionState: BaseDecisionState{
			BaseATNState: BaseATNState{
				stateNumber: ATNStateInvalidStateNumber,
				stateType:   ATNStateTokenStart,
			},
		},
	}
}
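As a quick orientation for the state API above, here is a small hedged sketch (not part of the vendored file): it wires two BasicStates together with NewEpsilonTransition, which is defined elsewhere in this runtime, and shows the epsilon-only bookkeeping performed by AddTransition.

package main

import (
	"fmt"

	"github.com/antlr4-go/antlr/v4"
)

func main() {
	src := antlr.NewBasicState()
	dst := antlr.NewBasicState()

	// Index -1 appends; the first transition added decides whether the
	// state is tracked as epsilon-only.
	src.AddTransition(antlr.NewEpsilonTransition(dst, -1), -1)

	fmt.Println(len(src.GetTransitions()), src.GetEpsilonOnlyTransitions()) // 1 true
}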
11
e2e/vendor/github.com/antlr4-go/antlr/v4/atn_type.go
generated
vendored
Normal file
@ -0,0 +1,11 @@
// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
// Use of this file is governed by the BSD 3-clause license that
// can be found in the LICENSE.txt file in the project root.

package antlr

// Represent the type of recognizer an ATN applies to.
const (
	ATNTypeLexer  = 0
	ATNTypeParser = 1
)
12
e2e/vendor/github.com/antlr4-go/antlr/v4/char_stream.go
generated
vendored
Normal file
@ -0,0 +1,12 @@
// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
// Use of this file is governed by the BSD 3-clause license that
// can be found in the LICENSE.txt file in the project root.

package antlr

type CharStream interface {
	IntStream
	GetText(int, int) string
	GetTextFromTokens(start, end Token) string
	GetTextFromInterval(Interval) string
}
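A minimal sketch of this interface in use, assuming the runtime's NewInputStream constructor from input_stream.go (not shown in this diff), whose *InputStream satisfies CharStream:

package main

import (
	"fmt"

	"github.com/antlr4-go/antlr/v4"
)

func main() {
	var cs antlr.CharStream = antlr.NewInputStream("a + b")

	// GetText takes inclusive start/stop indexes into the stream.
	fmt.Println(cs.GetText(0, 4)) // "a + b"
}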
56
e2e/vendor/github.com/antlr4-go/antlr/v4/common_token_factory.go
generated
vendored
Normal file
@ -0,0 +1,56 @@
// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
// Use of this file is governed by the BSD 3-clause license that
// can be found in the LICENSE.txt file in the project root.

package antlr

// TokenFactory creates CommonToken objects.
type TokenFactory interface {
	Create(source *TokenSourceCharStreamPair, ttype int, text string, channel, start, stop, line, column int) Token
}

// CommonTokenFactory is the default TokenFactory implementation.
type CommonTokenFactory struct {
	// copyText indicates whether CommonToken.setText should be called after
	// constructing tokens to explicitly set the text. This is useful for cases
	// where the input stream might not be able to provide arbitrary substrings
	// of text from the input after the lexer creates a token (e.g. the
	// implementation of CharStream.GetText in UnbufferedCharStream panics with
	// an UnsupportedOperationException). Explicitly setting the token text
	// allows Token.GetText to be called at any time regardless of the input
	// stream implementation.
	//
	// The default value is false to avoid the performance and memory overhead
	// of copying text for every token unless explicitly requested.
	copyText bool
}

func NewCommonTokenFactory(copyText bool) *CommonTokenFactory {
	return &CommonTokenFactory{copyText: copyText}
}

// CommonTokenFactoryDEFAULT is the default CommonTokenFactory. It does not
// explicitly copy token text when constructing tokens.
var CommonTokenFactoryDEFAULT = NewCommonTokenFactory(false)

func (c *CommonTokenFactory) Create(source *TokenSourceCharStreamPair, ttype int, text string, channel, start, stop, line, column int) Token {
	t := NewCommonToken(source, ttype, channel, start, stop)

	t.line = line
	t.column = column

	if text != "" {
		t.SetText(text)
	} else if c.copyText && source.charStream != nil {
		t.SetText(source.charStream.GetTextFromInterval(NewInterval(start, stop)))
	}

	return t
}

func (c *CommonTokenFactory) createThin(ttype int, text string) Token {
	t := NewCommonToken(nil, ttype, TokenDefaultChannel, -1, -1)
	t.SetText(text)

	return t
}
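To make the copyText trade-off concrete, a hedged sketch constructing both variants; how a factory is installed on a lexer is generated-code specific and omitted here:

package main

import (
	"fmt"

	"github.com/antlr4-go/antlr/v4"
)

func main() {
	// The shared default defers to the CharStream for token text.
	lazy := antlr.CommonTokenFactoryDEFAULT

	// copyText=true pays one copy per token so Token.GetText keeps working
	// even if the stream cannot serve substrings later.
	eager := antlr.NewCommonTokenFactory(true)

	fmt.Printf("%T %T\n", lazy, eager)
}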
450
e2e/vendor/github.com/antlr4-go/antlr/v4/common_token_stream.go
generated
vendored
Normal file
@ -0,0 +1,450 @@
// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
// Use of this file is governed by the BSD 3-clause license that
// can be found in the LICENSE.txt file in the project root.

package antlr

import (
	"strconv"
)

// CommonTokenStream is an implementation of TokenStream that loads tokens from
// a TokenSource on-demand and places the tokens in a buffer to provide access
// to any previous token by index. This token stream ignores the value of
// Token.getChannel. If your parser requires the token stream to filter tokens
// to only those on a particular channel, such as Token.DEFAULT_CHANNEL or
// Token.HIDDEN_CHANNEL, use a filtering token stream such as CommonTokenStream.
type CommonTokenStream struct {
	channel int

	// fetchedEOF indicates whether the Token.EOF token has been fetched from
	// tokenSource and added to tokens. This field improves performance for the
	// following cases:
	//
	// consume: The lookahead check in consume to prevent consuming the EOF symbol is
	// optimized by checking the values of fetchedEOF and p instead of calling LA.
	//
	// fetch: The check to prevent adding multiple EOF symbols into tokens is
	// trivial with this field.
	fetchedEOF bool

	// index into [tokens] of the current token (next token to consume).
	// tokens[p] should be LT(1). It is set to -1 when the stream is first
	// constructed or when SetTokenSource is called, indicating that the first token
	// has not yet been fetched from the token source. For additional information,
	// see the documentation of [IntStream] for a description of initializing methods.
	index int

	// tokenSource is the [TokenSource] from which tokens for this stream are
	// fetched.
	tokenSource TokenSource

	// tokens contains all tokens fetched from the token source. The list is considered a
	// complete view of the input once fetchedEOF is set to true.
	tokens []Token
}

// NewCommonTokenStream creates a new CommonTokenStream instance using the supplied
// lexer to produce tokens; it will pull tokens from the given lexer channel.
func NewCommonTokenStream(lexer Lexer, channel int) *CommonTokenStream {
	return &CommonTokenStream{
		channel:     channel,
		index:       -1,
		tokenSource: lexer,
		tokens:      make([]Token, 0),
	}
}

// GetAllTokens returns all tokens currently pulled from the token source.
func (c *CommonTokenStream) GetAllTokens() []Token {
	return c.tokens
}

func (c *CommonTokenStream) Mark() int {
	return 0
}

func (c *CommonTokenStream) Release(_ int) {}

func (c *CommonTokenStream) Reset() {
	c.fetchedEOF = false
	c.tokens = make([]Token, 0)
	c.Seek(0)
}

func (c *CommonTokenStream) Seek(index int) {
	c.lazyInit()
	c.index = c.adjustSeekIndex(index)
}

func (c *CommonTokenStream) Get(index int) Token {
	c.lazyInit()

	return c.tokens[index]
}

func (c *CommonTokenStream) Consume() {
	SkipEOFCheck := false

	if c.index >= 0 {
		if c.fetchedEOF {
			// The last token in tokens is EOF. Skip the check if p indexes any
			// fetched token except the last.
			SkipEOFCheck = c.index < len(c.tokens)-1
		} else {
			// No EOF token in tokens. Skip the check if p indexes a fetched token.
			SkipEOFCheck = c.index < len(c.tokens)
		}
	} else {
		// Not yet initialized
		SkipEOFCheck = false
	}

	if !SkipEOFCheck && c.LA(1) == TokenEOF {
		panic("cannot consume EOF")
	}

	if c.Sync(c.index + 1) {
		c.index = c.adjustSeekIndex(c.index + 1)
	}
}

// Sync makes sure index i in tokens has a token and returns true if a token is
// located at index i and otherwise false.
func (c *CommonTokenStream) Sync(i int) bool {
	n := i - len(c.tokens) + 1 // How many more elements do we need?

	if n > 0 {
		fetched := c.fetch(n)
		return fetched >= n
	}

	return true
}

// fetch adds n elements to the buffer and returns the actual number of elements
// added to the buffer.
func (c *CommonTokenStream) fetch(n int) int {
	if c.fetchedEOF {
		return 0
	}

	for i := 0; i < n; i++ {
		t := c.tokenSource.NextToken()

		t.SetTokenIndex(len(c.tokens))
		c.tokens = append(c.tokens, t)

		if t.GetTokenType() == TokenEOF {
			c.fetchedEOF = true

			return i + 1
		}
	}

	return n
}

// GetTokens gets all tokens from start to stop inclusive.
func (c *CommonTokenStream) GetTokens(start int, stop int, types *IntervalSet) []Token {
	if start < 0 || stop < 0 {
		return nil
	}

	c.lazyInit()

	subset := make([]Token, 0)

	if stop >= len(c.tokens) {
		stop = len(c.tokens) - 1
	}

	for i := start; i < stop; i++ {
		t := c.tokens[i]

		if t.GetTokenType() == TokenEOF {
			break
		}

		if types == nil || types.contains(t.GetTokenType()) {
			subset = append(subset, t)
		}
	}

	return subset
}

func (c *CommonTokenStream) LA(i int) int {
	return c.LT(i).GetTokenType()
}

func (c *CommonTokenStream) lazyInit() {
	if c.index == -1 {
		c.setup()
	}
}

func (c *CommonTokenStream) setup() {
	c.Sync(0)
	c.index = c.adjustSeekIndex(0)
}

func (c *CommonTokenStream) GetTokenSource() TokenSource {
	return c.tokenSource
}

// SetTokenSource resets the c token stream by setting its token source.
func (c *CommonTokenStream) SetTokenSource(tokenSource TokenSource) {
	c.tokenSource = tokenSource
	c.tokens = make([]Token, 0)
	c.index = -1
	c.fetchedEOF = false
}

// NextTokenOnChannel returns the index of the next token on channel given a
// starting index. Returns i if tokens[i] is on channel. Returns -1 if there are
// no tokens on channel between 'i' and [TokenEOF].
func (c *CommonTokenStream) NextTokenOnChannel(i, _ int) int {
	c.Sync(i)

	if i >= len(c.tokens) {
		return -1
	}

	token := c.tokens[i]

	for token.GetChannel() != c.channel {
		if token.GetTokenType() == TokenEOF {
			return -1
		}

		i++
		c.Sync(i)
		token = c.tokens[i]
	}

	return i
}

// previousTokenOnChannel returns the index of the previous token on channel
// given a starting index. Returns i if tokens[i] is on channel. Returns -1 if
// there are no tokens on channel between i and 0.
func (c *CommonTokenStream) previousTokenOnChannel(i, channel int) int {
	for i >= 0 && c.tokens[i].GetChannel() != channel {
		i--
	}

	return i
}

// GetHiddenTokensToRight collects all tokens on a specified channel to the
// right of the current token up until we see a token on DEFAULT_TOKEN_CHANNEL
// or EOF. If channel is -1, it finds any non-default channel token.
func (c *CommonTokenStream) GetHiddenTokensToRight(tokenIndex, channel int) []Token {
	c.lazyInit()

	if tokenIndex < 0 || tokenIndex >= len(c.tokens) {
		panic(strconv.Itoa(tokenIndex) + " not in 0.." + strconv.Itoa(len(c.tokens)-1))
	}

	nextOnChannel := c.NextTokenOnChannel(tokenIndex+1, LexerDefaultTokenChannel)
	from := tokenIndex + 1

	// If no onChannel to the right, then nextOnChannel == -1, so set 'to' to the last token
	var to int

	if nextOnChannel == -1 {
		to = len(c.tokens) - 1
	} else {
		to = nextOnChannel
	}

	return c.filterForChannel(from, to, channel)
}

// GetHiddenTokensToLeft collects all tokens on channel to the left of the
// current token until we see a token on DEFAULT_TOKEN_CHANNEL. If channel is
// -1, it finds any non-default channel token.
func (c *CommonTokenStream) GetHiddenTokensToLeft(tokenIndex, channel int) []Token {
	c.lazyInit()

	if tokenIndex < 0 || tokenIndex >= len(c.tokens) {
		panic(strconv.Itoa(tokenIndex) + " not in 0.." + strconv.Itoa(len(c.tokens)-1))
	}

	prevOnChannel := c.previousTokenOnChannel(tokenIndex-1, LexerDefaultTokenChannel)

	if prevOnChannel == tokenIndex-1 {
		return nil
	}

	// If there are none on channel to the left and prevOnChannel == -1 then from = 0
	from := prevOnChannel + 1
	to := tokenIndex - 1

	return c.filterForChannel(from, to, channel)
}

func (c *CommonTokenStream) filterForChannel(left, right, channel int) []Token {
	hidden := make([]Token, 0)

	for i := left; i < right+1; i++ {
		t := c.tokens[i]

		if channel == -1 {
			if t.GetChannel() != LexerDefaultTokenChannel {
				hidden = append(hidden, t)
			}
		} else if t.GetChannel() == channel {
			hidden = append(hidden, t)
		}
	}

	if len(hidden) == 0 {
		return nil
	}

	return hidden
}

func (c *CommonTokenStream) GetSourceName() string {
	return c.tokenSource.GetSourceName()
}

func (c *CommonTokenStream) Size() int {
	return len(c.tokens)
}

func (c *CommonTokenStream) Index() int {
	return c.index
}

func (c *CommonTokenStream) GetAllText() string {
	c.Fill()
	return c.GetTextFromInterval(NewInterval(0, len(c.tokens)-1))
}

func (c *CommonTokenStream) GetTextFromTokens(start, end Token) string {
	if start == nil || end == nil {
		return ""
	}

	return c.GetTextFromInterval(NewInterval(start.GetTokenIndex(), end.GetTokenIndex()))
}

func (c *CommonTokenStream) GetTextFromRuleContext(interval RuleContext) string {
	return c.GetTextFromInterval(interval.GetSourceInterval())
}

func (c *CommonTokenStream) GetTextFromInterval(interval Interval) string {
	c.lazyInit()
	c.Sync(interval.Stop)

	start := interval.Start
	stop := interval.Stop

	if start < 0 || stop < 0 {
		return ""
	}

	if stop >= len(c.tokens) {
		stop = len(c.tokens) - 1
	}

	s := ""

	for i := start; i < stop+1; i++ {
		t := c.tokens[i]

		if t.GetTokenType() == TokenEOF {
			break
		}

		s += t.GetText()
	}

	return s
}

// Fill gets all tokens from the lexer until EOF.
func (c *CommonTokenStream) Fill() {
	c.lazyInit()

	for c.fetch(1000) == 1000 {
		continue
	}
}

func (c *CommonTokenStream) adjustSeekIndex(i int) int {
	return c.NextTokenOnChannel(i, c.channel)
}

func (c *CommonTokenStream) LB(k int) Token {
	if k == 0 || c.index-k < 0 {
		return nil
	}

	i := c.index
	n := 1

	// Find k good tokens looking backward
	for n <= k {
		// Skip off-channel tokens
		i = c.previousTokenOnChannel(i-1, c.channel)
		n++
	}

	if i < 0 {
		return nil
	}

	return c.tokens[i]
}

func (c *CommonTokenStream) LT(k int) Token {
	c.lazyInit()

	if k == 0 {
		return nil
	}

	if k < 0 {
		return c.LB(-k)
	}

	i := c.index
	n := 1 // We know tokens[n] is valid

	// Find k good tokens
	for n < k {
		// Skip off-channel tokens, but make sure to not look past EOF
		if c.Sync(i + 1) {
			i = c.NextTokenOnChannel(i+1, c.channel)
		}

		n++
	}

	return c.tokens[i]
}

// getNumberOfOnChannelTokens counts EOF once.
func (c *CommonTokenStream) getNumberOfOnChannelTokens() int {
	var n int

	c.Fill()

	for i := 0; i < len(c.tokens); i++ {
		t := c.tokens[i]

		if t.GetChannel() == c.channel {
			n++
		}

		if t.GetTokenType() == TokenEOF {
			break
		}
	}

	return n
}
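Typical use of this stream, as a hedged sketch: NewMyGrammarLexer stands in for an ANTLR-generated lexer constructor (hypothetical here); the stream buffers tokens from the default channel and Fill reads through EOF.

package main

import (
	"fmt"

	"github.com/antlr4-go/antlr/v4"
)

func main() {
	input := antlr.NewInputStream("a + b")

	// Hypothetical generated constructor; any antlr.Lexer works.
	var lexer antlr.Lexer = NewMyGrammarLexer(input)

	tokens := antlr.NewCommonTokenStream(lexer, antlr.TokenDefaultChannel)
	tokens.Fill()

	for _, t := range tokens.GetAllTokens() {
		fmt.Println(t.GetText())
	}
}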
150
e2e/vendor/github.com/antlr4-go/antlr/v4/comparators.go
generated
vendored
Normal file
@ -0,0 +1,150 @@
package antlr

// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
// Use of this file is governed by the BSD 3-clause license that
// can be found in the LICENSE.txt file in the project root.

// This file contains all the implementations of custom comparators used for generic collections when the
// Hash() and Equals() funcs supplied by the struct objects themselves need to be overridden. Normally, we would
// put the comparators in the source file for the structs themselves, but given the organization of this code is
// sorta kinda based upon the Java code, I found it confusing trying to find out which comparator was where and used by
// which instantiation of a collection. For instance, an Array2DHashSet in the Java source, when used with ATNConfig
// collections, requires three different comparators depending on what the collection is being used for. Collecting - pun intended -
// all the comparators here makes it much easier to see which implementation of hash and equals is used by which collection.
// It also makes it easy to verify that the Hash() and Equals() functions marry up with the Java implementations.

// ObjEqComparator is the equivalent of the Java ObjectEqualityComparator, which is the default instance of
// Equality comparator. We do not have inheritance in Go, only interfaces, so we use generics to enforce some
// type safety and avoid having to implement this for every type that we want to perform comparison on.
//
// This comparator works by using the standard Hash() and Equals() methods of the type T that is being compared, which
// allows us to use it in any collection instance that does not require a special hash or equals implementation.
type ObjEqComparator[T Collectable[T]] struct{}

var (
	aStateEqInst = &ObjEqComparator[ATNState]{}
	aConfEqInst  = &ObjEqComparator[*ATNConfig]{}

	// aConfCompInst is the comparator used for the ATNConfigSet for the configLookup cache
	aConfCompInst   = &ATNConfigComparator[*ATNConfig]{}
	atnConfCompInst = &BaseATNConfigComparator[*ATNConfig]{}
	dfaStateEqInst  = &ObjEqComparator[*DFAState]{}
	semctxEqInst    = &ObjEqComparator[SemanticContext]{}
	atnAltCfgEqInst = &ATNAltConfigComparator[*ATNConfig]{}
	pContextEqInst  = &ObjEqComparator[*PredictionContext]{}
)

// Equals2 delegates to the Equals() method of type T
func (c *ObjEqComparator[T]) Equals2(o1, o2 T) bool {
	return o1.Equals(o2)
}

// Hash1 delegates to the Hash() method of type T
func (c *ObjEqComparator[T]) Hash1(o T) int {
	return o.Hash()
}

type SemCComparator[T Collectable[T]] struct{}

// ATNConfigComparator is used as the comparator for the configLookup field of an ATNConfigSet
// and has a custom Equals() and Hash() implementation, because equality is not based on the
// standard Hash() and Equals() methods of the ATNConfig type.
type ATNConfigComparator[T Collectable[T]] struct {
}

// Equals2 is a custom comparator for ATNConfigs specifically for configLookup
func (c *ATNConfigComparator[T]) Equals2(o1, o2 *ATNConfig) bool {
	// Same pointer, must be equal, even if both nil
	if o1 == o2 {
		return true
	}

	// If either are nil, but not both, then the result is false
	if o1 == nil || o2 == nil {
		return false
	}

	return o1.GetState().GetStateNumber() == o2.GetState().GetStateNumber() &&
		o1.GetAlt() == o2.GetAlt() &&
		o1.GetSemanticContext().Equals(o2.GetSemanticContext())
}

// Hash1 is a custom hash implementation for ATNConfigs specifically for configLookup
func (c *ATNConfigComparator[T]) Hash1(o *ATNConfig) int {
	hash := 7
	hash = 31*hash + o.GetState().GetStateNumber()
	hash = 31*hash + o.GetAlt()
	hash = 31*hash + o.GetSemanticContext().Hash()
	return hash
}

// ATNAltConfigComparator is used as the comparator for mapping configs to Alt Bitsets
type ATNAltConfigComparator[T Collectable[T]] struct {
}

// Equals2 is a custom comparator for ATNConfigs specifically for configLookup
func (c *ATNAltConfigComparator[T]) Equals2(o1, o2 *ATNConfig) bool {
	// Same pointer, must be equal, even if both nil
	if o1 == o2 {
		return true
	}

	// If either are nil, but not both, then the result is false
	if o1 == nil || o2 == nil {
		return false
	}

	return o1.GetState().GetStateNumber() == o2.GetState().GetStateNumber() &&
		o1.GetContext().Equals(o2.GetContext())
}

// Hash1 is a custom hash implementation for ATNConfigs specifically for configLookup
func (c *ATNAltConfigComparator[T]) Hash1(o *ATNConfig) int {
	h := murmurInit(7)
	h = murmurUpdate(h, o.GetState().GetStateNumber())
	h = murmurUpdate(h, o.GetContext().Hash())
	return murmurFinish(h, 2)
}

// BaseATNConfigComparator is used as the comparator for the configLookup field of an ATNConfigSet
// and has a custom Equals() and Hash() implementation, because equality is not based on the
// standard Hash() and Equals() methods of the ATNConfig type.
type BaseATNConfigComparator[T Collectable[T]] struct {
}

// Equals2 is a custom comparator for ATNConfigs specifically for baseATNConfigSet
func (c *BaseATNConfigComparator[T]) Equals2(o1, o2 *ATNConfig) bool {
	// Same pointer, must be equal, even if both nil
	if o1 == o2 {
		return true
	}

	// If either are nil, but not both, then the result is false
	if o1 == nil || o2 == nil {
		return false
	}

	return o1.GetState().GetStateNumber() == o2.GetState().GetStateNumber() &&
		o1.GetAlt() == o2.GetAlt() &&
		o1.GetSemanticContext().Equals(o2.GetSemanticContext())
}

// Hash1 is a custom hash implementation for ATNConfigs specifically for configLookup, but in fact just
// delegates to the standard Hash() method of the ATNConfig type.
func (c *BaseATNConfigComparator[T]) Hash1(o *ATNConfig) int {
	return o.Hash()
}
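A hedged sketch of the delegation pattern described above: ObjEqComparator forwards to the element type's own Hash/Equals, shown here with ATNState (two fresh BasicStates still carry the invalid state number, so they compare equal):

package main

import (
	"fmt"

	"github.com/antlr4-go/antlr/v4"
)

func main() {
	// ObjEqComparator simply forwards to the element type's own Hash/Equals,
	// here via the ATNState interface implemented by BasicState.
	cmp := &antlr.ObjEqComparator[antlr.ATNState]{}

	s1 := antlr.NewBasicState()
	s2 := antlr.NewBasicState()

	fmt.Println(cmp.Equals2(s1, s2), cmp.Hash1(s1)) // true -1
}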
214
e2e/vendor/github.com/antlr4-go/antlr/v4/configuration.go
generated
vendored
Normal file
@ -0,0 +1,214 @@
package antlr

type runtimeConfiguration struct {
	statsTraceStacks              bool
	lexerATNSimulatorDebug        bool
	lexerATNSimulatorDFADebug     bool
	parserATNSimulatorDebug       bool
	parserATNSimulatorTraceATNSim bool
	parserATNSimulatorDFADebug    bool
	parserATNSimulatorRetryDebug  bool
	lRLoopEntryBranchOpt          bool
	memoryManager                 bool
}

// Global runtime configuration
var runtimeConfig = runtimeConfiguration{
	lRLoopEntryBranchOpt: true,
}

type runtimeOption func(*runtimeConfiguration) error

// ConfigureRuntime allows the runtime to be configured globally, setting things like trace and statistics options.
// It uses the functional options pattern for Go. This is a package-global function as it operates on the runtime
// configuration regardless of the instantiation of anything higher up, such as a parser or lexer. Generally this is
// used for debugging/tracing/statistics options, which are usually used by the runtime maintainers (or rather the
// only maintainer). However, it is possible that you might want to use this to set a global option concerning the
// memory allocation type used by the runtime, such as sync.Pool or not.
//
// The options are applied in the order they are passed in, so the last option will override any previous options.
//
// For example, if you want to turn on the collection create point stack flag to true, you can do:
//
//	antlr.ConfigureRuntime(antlr.WithStatsTraceStacks(true))
//
// If you want to turn it off, you can do:
//
//	antlr.ConfigureRuntime(antlr.WithStatsTraceStacks(false))
func ConfigureRuntime(options ...runtimeOption) error {
	for _, option := range options {
		err := option(&runtimeConfig)
		if err != nil {
			return err
		}
	}
	return nil
}

// WithStatsTraceStacks sets the global flag indicating whether to collect stack traces at the create-point of
// certain structs, such as collections, or the use point of certain methods such as Put().
// Because this can be expensive, it is turned off by default. However, it
// can be useful to track down exactly where memory is being created and used.
//
// Use:
//
//	antlr.ConfigureRuntime(antlr.WithStatsTraceStacks(true))
//
// You can turn it off at any time using:
//
//	antlr.ConfigureRuntime(antlr.WithStatsTraceStacks(false))
func WithStatsTraceStacks(trace bool) runtimeOption {
	return func(config *runtimeConfiguration) error {
		config.statsTraceStacks = trace
		return nil
	}
}

// WithLexerATNSimulatorDebug sets the global flag indicating whether to log debug information from the lexer [ATN]
// simulator. This is useful for debugging lexer issues by comparing the output with the Java runtime. Only useful
// to the runtime maintainers.
//
// Use:
//
//	antlr.ConfigureRuntime(antlr.WithLexerATNSimulatorDebug(true))
//
// You can turn it off at any time using:
//
//	antlr.ConfigureRuntime(antlr.WithLexerATNSimulatorDebug(false))
func WithLexerATNSimulatorDebug(debug bool) runtimeOption {
	return func(config *runtimeConfiguration) error {
		config.lexerATNSimulatorDebug = debug
		return nil
	}
}

// WithLexerATNSimulatorDFADebug sets the global flag indicating whether to log debug information from the lexer [ATN] [DFA]
// simulator. This is useful for debugging lexer issues by comparing the output with the Java runtime. Only useful
// to the runtime maintainers.
//
// Use:
//
//	antlr.ConfigureRuntime(antlr.WithLexerATNSimulatorDFADebug(true))
//
// You can turn it off at any time using:
//
//	antlr.ConfigureRuntime(antlr.WithLexerATNSimulatorDFADebug(false))
func WithLexerATNSimulatorDFADebug(debug bool) runtimeOption {
	return func(config *runtimeConfiguration) error {
		config.lexerATNSimulatorDFADebug = debug
		return nil
	}
}

// WithParserATNSimulatorDebug sets the global flag indicating whether to log debug information from the parser [ATN]
// simulator. This is useful for debugging parser issues by comparing the output with the Java runtime. Only useful
// to the runtime maintainers.
//
// Use:
//
//	antlr.ConfigureRuntime(antlr.WithParserATNSimulatorDebug(true))
//
// You can turn it off at any time using:
//
//	antlr.ConfigureRuntime(antlr.WithParserATNSimulatorDebug(false))
func WithParserATNSimulatorDebug(debug bool) runtimeOption {
	return func(config *runtimeConfiguration) error {
		config.parserATNSimulatorDebug = debug
		return nil
	}
}

// WithParserATNSimulatorTraceATNSim sets the global flag indicating whether to log trace information from the parser [ATN] simulator
// [DFA]. This is useful for debugging parser issues by comparing the output with the Java runtime. Only useful
// to the runtime maintainers.
//
// Use:
//
//	antlr.ConfigureRuntime(antlr.WithParserATNSimulatorTraceATNSim(true))
//
// You can turn it off at any time using:
//
//	antlr.ConfigureRuntime(antlr.WithParserATNSimulatorTraceATNSim(false))
func WithParserATNSimulatorTraceATNSim(trace bool) runtimeOption {
	return func(config *runtimeConfiguration) error {
		config.parserATNSimulatorTraceATNSim = trace
		return nil
	}
}

// WithParserATNSimulatorDFADebug sets the global flag indicating whether to log debug information from the parser [ATN] [DFA]
// simulator. This is useful for debugging parser issues by comparing the output with the Java runtime. Only useful
// to the runtime maintainers.
//
// Use:
//
//	antlr.ConfigureRuntime(antlr.WithParserATNSimulatorDFADebug(true))
//
// You can turn it off at any time using:
//
//	antlr.ConfigureRuntime(antlr.WithParserATNSimulatorDFADebug(false))
func WithParserATNSimulatorDFADebug(debug bool) runtimeOption {
	return func(config *runtimeConfiguration) error {
		config.parserATNSimulatorDFADebug = debug
		return nil
	}
}

// WithParserATNSimulatorRetryDebug sets the global flag indicating whether to log debug information from the parser [ATN] [DFA]
// simulator when retrying a decision. This is useful for debugging parser issues by comparing the output with the Java runtime.
// Only useful to the runtime maintainers.
//
// Use:
//
//	antlr.ConfigureRuntime(antlr.WithParserATNSimulatorRetryDebug(true))
//
// You can turn it off at any time using:
//
//	antlr.ConfigureRuntime(antlr.WithParserATNSimulatorRetryDebug(false))
func WithParserATNSimulatorRetryDebug(debug bool) runtimeOption {
	return func(config *runtimeConfiguration) error {
		config.parserATNSimulatorRetryDebug = debug
		return nil
	}
}

// WithLRLoopEntryBranchOpt sets the global flag indicating whether left-recursive loop operations should be
// optimized or not. This is useful for debugging parser issues by comparing the output with the Java runtime.
// It turns off the functionality of [canDropLoopEntryEdgeInLeftRecursiveRule] in [ParserATNSimulator].
//
// Note that the default is to use this optimization.
//
// Use:
//
//	antlr.ConfigureRuntime(antlr.WithLRLoopEntryBranchOpt(true))
//
// You can turn it off at any time using:
//
//	antlr.ConfigureRuntime(antlr.WithLRLoopEntryBranchOpt(false))
func WithLRLoopEntryBranchOpt(off bool) runtimeOption {
	return func(config *runtimeConfiguration) error {
		config.lRLoopEntryBranchOpt = off
		return nil
	}
}

// WithMemoryManager sets the global flag indicating whether to use the memory manager or not. This is useful
// for poorly constructed grammars that create a lot of garbage. It turns on the functionality of [memoryManager], which
// will intercept garbage collection and cause available memory to be reused. At the end of the day, this is no substitute
// for fixing your grammar by ridding yourself of extreme ambiguity. But if you are just trying to reuse an open-source
// grammar, this may help make it more practical.
//
// Note that the default is to use normal Go memory allocation and not pool memory.
//
// Use:
//
//	antlr.ConfigureRuntime(antlr.WithMemoryManager(true))
//
// Note that if you turn this on, you should probably leave it on. You should use only one memory strategy or the other
// and should remember to nil out any references to the parser or lexer when you are done with them.
func WithMemoryManager(use bool) runtimeOption {
	return func(config *runtimeConfiguration) error {
		config.memoryManager = use
		return nil
	}
}
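Options compose left to right, so a single ConfigureRuntime call can set several flags; a short sketch using only the options defined above:

package main

import "github.com/antlr4-go/antlr/v4"

func main() {
	// Later options override earlier ones; an error from any option aborts the call.
	err := antlr.ConfigureRuntime(
		antlr.WithLexerATNSimulatorDebug(true),
		antlr.WithParserATNSimulatorTraceATNSim(true),
		antlr.WithStatsTraceStacks(false),
	)
	if err != nil {
		panic(err)
	}
}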
175
e2e/vendor/github.com/antlr4-go/antlr/v4/dfa.go
generated
vendored
Normal file
@ -0,0 +1,175 @@
// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
// Use of this file is governed by the BSD 3-clause license that
// can be found in the LICENSE.txt file in the project root.

package antlr

// DFA represents the Deterministic Finite Automaton used by the recognizer, including all the states it can
// reach and the transitions between them.
type DFA struct {
	// atnStartState is the ATN state in which this was created
	atnStartState DecisionState

	decision int

	// states is all the DFA states. Use Map to get the old state back; Set can only
	// indicate whether it is there. Go maps implement key hash collisions and so on and are very
	// good, but the DFAState is an object and can't be used directly as the key as it can in, say, Java
	// and C#, whereby if the hashcode is the same for two objects, then Equals() is called against them
	// to see if they really are the same object. Hence, we have our own map storage.
	//
	states *JStore[*DFAState, *ObjEqComparator[*DFAState]]

	numstates int

	s0 *DFAState

	// precedenceDfa is the backing field for isPrecedenceDfa and setPrecedenceDfa.
	// True if the DFA is for a precedence decision and false otherwise.
	precedenceDfa bool
}

func NewDFA(atnStartState DecisionState, decision int) *DFA {
	dfa := &DFA{
		atnStartState: atnStartState,
		decision:      decision,
		states:        nil, // Lazy initialize
	}
	if s, ok := atnStartState.(*StarLoopEntryState); ok && s.precedenceRuleDecision {
		dfa.precedenceDfa = true
		dfa.s0 = NewDFAState(-1, NewATNConfigSet(false))
		dfa.s0.isAcceptState = false
		dfa.s0.requiresFullContext = false
	}
	return dfa
}

// getPrecedenceStartState gets the start state for the current precedence and
// returns the start state corresponding to the specified precedence if a start
// state exists for the specified precedence and nil otherwise. d must be a
// precedence DFA. See also isPrecedenceDfa.
func (d *DFA) getPrecedenceStartState(precedence int) *DFAState {
	if !d.getPrecedenceDfa() {
		panic("only precedence DFAs may contain a precedence start state")
	}

	// s0.edges is never nil for a precedence DFA
	if precedence < 0 || precedence >= len(d.getS0().getEdges()) {
		return nil
	}

	return d.getS0().getIthEdge(precedence)
}

// setPrecedenceStartState sets the start state for the current precedence. d
// must be a precedence DFA. See also isPrecedenceDfa.
func (d *DFA) setPrecedenceStartState(precedence int, startState *DFAState) {
	if !d.getPrecedenceDfa() {
		panic("only precedence DFAs may contain a precedence start state")
	}

	if precedence < 0 {
		return
	}

	// Synchronization on s0 here is ok. When the DFA is turned into a
	// precedence DFA, s0 will be initialized once and not updated again. s0.edges
	// is never nil for a precedence DFA.
	s0 := d.getS0()
	if precedence >= s0.numEdges() {
		edges := append(s0.getEdges(), make([]*DFAState, precedence+1-s0.numEdges())...)
		s0.setEdges(edges)
		d.setS0(s0)
	}

	s0.setIthEdge(precedence, startState)
}

func (d *DFA) getPrecedenceDfa() bool {
	return d.precedenceDfa
}

// setPrecedenceDfa sets whether d is a precedence DFA. If precedenceDfa differs
// from the current DFA configuration, then d.states is cleared, the initial
// state s0 is set to a new DFAState with an empty outgoing DFAState.edges to
// store the start states for individual precedence values if precedenceDfa is
// true or nil otherwise, and d.precedenceDfa is updated.
func (d *DFA) setPrecedenceDfa(precedenceDfa bool) {
	if d.getPrecedenceDfa() != precedenceDfa {
		d.states = nil // Lazy initialize
		d.numstates = 0

		if precedenceDfa {
			precedenceState := NewDFAState(-1, NewATNConfigSet(false))
			precedenceState.setEdges(make([]*DFAState, 0))
|
||||
precedenceState.isAcceptState = false
|
||||
precedenceState.requiresFullContext = false
|
||||
d.setS0(precedenceState)
|
||||
} else {
|
||||
d.setS0(nil)
|
||||
}
|
||||
|
||||
d.precedenceDfa = precedenceDfa
|
||||
}
|
||||
}
|
||||
|
||||
// Len returns the number of states in d. We use this instead of accessing states directly so that we can implement lazy
|
||||
// instantiation of the states JMap.
|
||||
func (d *DFA) Len() int {
|
||||
if d.states == nil {
|
||||
return 0
|
||||
}
|
||||
return d.states.Len()
|
||||
}
|
||||
|
||||
// Get returns a state that matches s if it is present in the DFA state set. We defer to this
|
||||
// function instead of accessing states directly so that we can implement lazy instantiation of the states JMap.
|
||||
func (d *DFA) Get(s *DFAState) (*DFAState, bool) {
|
||||
if d.states == nil {
|
||||
return nil, false
|
||||
}
|
||||
return d.states.Get(s)
|
||||
}
|
||||
|
||||
func (d *DFA) Put(s *DFAState) (*DFAState, bool) {
|
||||
if d.states == nil {
|
||||
d.states = NewJStore[*DFAState, *ObjEqComparator[*DFAState]](dfaStateEqInst, DFAStateCollection, "DFA via DFA.Put")
|
||||
}
|
||||
return d.states.Put(s)
|
||||
}
|
||||
|
||||
func (d *DFA) getS0() *DFAState {
|
||||
return d.s0
|
||||
}
|
||||
|
||||
func (d *DFA) setS0(s *DFAState) {
|
||||
d.s0 = s
|
||||
}
|
||||
|
||||
// sortedStates returns the states in d sorted by their state number, or an empty set if d.states is nil.
|
||||
func (d *DFA) sortedStates() []*DFAState {
|
||||
if d.states == nil {
|
||||
return []*DFAState{}
|
||||
}
|
||||
vs := d.states.SortedSlice(func(i, j *DFAState) bool {
|
||||
return i.stateNumber < j.stateNumber
|
||||
})
|
||||
|
||||
return vs
|
||||
}
|
||||
|
||||
func (d *DFA) String(literalNames []string, symbolicNames []string) string {
|
||||
if d.getS0() == nil {
|
||||
return ""
|
||||
}
|
||||
|
||||
return NewDFASerializer(d, literalNames, symbolicNames).String()
|
||||
}
|
||||
|
||||
func (d *DFA) ToLexerString() string {
|
||||
if d.getS0() == nil {
|
||||
return ""
|
||||
}
|
||||
|
||||
return NewLexerDFASerializer(d).String()
|
||||
}
|
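A minimal sketch of the lazy-initialization contract described above, using only exported names from this file; obtaining a real DecisionState requires a loaded ATN, which is elided here.

package example

import "github.com/antlr4-go/antlr/v4"

// emptyLen shows the nil-safe behavior: a freshly built DFA reports zero
// states, and the backing JStore is only allocated on the first Put.
func emptyLen(start antlr.DecisionState) int {
	d := antlr.NewDFA(start, 0)
	return d.Len() // 0 before any Put
}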
158 e2e/vendor/github.com/antlr4-go/antlr/v4/dfa_serializer.go generated vendored Normal file
@@ -0,0 +1,158 @@
// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
// Use of this file is governed by the BSD 3-clause license that
// can be found in the LICENSE.txt file in the project root.

package antlr

import (
	"fmt"
	"strconv"
	"strings"
)

// DFASerializer is a DFA walker that knows how to dump the DFA states to serialized
// strings.
type DFASerializer struct {
	dfa           *DFA
	literalNames  []string
	symbolicNames []string
}

func NewDFASerializer(dfa *DFA, literalNames, symbolicNames []string) *DFASerializer {
	if literalNames == nil {
		literalNames = make([]string, 0)
	}

	if symbolicNames == nil {
		symbolicNames = make([]string, 0)
	}

	return &DFASerializer{
		dfa:           dfa,
		literalNames:  literalNames,
		symbolicNames: symbolicNames,
	}
}

func (d *DFASerializer) String() string {
	if d.dfa.getS0() == nil {
		return ""
	}

	buf := ""
	states := d.dfa.sortedStates()

	for _, s := range states {
		if s.edges != nil {
			n := len(s.edges)

			for j := 0; j < n; j++ {
				t := s.edges[j]

				if t != nil && t.stateNumber != 0x7FFFFFFF {
					buf += d.GetStateString(s)
					buf += "-"
					buf += d.getEdgeLabel(j)
					buf += "->"
					buf += d.GetStateString(t)
					buf += "\n"
				}
			}
		}
	}

	if len(buf) == 0 {
		return ""
	}

	return buf
}

func (d *DFASerializer) getEdgeLabel(i int) string {
	if i == 0 {
		return "EOF"
	} else if d.literalNames != nil && i-1 < len(d.literalNames) {
		return d.literalNames[i-1]
	} else if d.symbolicNames != nil && i-1 < len(d.symbolicNames) {
		return d.symbolicNames[i-1]
	}

	return strconv.Itoa(i - 1)
}

func (d *DFASerializer) GetStateString(s *DFAState) string {
	var a, b string

	if s.isAcceptState {
		a = ":"
	}

	if s.requiresFullContext {
		b = "^"
	}

	baseStateStr := a + "s" + strconv.Itoa(s.stateNumber) + b

	if s.isAcceptState {
		if s.predicates != nil {
			return baseStateStr + "=>" + fmt.Sprint(s.predicates)
		}

		return baseStateStr + "=>" + fmt.Sprint(s.prediction)
	}

	return baseStateStr
}

type LexerDFASerializer struct {
	*DFASerializer
}

func NewLexerDFASerializer(dfa *DFA) *LexerDFASerializer {
	return &LexerDFASerializer{DFASerializer: NewDFASerializer(dfa, nil, nil)}
}

func (l *LexerDFASerializer) getEdgeLabel(i int) string {
	var sb strings.Builder
	sb.Grow(6)
	sb.WriteByte('\'')
	sb.WriteRune(rune(i))
	sb.WriteByte('\'')
	return sb.String()
}

func (l *LexerDFASerializer) String() string {
	if l.dfa.getS0() == nil {
		return ""
	}

	buf := ""
	states := l.dfa.sortedStates()

	for i := 0; i < len(states); i++ {
		s := states[i]

		if s.edges != nil {
			n := len(s.edges)

			for j := 0; j < n; j++ {
				t := s.edges[j]

				if t != nil && t.stateNumber != 0x7FFFFFFF {
					buf += l.GetStateString(s)
					buf += "-"
					buf += l.getEdgeLabel(j)
					buf += "->"
					buf += l.GetStateString(t)
					buf += "\n"
				}
			}
		}
	}

	if len(buf) == 0 {
		return ""
	}

	return buf
}
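As a quick illustration of the output format built above, a sketch that dumps one decision DFA; it uses only DFA.String from dfa.go and the literal/symbolic name accessors that the Parser interface exposes elsewhere in this runtime.

package example

import (
	"fmt"

	"github.com/antlr4-go/antlr/v4"
)

// dumpDecision prints one decision DFA in the serialized form built by
// DFASerializer.String above: one "state-label->state" line per edge.
func dumpDecision(d *antlr.DFA, p antlr.Parser) {
	fmt.Println(d.String(p.GetLiteralNames(), p.GetSymbolicNames()))
}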
170 e2e/vendor/github.com/antlr4-go/antlr/v4/dfa_state.go generated vendored Normal file
@@ -0,0 +1,170 @@
// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
// Use of this file is governed by the BSD 3-clause license that
// can be found in the LICENSE.txt file in the project root.

package antlr

import (
	"fmt"
)

// PredPrediction maps a predicate to a predicted alternative.
type PredPrediction struct {
	alt  int
	pred SemanticContext
}

func NewPredPrediction(pred SemanticContext, alt int) *PredPrediction {
	return &PredPrediction{alt: alt, pred: pred}
}

func (p *PredPrediction) String() string {
	return "(" + fmt.Sprint(p.pred) + ", " + fmt.Sprint(p.alt) + ")"
}

// DFAState represents a set of possible [ATN] configurations. As Aho, Sethi,
// Ullman p. 117 says: "The DFA uses its state to keep track of all possible
// states the ATN can be in after reading each input symbol. That is to say,
// after reading input a1, a2,..an, the DFA is in a state that represents the
// subset T of the states of the ATN that are reachable from the ATN's start
// state along some path labeled a1a2..an."
//
// In conventional NFA-to-DFA conversion, therefore, the subset T would be a bitset representing the set of
// states the [ATN] could be in. We need to track the alt predicted by each state
// as well, however. More importantly, we need to maintain a stack of states,
// tracking the closure operations as they jump from rule to rule, emulating
// rule invocations (method calls). I have to add a stack to simulate the proper
// lookahead sequences for the underlying LL grammar from which the ATN was
// derived.
//
// I use a set of [ATNConfig] objects, not simple states. An [ATNConfig] is both a
// state (a la normal conversion) and a [RuleContext] describing the chain of rules
// (if any) followed to arrive at that state.
//
// A [DFAState] may have multiple references to a particular state, but with
// different [ATN] contexts (with same or different alts) meaning that state was
// reached via a different set of rule invocations.
type DFAState struct {
	stateNumber int
	configs     *ATNConfigSet

	// edges elements point to the target of the symbol. Shift up by 1 so (-1)
	// Token.EOF maps to the first element.
	edges []*DFAState

	isAcceptState bool

	// prediction is the 'ttype' we match or alt we predict if the state is 'accept'.
	// Set to ATN.INVALID_ALT_NUMBER when predicates != nil or
	// requiresFullContext.
	prediction int

	lexerActionExecutor *LexerActionExecutor

	// requiresFullContext indicates it was created during an SLL prediction that
	// discovered a conflict between the configurations in the state. Future
	// ParserATNSimulator.execATN invocations immediately jump to doing
	// full context prediction if true.
	requiresFullContext bool

	// predicates is the predicates associated with the ATN configurations of the
	// DFA state during SLL parsing. When we have predicates, requiresFullContext
	// is false, since full context prediction evaluates predicates on-the-fly. If
	// predicates is not nil, then prediction is ATN.INVALID_ALT_NUMBER.
	//
	// We only use these for non-requiresFullContext but conflicting states. That
	// means we know from the context (it's $ or we don't dip into outer context)
	// that it's an ambiguity not a conflict.
	//
	// This list is computed by
	// ParserATNSimulator.predicateDFAState.
	predicates []*PredPrediction
}

func NewDFAState(stateNumber int, configs *ATNConfigSet) *DFAState {
	if configs == nil {
		configs = NewATNConfigSet(false)
	}

	return &DFAState{configs: configs, stateNumber: stateNumber}
}

// GetAltSet gets the set of all alts mentioned by all ATN configurations in d.
func (d *DFAState) GetAltSet() []int {
	var alts []int

	if d.configs != nil {
		for _, c := range d.configs.configs {
			alts = append(alts, c.GetAlt())
		}
	}

	if len(alts) == 0 {
		return nil
	}

	return alts
}

func (d *DFAState) getEdges() []*DFAState {
	return d.edges
}

func (d *DFAState) numEdges() int {
	return len(d.edges)
}

func (d *DFAState) getIthEdge(i int) *DFAState {
	return d.edges[i]
}

func (d *DFAState) setEdges(newEdges []*DFAState) {
	d.edges = newEdges
}

func (d *DFAState) setIthEdge(i int, edge *DFAState) {
	d.edges[i] = edge
}

func (d *DFAState) setPrediction(v int) {
	d.prediction = v
}

func (d *DFAState) String() string {
	var s string
	if d.isAcceptState {
		if d.predicates != nil {
			s = "=>" + fmt.Sprint(d.predicates)
		} else {
			s = "=>" + fmt.Sprint(d.prediction)
		}
	}

	return fmt.Sprintf("%d:%s%s", d.stateNumber, fmt.Sprint(d.configs), s)
}

func (d *DFAState) Hash() int {
	h := murmurInit(7)
	h = murmurUpdate(h, d.configs.Hash())
	return murmurFinish(h, 1)
}

// Equals returns whether d equals other. Two DFAStates are equal if their ATN
// configuration sets are the same. This method is used to see if a state
// already exists.
//
// Because the number of alternatives and number of ATN configurations are
// finite, there is a finite number of DFA states that can be processed. This is
// necessary to show that the algorithm terminates.
//
// Cannot test the DFA state numbers here because in
// ParserATNSimulator.addDFAState we need to know if any other state exists that
// has the exact set of ATN configurations as d. The stateNumber is irrelevant.
func (d *DFAState) Equals(o Collectable[*DFAState]) bool {
	if d == o {
		return true
	}

	return d.configs.Equals(o.(*DFAState).configs)
}
110 e2e/vendor/github.com/antlr4-go/antlr/v4/diagnostic_error_listener.go generated vendored Normal file
@@ -0,0 +1,110 @@
// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
// Use of this file is governed by the BSD 3-clause license that
// can be found in the LICENSE.txt file in the project root.

package antlr

import (
	"strconv"
)

// DiagnosticErrorListener is an implementation of [ErrorListener] that can be used to identify
// certain potential correctness and performance problems in grammars. "Reports"
// are made by calling [Parser.NotifyErrorListeners] with the appropriate
// message.
//
//   - Ambiguities: These are cases where more than one path through the
//     grammar can Match the input.
//   - Weak context sensitivity: These are cases where full-context
//     prediction resolved an SLL conflict to a unique alternative which equaled the
//     minimum alternative of the SLL conflict.
//   - Strong (forced) context sensitivity: These are cases where the
//     full-context prediction resolved an SLL conflict to a unique alternative,
//     and the minimum alternative of the SLL conflict was found to not be
//     a truly viable alternative. Two-stage parsing cannot be used for inputs where
//     this situation occurs.
type DiagnosticErrorListener struct {
	*DefaultErrorListener

	exactOnly bool
}

//goland:noinspection GoUnusedExportedFunction
func NewDiagnosticErrorListener(exactOnly bool) *DiagnosticErrorListener {

	n := new(DiagnosticErrorListener)

	// whether all ambiguities or only exact ambiguities are Reported.
	n.exactOnly = exactOnly
	return n
}

func (d *DiagnosticErrorListener) ReportAmbiguity(recognizer Parser, dfa *DFA, startIndex, stopIndex int, exact bool, ambigAlts *BitSet, configs *ATNConfigSet) {
	if d.exactOnly && !exact {
		return
	}
	msg := "reportAmbiguity d=" +
		d.getDecisionDescription(recognizer, dfa) +
		": ambigAlts=" +
		d.getConflictingAlts(ambigAlts, configs).String() +
		", input='" +
		recognizer.GetTokenStream().GetTextFromInterval(NewInterval(startIndex, stopIndex)) + "'"
	recognizer.NotifyErrorListeners(msg, nil, nil)
}

func (d *DiagnosticErrorListener) ReportAttemptingFullContext(recognizer Parser, dfa *DFA, startIndex, stopIndex int, _ *BitSet, _ *ATNConfigSet) {

	msg := "reportAttemptingFullContext d=" +
		d.getDecisionDescription(recognizer, dfa) +
		", input='" +
		recognizer.GetTokenStream().GetTextFromInterval(NewInterval(startIndex, stopIndex)) + "'"
	recognizer.NotifyErrorListeners(msg, nil, nil)
}

func (d *DiagnosticErrorListener) ReportContextSensitivity(recognizer Parser, dfa *DFA, startIndex, stopIndex, _ int, _ *ATNConfigSet) {
	msg := "reportContextSensitivity d=" +
		d.getDecisionDescription(recognizer, dfa) +
		", input='" +
		recognizer.GetTokenStream().GetTextFromInterval(NewInterval(startIndex, stopIndex)) + "'"
	recognizer.NotifyErrorListeners(msg, nil, nil)
}

func (d *DiagnosticErrorListener) getDecisionDescription(recognizer Parser, dfa *DFA) string {
	decision := dfa.decision
	ruleIndex := dfa.atnStartState.GetRuleIndex()

	ruleNames := recognizer.GetRuleNames()
	if ruleIndex < 0 || ruleIndex >= len(ruleNames) {
		return strconv.Itoa(decision)
	}
	ruleName := ruleNames[ruleIndex]
	if ruleName == "" {
		return strconv.Itoa(decision)
	}
	return strconv.Itoa(decision) + " (" + ruleName + ")"
}

// getConflictingAlts computes the set of conflicting or ambiguous alternatives from a
// configuration set, if that information was not already provided by the
// parser. It returns ReportedAlts if it is not nil, otherwise the set of
// alternatives represented in set.
func (d *DiagnosticErrorListener) getConflictingAlts(ReportedAlts *BitSet, set *ATNConfigSet) *BitSet {
	if ReportedAlts != nil {
		return ReportedAlts
	}
	result := NewBitSet()
	for _, c := range set.configs {
		result.add(c.GetAlt())
	}

	return result
}
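A hedged sketch of wiring this listener into a generated parser. MyLexer, MyParser, and Start_ are hypothetical stand-ins for generated code; the listener, token stream, and prediction-mode calls are standard runtime APIs.

package example

import "github.com/antlr4-go/antlr/v4"

// diagnose parses input and reports every ambiguity the prediction engine
// finds, not just plain syntax errors. NewMyLexer/NewMyParser/Start_ are
// hypothetical generated names.
func diagnose(input string) {
	is := antlr.NewInputStream(input)
	lexer := NewMyLexer(is)
	tokens := antlr.NewCommonTokenStream(lexer, antlr.TokenDefaultChannel)
	p := NewMyParser(tokens)

	p.RemoveErrorListeners()
	// true: report only exact ambiguities; false reports all of them.
	p.AddErrorListener(antlr.NewDiagnosticErrorListener(true))
	// Exact ambiguity detection needs full-context prediction.
	p.GetInterpreter().SetPredictionMode(antlr.PredictionModeLLExactAmbigDetection)

	p.Start_()
}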
100 e2e/vendor/github.com/antlr4-go/antlr/v4/error_listener.go generated vendored Normal file
@@ -0,0 +1,100 @@
// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
// Use of this file is governed by the BSD 3-clause license that
// can be found in the LICENSE.txt file in the project root.

package antlr

import (
	"fmt"
	"os"
	"strconv"
)

// DefaultErrorListener provides an empty default implementation of [ErrorListener]. The
// default implementation of each method does nothing, but can be overridden as
// necessary.

type ErrorListener interface {
	SyntaxError(recognizer Recognizer, offendingSymbol interface{}, line, column int, msg string, e RecognitionException)
	ReportAmbiguity(recognizer Parser, dfa *DFA, startIndex, stopIndex int, exact bool, ambigAlts *BitSet, configs *ATNConfigSet)
	ReportAttemptingFullContext(recognizer Parser, dfa *DFA, startIndex, stopIndex int, conflictingAlts *BitSet, configs *ATNConfigSet)
	ReportContextSensitivity(recognizer Parser, dfa *DFA, startIndex, stopIndex, prediction int, configs *ATNConfigSet)
}

type DefaultErrorListener struct {
}

//goland:noinspection GoUnusedExportedFunction
func NewDefaultErrorListener() *DefaultErrorListener {
	return new(DefaultErrorListener)
}

func (d *DefaultErrorListener) SyntaxError(_ Recognizer, _ interface{}, _, _ int, _ string, _ RecognitionException) {
}

func (d *DefaultErrorListener) ReportAmbiguity(_ Parser, _ *DFA, _, _ int, _ bool, _ *BitSet, _ *ATNConfigSet) {
}

func (d *DefaultErrorListener) ReportAttemptingFullContext(_ Parser, _ *DFA, _, _ int, _ *BitSet, _ *ATNConfigSet) {
}

func (d *DefaultErrorListener) ReportContextSensitivity(_ Parser, _ *DFA, _, _, _ int, _ *ATNConfigSet) {
}

type ConsoleErrorListener struct {
	*DefaultErrorListener
}

func NewConsoleErrorListener() *ConsoleErrorListener {
	return new(ConsoleErrorListener)
}

// ConsoleErrorListenerINSTANCE provides a default instance of [ConsoleErrorListener].
var ConsoleErrorListenerINSTANCE = NewConsoleErrorListener()

// SyntaxError prints messages to os.Stderr containing the
// values of line, charPositionInLine, and msg using
// the following format:
//
//	line <line>:<charPositionInLine> <msg>
func (c *ConsoleErrorListener) SyntaxError(_ Recognizer, _ interface{}, line, column int, msg string, _ RecognitionException) {
	_, _ = fmt.Fprintln(os.Stderr, "line "+strconv.Itoa(line)+":"+strconv.Itoa(column)+" "+msg)
}

type ProxyErrorListener struct {
	*DefaultErrorListener
	delegates []ErrorListener
}

func NewProxyErrorListener(delegates []ErrorListener) *ProxyErrorListener {
	if delegates == nil {
		panic("delegates is not provided")
	}
	l := new(ProxyErrorListener)
	l.delegates = delegates
	return l
}

func (p *ProxyErrorListener) SyntaxError(recognizer Recognizer, offendingSymbol interface{}, line, column int, msg string, e RecognitionException) {
	for _, d := range p.delegates {
		d.SyntaxError(recognizer, offendingSymbol, line, column, msg, e)
	}
}

func (p *ProxyErrorListener) ReportAmbiguity(recognizer Parser, dfa *DFA, startIndex, stopIndex int, exact bool, ambigAlts *BitSet, configs *ATNConfigSet) {
	for _, d := range p.delegates {
		d.ReportAmbiguity(recognizer, dfa, startIndex, stopIndex, exact, ambigAlts, configs)
	}
}

func (p *ProxyErrorListener) ReportAttemptingFullContext(recognizer Parser, dfa *DFA, startIndex, stopIndex int, conflictingAlts *BitSet, configs *ATNConfigSet) {
	for _, d := range p.delegates {
		d.ReportAttemptingFullContext(recognizer, dfa, startIndex, stopIndex, conflictingAlts, configs)
	}
}

func (p *ProxyErrorListener) ReportContextSensitivity(recognizer Parser, dfa *DFA, startIndex, stopIndex, prediction int, configs *ATNConfigSet) {
	for _, d := range p.delegates {
		d.ReportContextSensitivity(recognizer, dfa, startIndex, stopIndex, prediction, configs)
	}
}
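A minimal sketch of the override pattern the comment above describes: embed DefaultErrorListener and replace only the callback you need. Everything here uses types from this file; the struct itself is illustrative.

package example

import (
	"fmt"

	"github.com/antlr4-go/antlr/v4"
)

// collectingListener embeds DefaultErrorListener so it only needs to
// override the single callback it cares about; the other three report
// methods fall through to the empty defaults above.
type collectingListener struct {
	*antlr.DefaultErrorListener
	errs []string
}

func (c *collectingListener) SyntaxError(_ antlr.Recognizer, _ interface{}, line, column int, msg string, _ antlr.RecognitionException) {
	c.errs = append(c.errs, fmt.Sprintf("%d:%d %s", line, column, msg))
}

// Typical wiring: replace the console default, then parse and inspect errs.
//
//	l := &collectingListener{DefaultErrorListener: antlr.NewDefaultErrorListener()}
//	p.RemoveErrorListeners()
//	p.AddErrorListener(l)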
702 e2e/vendor/github.com/antlr4-go/antlr/v4/error_strategy.go generated vendored Normal file
@@ -0,0 +1,702 @@
|
||||
// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
|
||||
// Use of this file is governed by the BSD 3-clause license that
|
||||
// can be found in the LICENSE.txt file in the project root.
|
||||
|
||||
package antlr
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"reflect"
|
||||
"strconv"
|
||||
"strings"
|
||||
)
|
||||
|
||||
type ErrorStrategy interface {
|
||||
reset(Parser)
|
||||
RecoverInline(Parser) Token
|
||||
Recover(Parser, RecognitionException)
|
||||
Sync(Parser)
|
||||
InErrorRecoveryMode(Parser) bool
|
||||
ReportError(Parser, RecognitionException)
|
||||
ReportMatch(Parser)
|
||||
}
|
||||
|
||||
// DefaultErrorStrategy is the default implementation of ANTLRErrorStrategy used for
|
||||
// error reporting and recovery in ANTLR parsers.
|
||||
type DefaultErrorStrategy struct {
|
||||
errorRecoveryMode bool
|
||||
lastErrorIndex int
|
||||
lastErrorStates *IntervalSet
|
||||
}
|
||||
|
||||
var _ ErrorStrategy = &DefaultErrorStrategy{}
|
||||
|
||||
func NewDefaultErrorStrategy() *DefaultErrorStrategy {
|
||||
|
||||
d := new(DefaultErrorStrategy)
|
||||
|
||||
// Indicates whether the error strategy is currently "recovering from an
|
||||
// error". This is used to suppress Reporting multiple error messages while
|
||||
// attempting to recover from a detected syntax error.
|
||||
//
|
||||
// @see //InErrorRecoveryMode
|
||||
//
|
||||
d.errorRecoveryMode = false
|
||||
|
||||
// The index into the input stream where the last error occurred.
|
||||
// This is used to prevent infinite loops where an error is found
|
||||
// but no token is consumed during recovery...another error is found,
|
||||
// ad nauseam. This is a failsafe mechanism to guarantee that at least
|
||||
// one token/tree node is consumed for two errors.
|
||||
//
|
||||
d.lastErrorIndex = -1
|
||||
d.lastErrorStates = nil
|
||||
return d
|
||||
}
|
||||
|
||||
// <p>The default implementation simply calls {@link //endErrorCondition} to
|
||||
// ensure that the handler is not in error recovery mode.</p>
|
||||
func (d *DefaultErrorStrategy) reset(recognizer Parser) {
|
||||
d.endErrorCondition(recognizer)
|
||||
}
|
||||
|
||||
// This method is called to enter error recovery mode when a recognition
|
||||
// exception is Reported.
|
||||
func (d *DefaultErrorStrategy) beginErrorCondition(_ Parser) {
|
||||
d.errorRecoveryMode = true
|
||||
}
|
||||
|
||||
func (d *DefaultErrorStrategy) InErrorRecoveryMode(_ Parser) bool {
|
||||
return d.errorRecoveryMode
|
||||
}
|
||||
|
||||
// This method is called to leave error recovery mode after recovering from
|
||||
// a recognition exception.
|
||||
func (d *DefaultErrorStrategy) endErrorCondition(_ Parser) {
|
||||
d.errorRecoveryMode = false
|
||||
d.lastErrorStates = nil
|
||||
d.lastErrorIndex = -1
|
||||
}
|
||||
|
||||
// ReportMatch is the default implementation of error matching and simply calls endErrorCondition.
|
||||
func (d *DefaultErrorStrategy) ReportMatch(recognizer Parser) {
|
||||
d.endErrorCondition(recognizer)
|
||||
}
|
||||
|
||||
// ReportError is the default implementation of error reporting.
|
||||
// It returns immediately if the handler is already
|
||||
// in error recovery mode. Otherwise, it calls [beginErrorCondition]
|
||||
// and dispatches the Reporting task based on the runtime type of e
|
||||
// according to the following table.
|
||||
//
|
||||
// [NoViableAltException] : Dispatches the call to [ReportNoViableAlternative]
|
||||
// [InputMisMatchException] : Dispatches the call to [ReportInputMisMatch]
|
||||
// [FailedPredicateException] : Dispatches the call to [ReportFailedPredicate]
|
||||
// All other types : Calls [NotifyErrorListeners] to Report the exception
|
||||
func (d *DefaultErrorStrategy) ReportError(recognizer Parser, e RecognitionException) {
|
||||
// if we've already Reported an error and have not Matched a token
|
||||
// yet successfully, don't Report any errors.
|
||||
if d.InErrorRecoveryMode(recognizer) {
|
||||
return // don't Report spurious errors
|
||||
}
|
||||
d.beginErrorCondition(recognizer)
|
||||
|
||||
switch t := e.(type) {
|
||||
default:
|
||||
fmt.Println("unknown recognition error type: " + reflect.TypeOf(e).Name())
|
||||
// fmt.Println(e.stack)
|
||||
recognizer.NotifyErrorListeners(e.GetMessage(), e.GetOffendingToken(), e)
|
||||
case *NoViableAltException:
|
||||
d.ReportNoViableAlternative(recognizer, t)
|
||||
case *InputMisMatchException:
|
||||
d.ReportInputMisMatch(recognizer, t)
|
||||
case *FailedPredicateException:
|
||||
d.ReportFailedPredicate(recognizer, t)
|
||||
}
|
||||
}
|
||||
|
||||
// Recover is the default recovery implementation.
|
||||
// It reSynchronizes the parser by consuming tokens until we find one in the reSynchronization set -
|
||||
// loosely the set of tokens that can follow the current rule.
|
||||
func (d *DefaultErrorStrategy) Recover(recognizer Parser, _ RecognitionException) {
|
||||
|
||||
if d.lastErrorIndex == recognizer.GetInputStream().Index() &&
|
||||
d.lastErrorStates != nil && d.lastErrorStates.contains(recognizer.GetState()) {
|
||||
// uh oh, another error at same token index and previously-Visited
|
||||
// state in ATN must be a case where LT(1) is in the recovery
|
||||
// token set so nothing got consumed. Consume a single token
|
||||
// at least to prevent an infinite loop d is a failsafe.
|
||||
recognizer.Consume()
|
||||
}
|
||||
d.lastErrorIndex = recognizer.GetInputStream().Index()
|
||||
if d.lastErrorStates == nil {
|
||||
d.lastErrorStates = NewIntervalSet()
|
||||
}
|
||||
d.lastErrorStates.addOne(recognizer.GetState())
|
||||
followSet := d.GetErrorRecoverySet(recognizer)
|
||||
d.consumeUntil(recognizer, followSet)
|
||||
}
|
||||
|
||||
// Sync is the default implementation of error strategy synchronization.
|
||||
//
|
||||
// This Sync makes sure that the current lookahead symbol is consistent with what were expecting
|
||||
// at this point in the [ATN]. You can call this anytime but ANTLR only
|
||||
// generates code to check before sub-rules/loops and each iteration.
|
||||
//
|
||||
// Implements [Jim Idle]'s magic Sync mechanism in closures and optional
|
||||
// sub-rules. E.g.:
|
||||
//
|
||||
// a : Sync ( stuff Sync )*
|
||||
// Sync : {consume to what can follow Sync}
|
||||
//
|
||||
// At the start of a sub-rule upon error, Sync performs single
|
||||
// token deletion, if possible. If it can't do that, it bails on the current
|
||||
// rule and uses the default error recovery, which consumes until the
|
||||
// reSynchronization set of the current rule.
|
||||
//
|
||||
// If the sub-rule is optional
|
||||
//
|
||||
// ({@code (...)?}, {@code (...)*},
|
||||
//
|
||||
// or a block with an empty alternative), then the expected set includes what follows
|
||||
// the sub-rule.
|
||||
//
|
||||
// During loop iteration, it consumes until it sees a token that can start a
|
||||
// sub-rule or what follows loop. Yes, that is pretty aggressive. We opt to
|
||||
// stay in the loop as long as possible.
|
||||
//
|
||||
// # Origins
|
||||
//
|
||||
// Previous versions of ANTLR did a poor job of their recovery within loops.
|
||||
// A single mismatch token or missing token would force the parser to bail
|
||||
// out of the entire rules surrounding the loop. So, for rule:
|
||||
//
|
||||
// classfunc : 'class' ID '{' member* '}'
|
||||
//
|
||||
// input with an extra token between members would force the parser to
|
||||
// consume until it found the next class definition rather than the next
|
||||
// member definition of the current class.
|
||||
//
|
||||
// This functionality cost a bit of effort because the parser has to
|
||||
// compare the token set at the start of the loop and at each iteration. If for
|
||||
// some reason speed is suffering for you, you can turn off this
|
||||
// functionality by simply overriding this method as empty:
|
||||
//
|
||||
// { }
|
||||
//
|
||||
// [Jim Idle]: https://github.com/jimidle
|
||||
func (d *DefaultErrorStrategy) Sync(recognizer Parser) {
|
||||
// If already recovering, don't try to Sync
|
||||
if d.InErrorRecoveryMode(recognizer) {
|
||||
return
|
||||
}
|
||||
|
||||
s := recognizer.GetInterpreter().atn.states[recognizer.GetState()]
|
||||
la := recognizer.GetTokenStream().LA(1)
|
||||
|
||||
// try cheaper subset first might get lucky. seems to shave a wee bit off
|
||||
nextTokens := recognizer.GetATN().NextTokens(s, nil)
|
||||
if nextTokens.contains(TokenEpsilon) || nextTokens.contains(la) {
|
||||
return
|
||||
}
|
||||
|
||||
switch s.GetStateType() {
|
||||
case ATNStateBlockStart, ATNStateStarBlockStart, ATNStatePlusBlockStart, ATNStateStarLoopEntry:
|
||||
// Report error and recover if possible
|
||||
if d.SingleTokenDeletion(recognizer) != nil {
|
||||
return
|
||||
}
|
||||
recognizer.SetError(NewInputMisMatchException(recognizer))
|
||||
case ATNStatePlusLoopBack, ATNStateStarLoopBack:
|
||||
d.ReportUnwantedToken(recognizer)
|
||||
expecting := NewIntervalSet()
|
||||
expecting.addSet(recognizer.GetExpectedTokens())
|
||||
whatFollowsLoopIterationOrRule := expecting.addSet(d.GetErrorRecoverySet(recognizer))
|
||||
d.consumeUntil(recognizer, whatFollowsLoopIterationOrRule)
|
||||
default:
|
||||
// do nothing if we can't identify the exact kind of ATN state
|
||||
}
|
||||
}
|
||||
|
||||
// ReportNoViableAlternative is called by [ReportError] when the exception is a [NoViableAltException].
|
||||
//
|
||||
// See also [ReportError]
|
||||
func (d *DefaultErrorStrategy) ReportNoViableAlternative(recognizer Parser, e *NoViableAltException) {
|
||||
tokens := recognizer.GetTokenStream()
|
||||
var input string
|
||||
if tokens != nil {
|
||||
if e.startToken.GetTokenType() == TokenEOF {
|
||||
input = "<EOF>"
|
||||
} else {
|
||||
input = tokens.GetTextFromTokens(e.startToken, e.offendingToken)
|
||||
}
|
||||
} else {
|
||||
input = "<unknown input>"
|
||||
}
|
||||
msg := "no viable alternative at input " + d.escapeWSAndQuote(input)
|
||||
recognizer.NotifyErrorListeners(msg, e.offendingToken, e)
|
||||
}
|
||||
|
||||
// ReportInputMisMatch is called by [ReportError] when the exception is an [InputMisMatchException]
|
||||
//
|
||||
// See also: [ReportError]
|
||||
func (d *DefaultErrorStrategy) ReportInputMisMatch(recognizer Parser, e *InputMisMatchException) {
|
||||
msg := "mismatched input " + d.GetTokenErrorDisplay(e.offendingToken) +
|
||||
" expecting " + e.getExpectedTokens().StringVerbose(recognizer.GetLiteralNames(), recognizer.GetSymbolicNames(), false)
|
||||
recognizer.NotifyErrorListeners(msg, e.offendingToken, e)
|
||||
}
|
||||
|
||||
// ReportFailedPredicate is called by [ReportError] when the exception is a [FailedPredicateException].
|
||||
//
|
||||
// See also: [ReportError]
|
||||
func (d *DefaultErrorStrategy) ReportFailedPredicate(recognizer Parser, e *FailedPredicateException) {
|
||||
ruleName := recognizer.GetRuleNames()[recognizer.GetParserRuleContext().GetRuleIndex()]
|
||||
msg := "rule " + ruleName + " " + e.message
|
||||
recognizer.NotifyErrorListeners(msg, e.offendingToken, e)
|
||||
}
|
||||
|
||||
// ReportUnwantedToken is called to report a syntax error that requires the removal
|
||||
// of a token from the input stream. At the time d method is called, the
|
||||
// erroneous symbol is the current LT(1) symbol and has not yet been
|
||||
// removed from the input stream. When this method returns,
|
||||
// recognizer is in error recovery mode.
|
||||
//
|
||||
// This method is called when singleTokenDeletion identifies
|
||||
// single-token deletion as a viable recovery strategy for a mismatched
|
||||
// input error.
|
||||
//
|
||||
// The default implementation simply returns if the handler is already in
|
||||
// error recovery mode. Otherwise, it calls beginErrorCondition to
|
||||
// enter error recovery mode, followed by calling
|
||||
// [NotifyErrorListeners]
|
||||
func (d *DefaultErrorStrategy) ReportUnwantedToken(recognizer Parser) {
|
||||
if d.InErrorRecoveryMode(recognizer) {
|
||||
return
|
||||
}
|
||||
d.beginErrorCondition(recognizer)
|
||||
t := recognizer.GetCurrentToken()
|
||||
tokenName := d.GetTokenErrorDisplay(t)
|
||||
expecting := d.GetExpectedTokens(recognizer)
|
||||
msg := "extraneous input " + tokenName + " expecting " +
|
||||
expecting.StringVerbose(recognizer.GetLiteralNames(), recognizer.GetSymbolicNames(), false)
|
||||
recognizer.NotifyErrorListeners(msg, t, nil)
|
||||
}
|
||||
|
||||
// ReportMissingToken is called to report a syntax error which requires the
|
||||
// insertion of a missing token into the input stream. At the time this
|
||||
// method is called, the missing token has not yet been inserted. When this
|
||||
// method returns, recognizer is in error recovery mode.
|
||||
//
|
||||
// This method is called when singleTokenInsertion identifies
|
||||
// single-token insertion as a viable recovery strategy for a mismatched
|
||||
// input error.
|
||||
//
|
||||
// The default implementation simply returns if the handler is already in
|
||||
// error recovery mode. Otherwise, it calls beginErrorCondition to
|
||||
// enter error recovery mode, followed by calling [NotifyErrorListeners]
|
||||
func (d *DefaultErrorStrategy) ReportMissingToken(recognizer Parser) {
|
||||
if d.InErrorRecoveryMode(recognizer) {
|
||||
return
|
||||
}
|
||||
d.beginErrorCondition(recognizer)
|
||||
t := recognizer.GetCurrentToken()
|
||||
expecting := d.GetExpectedTokens(recognizer)
|
||||
msg := "missing " + expecting.StringVerbose(recognizer.GetLiteralNames(), recognizer.GetSymbolicNames(), false) +
|
||||
" at " + d.GetTokenErrorDisplay(t)
|
||||
recognizer.NotifyErrorListeners(msg, t, nil)
|
||||
}
|
||||
|
||||
// The RecoverInline default implementation attempts to recover from the mismatched input
|
||||
// by using single token insertion and deletion as described below. If the
|
||||
// recovery attempt fails, this method panics with [InputMisMatchException}.
|
||||
// TODO: Not sure that panic() is the right thing to do here - JI
|
||||
//
|
||||
// # EXTRA TOKEN (single token deletion)
|
||||
//
|
||||
// LA(1) is not what we are looking for. If LA(2) has the
|
||||
// right token, however, then assume LA(1) is some extra spurious
|
||||
// token and delete it. Then consume and return the next token (which was
|
||||
// the LA(2) token) as the successful result of the Match operation.
|
||||
//
|
||||
// # This recovery strategy is implemented by singleTokenDeletion
|
||||
//
|
||||
// # MISSING TOKEN (single token insertion)
|
||||
//
|
||||
// If current token -at LA(1) - is consistent with what could come
|
||||
// after the expected LA(1) token, then assume the token is missing
|
||||
// and use the parser's [TokenFactory] to create it on the fly. The
|
||||
// “insertion” is performed by returning the created token as the successful
|
||||
// result of the Match operation.
|
||||
//
|
||||
// This recovery strategy is implemented by [SingleTokenInsertion].
|
||||
//
|
||||
// # Example
|
||||
//
|
||||
// For example, Input i=(3 is clearly missing the ')'. When
|
||||
// the parser returns from the nested call to expr, it will have
|
||||
// call the chain:
|
||||
//
|
||||
// stat → expr → atom
|
||||
//
|
||||
// and it will be trying to Match the ')' at this point in the
|
||||
// derivation:
|
||||
//
|
||||
// : ID '=' '(' INT ')' ('+' atom)* ';'
|
||||
// ^
|
||||
//
|
||||
// The attempt to [Match] ')' will fail when it sees ';' and
|
||||
// call [RecoverInline]. To recover, it sees that LA(1)==';'
|
||||
// is in the set of tokens that can follow the ')' token reference
|
||||
// in rule atom. It can assume that you forgot the ')'.
|
||||
func (d *DefaultErrorStrategy) RecoverInline(recognizer Parser) Token {
|
||||
// SINGLE TOKEN DELETION
|
||||
MatchedSymbol := d.SingleTokenDeletion(recognizer)
|
||||
if MatchedSymbol != nil {
|
||||
// we have deleted the extra token.
|
||||
// now, move past ttype token as if all were ok
|
||||
recognizer.Consume()
|
||||
return MatchedSymbol
|
||||
}
|
||||
// SINGLE TOKEN INSERTION
|
||||
if d.SingleTokenInsertion(recognizer) {
|
||||
return d.GetMissingSymbol(recognizer)
|
||||
}
|
||||
// even that didn't work must panic the exception
|
||||
recognizer.SetError(NewInputMisMatchException(recognizer))
|
||||
return nil
|
||||
}
|
||||
|
||||
// SingleTokenInsertion implements the single-token insertion inline error recovery
|
||||
// strategy. It is called by [RecoverInline] if the single-token
|
||||
// deletion strategy fails to recover from the mismatched input. If this
|
||||
// method returns {@code true}, {@code recognizer} will be in error recovery
|
||||
// mode.
|
||||
//
|
||||
// This method determines whether single-token insertion is viable by
|
||||
// checking if the LA(1) input symbol could be successfully Matched
|
||||
// if it were instead the LA(2) symbol. If this method returns
|
||||
// {@code true}, the caller is responsible for creating and inserting a
|
||||
// token with the correct type to produce this behavior.</p>
|
||||
//
|
||||
// This func returns true if single-token insertion is a viable recovery
|
||||
// strategy for the current mismatched input.
|
||||
func (d *DefaultErrorStrategy) SingleTokenInsertion(recognizer Parser) bool {
|
||||
currentSymbolType := recognizer.GetTokenStream().LA(1)
|
||||
// if current token is consistent with what could come after current
|
||||
// ATN state, then we know we're missing a token error recovery
|
||||
// is free to conjure up and insert the missing token
|
||||
atn := recognizer.GetInterpreter().atn
|
||||
currentState := atn.states[recognizer.GetState()]
|
||||
next := currentState.GetTransitions()[0].getTarget()
|
||||
expectingAtLL2 := atn.NextTokens(next, recognizer.GetParserRuleContext())
|
||||
if expectingAtLL2.contains(currentSymbolType) {
|
||||
d.ReportMissingToken(recognizer)
|
||||
return true
|
||||
}
|
||||
|
||||
return false
|
||||
}
|
||||
|
||||
// SingleTokenDeletion implements the single-token deletion inline error recovery
|
||||
// strategy. It is called by [RecoverInline] to attempt to recover
|
||||
// from mismatched input. If this method returns nil, the parser and error
|
||||
// handler state will not have changed. If this method returns non-nil,
|
||||
// recognizer will not be in error recovery mode since the
|
||||
// returned token was a successful Match.
|
||||
//
|
||||
// If the single-token deletion is successful, this method calls
|
||||
// [ReportUnwantedToken] to Report the error, followed by
|
||||
// [Consume] to actually “delete” the extraneous token. Then,
|
||||
// before returning, [ReportMatch] is called to signal a successful
|
||||
// Match.
|
||||
//
|
||||
// The func returns the successfully Matched [Token] instance if single-token
|
||||
// deletion successfully recovers from the mismatched input, otherwise nil.
|
||||
func (d *DefaultErrorStrategy) SingleTokenDeletion(recognizer Parser) Token {
|
||||
NextTokenType := recognizer.GetTokenStream().LA(2)
|
||||
expecting := d.GetExpectedTokens(recognizer)
|
||||
if expecting.contains(NextTokenType) {
|
||||
d.ReportUnwantedToken(recognizer)
|
||||
// print("recoverFromMisMatchedToken deleting " \
|
||||
// + str(recognizer.GetTokenStream().LT(1)) \
|
||||
// + " since " + str(recognizer.GetTokenStream().LT(2)) \
|
||||
// + " is what we want", file=sys.stderr)
|
||||
recognizer.Consume() // simply delete extra token
|
||||
// we want to return the token we're actually Matching
|
||||
MatchedSymbol := recognizer.GetCurrentToken()
|
||||
d.ReportMatch(recognizer) // we know current token is correct
|
||||
return MatchedSymbol
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// GetMissingSymbol conjures up a missing token during error recovery.
|
||||
//
|
||||
// The recognizer attempts to recover from single missing
|
||||
// symbols. But, actions might refer to that missing symbol.
|
||||
// For example:
|
||||
//
|
||||
// x=ID {f($x)}.
|
||||
//
|
||||
// The action clearly assumes
|
||||
// that there has been an identifier Matched previously and that
|
||||
// $x points at that token. If that token is missing, but
|
||||
// the next token in the stream is what we want we assume that
|
||||
// this token is missing, and we keep going. Because we
|
||||
// have to return some token to replace the missing token,
|
||||
// we have to conjure one up. This method gives the user control
|
||||
// over the tokens returned for missing tokens. Mostly,
|
||||
// you will want to create something special for identifier
|
||||
// tokens. For literals such as '{' and ',', the default
|
||||
// action in the parser or tree parser works. It simply creates
|
||||
// a [CommonToken] of the appropriate type. The text will be the token name.
|
||||
// If you need to change which tokens must be created by the lexer,
|
||||
// override this method to create the appropriate tokens.
|
||||
func (d *DefaultErrorStrategy) GetMissingSymbol(recognizer Parser) Token {
|
||||
currentSymbol := recognizer.GetCurrentToken()
|
||||
expecting := d.GetExpectedTokens(recognizer)
|
||||
expectedTokenType := expecting.first()
|
||||
var tokenText string
|
||||
|
||||
if expectedTokenType == TokenEOF {
|
||||
tokenText = "<missing EOF>"
|
||||
} else {
|
||||
ln := recognizer.GetLiteralNames()
|
||||
if expectedTokenType > 0 && expectedTokenType < len(ln) {
|
||||
tokenText = "<missing " + recognizer.GetLiteralNames()[expectedTokenType] + ">"
|
||||
} else {
|
||||
tokenText = "<missing undefined>" // TODO: matches the JS impl
|
||||
}
|
||||
}
|
||||
current := currentSymbol
|
||||
lookback := recognizer.GetTokenStream().LT(-1)
|
||||
if current.GetTokenType() == TokenEOF && lookback != nil {
|
||||
current = lookback
|
||||
}
|
||||
|
||||
tf := recognizer.GetTokenFactory()
|
||||
|
||||
return tf.Create(current.GetSource(), expectedTokenType, tokenText, TokenDefaultChannel, -1, -1, current.GetLine(), current.GetColumn())
|
||||
}
|
||||
|
||||
func (d *DefaultErrorStrategy) GetExpectedTokens(recognizer Parser) *IntervalSet {
|
||||
return recognizer.GetExpectedTokens()
|
||||
}
|
||||
|
||||
// GetTokenErrorDisplay determines how a token should be displayed in an error message.
|
||||
// The default is to display just the text, but during development you might
|
||||
// want to have a lot of information spit out. Override this func in that case
|
||||
// to use t.String() (which, for [CommonToken], dumps everything about
|
||||
// the token). This is better than forcing you to override a method in
|
||||
// your token objects because you don't have to go modify your lexer
|
||||
// so that it creates a new type.
|
||||
func (d *DefaultErrorStrategy) GetTokenErrorDisplay(t Token) string {
|
||||
if t == nil {
|
||||
return "<no token>"
|
||||
}
|
||||
s := t.GetText()
|
||||
if s == "" {
|
||||
if t.GetTokenType() == TokenEOF {
|
||||
s = "<EOF>"
|
||||
} else {
|
||||
s = "<" + strconv.Itoa(t.GetTokenType()) + ">"
|
||||
}
|
||||
}
|
||||
return d.escapeWSAndQuote(s)
|
||||
}
|
||||
|
||||
func (d *DefaultErrorStrategy) escapeWSAndQuote(s string) string {
|
||||
s = strings.Replace(s, "\t", "\\t", -1)
|
||||
s = strings.Replace(s, "\n", "\\n", -1)
|
||||
s = strings.Replace(s, "\r", "\\r", -1)
|
||||
return "'" + s + "'"
|
||||
}
|
||||
|
||||
// GetErrorRecoverySet computes the error recovery set for the current rule. During
|
||||
// rule invocation, the parser pushes the set of tokens that can
|
||||
// follow that rule reference on the stack. This amounts to
|
||||
// computing FIRST of what follows the rule reference in the
|
||||
// enclosing rule. See LinearApproximator.FIRST().
|
||||
//
|
||||
// This local follow set only includes tokens
|
||||
// from within the rule i.e., the FIRST computation done by
|
||||
// ANTLR stops at the end of a rule.
|
||||
//
|
||||
// # Example
|
||||
//
|
||||
// When you find a "no viable alt exception", the input is not
|
||||
// consistent with any of the alternatives for rule r. The best
|
||||
// thing to do is to consume tokens until you see something that
|
||||
// can legally follow a call to r or any rule that called r.
|
||||
// You don't want the exact set of viable next tokens because the
|
||||
// input might just be missing a token--you might consume the
|
||||
// rest of the input looking for one of the missing tokens.
|
||||
//
|
||||
// Consider the grammar:
|
||||
//
|
||||
// a : '[' b ']'
|
||||
// | '(' b ')'
|
||||
// ;
|
||||
//
|
||||
// b : c '^' INT
|
||||
// ;
|
||||
//
|
||||
// c : ID
|
||||
// | INT
|
||||
// ;
|
||||
//
|
||||
// At each rule invocation, the set of tokens that could follow
|
||||
// that rule is pushed on a stack. Here are the various
|
||||
// context-sensitive follow sets:
|
||||
//
|
||||
// FOLLOW(b1_in_a) = FIRST(']') = ']'
|
||||
// FOLLOW(b2_in_a) = FIRST(')') = ')'
|
||||
// FOLLOW(c_in_b) = FIRST('^') = '^'
|
||||
//
|
||||
// Upon erroneous input “[]”, the call chain is
|
||||
//
|
||||
// a → b → c
|
||||
//
|
||||
// and, hence, the follow context stack is:
|
||||
//
|
||||
// Depth Follow set Start of rule execution
|
||||
// 0 <EOF> a (from main())
|
||||
// 1 ']' b
|
||||
// 2 '^' c
|
||||
//
|
||||
// Notice that ')' is not included, because b would have to have
|
||||
// been called from a different context in rule a for ')' to be
|
||||
// included.
|
||||
//
|
||||
// For error recovery, we cannot consider FOLLOW(c)
|
||||
// (context-sensitive or otherwise). We need the combined set of
|
||||
// all context-sensitive FOLLOW sets - the set of all tokens that
|
||||
// could follow any reference in the call chain. We need to
|
||||
// reSync to one of those tokens. Note that FOLLOW(c)='^' and if
|
||||
// we reSync'd to that token, we'd consume until EOF. We need to
|
||||
// Sync to context-sensitive FOLLOWs for a, b, and c:
|
||||
//
|
||||
// {']','^'}
|
||||
//
|
||||
// In this case, for input "[]", LA(1) is ']' and in the set, so we would
|
||||
// not consume anything. After printing an error, rule c would
|
||||
// return normally. Rule b would not find the required '^' though.
|
||||
// At this point, it gets a mismatched token error and panics an
|
||||
// exception (since LA(1) is not in the viable following token
|
||||
// set). The rule exception handler tries to recover, but finds
|
||||
// the same recovery set and doesn't consume anything. Rule b
|
||||
// exits normally returning to rule a. Now it finds the ']' (and
|
||||
// with the successful Match exits errorRecovery mode).
|
||||
//
|
||||
// So, you can see that the parser walks up the call chain looking
|
||||
// for the token that was a member of the recovery set.
|
||||
//
|
||||
// Errors are not generated in errorRecovery mode.
|
||||
//
|
||||
// ANTLR's error recovery mechanism is based upon original ideas:
|
||||
//
|
||||
// [Algorithms + Data Structures = Programs] by Niklaus Wirth and
|
||||
// [A note on error recovery in recursive descent parsers].
|
||||
//
|
||||
// Later, Josef Grosch had some good ideas in [Efficient and Comfortable Error Recovery in Recursive Descent
|
||||
// Parsers]
|
||||
//
|
||||
// Like Grosch I implement context-sensitive FOLLOW sets that are combined at run-time upon error to avoid overhead
|
||||
// during parsing. Later, the runtime Sync was improved for loops/sub-rules; see the [Sync] docs.
//
// [A note on error recovery in recursive descent parsers]: http://portal.acm.org/citation.cfm?id=947902.947905
// [Algorithms + Data Structures = Programs]: https://t.ly/5QzgE
// [Efficient and Comfortable Error Recovery in Recursive Descent Parsers]: ftp://www.cocolab.com/products/cocktail/doca4.ps/ell.ps.zip
func (d *DefaultErrorStrategy) GetErrorRecoverySet(recognizer Parser) *IntervalSet {
	atn := recognizer.GetInterpreter().atn
	ctx := recognizer.GetParserRuleContext()
	recoverSet := NewIntervalSet()
	for ctx != nil && ctx.GetInvokingState() >= 0 {
		// compute what follows whoever invoked us
		invokingState := atn.states[ctx.GetInvokingState()]
		rt := invokingState.GetTransitions()[0]
		follow := atn.NextTokens(rt.(*RuleTransition).followState, nil)
		recoverSet.addSet(follow)
		ctx = ctx.GetParent().(ParserRuleContext)
	}
	recoverSet.removeOne(TokenEpsilon)
	return recoverSet
}

// consumeUntil consumes tokens until one Matches the given token set.
func (d *DefaultErrorStrategy) consumeUntil(recognizer Parser, set *IntervalSet) {
	ttype := recognizer.GetTokenStream().LA(1)
	for ttype != TokenEOF && !set.contains(ttype) {
		recognizer.Consume()
		ttype = recognizer.GetTokenStream().LA(1)
	}
}

// The BailErrorStrategy implementation of ANTLRErrorStrategy responds to syntax errors
// by immediately canceling the parse operation with a
// [ParseCancellationException]. The implementation ensures that the
// [ParserRuleContext.exception] field is set for all parse tree nodes
// that were not completed prior to encountering the error.
//
// This error strategy is useful in the following scenarios.
//
//   - Two-stage parsing: This error strategy allows the first
//     stage of two-stage parsing to immediately terminate if an error is
//     encountered, and immediately fall back to the second stage. In addition to
//     avoiding wasted work by attempting to recover from errors here, the empty
//     implementation of [BailErrorStrategy.Sync] improves the performance of
//     the first stage.
//
//   - Silent validation: When syntax errors are not being
//     Reported or logged, and the parse result is simply ignored if errors occur,
//     the [BailErrorStrategy] avoids wasting work on recovering from errors
//     when the result will be ignored either way.
//
//	myparser.SetErrorHandler(NewBailErrorStrategy())
//
// See also: [Parser.SetErrorHandler(ANTLRErrorStrategy)]
type BailErrorStrategy struct {
	*DefaultErrorStrategy
}

var _ ErrorStrategy = &BailErrorStrategy{}

//goland:noinspection GoUnusedExportedFunction
func NewBailErrorStrategy() *BailErrorStrategy {

	b := new(BailErrorStrategy)

	b.DefaultErrorStrategy = NewDefaultErrorStrategy()

	return b
}

// Recover, instead of recovering from exception e, re-panics it wrapped
// in a [ParseCancellationException] so it is not caught by the
// rule function catches. Use Exception.GetCause() to get the
// original [RecognitionException].
func (b *BailErrorStrategy) Recover(recognizer Parser, e RecognitionException) {
	context := recognizer.GetParserRuleContext()
	for context != nil {
		context.SetException(e)
		if parent, ok := context.GetParent().(ParserRuleContext); ok {
			context = parent
		} else {
			context = nil
		}
	}
	recognizer.SetError(NewParseCancellationException()) // TODO: we don't emit e properly
}

// RecoverInline makes sure we don't attempt to recover inline; if the parser
// successfully recovers, it won't panic an exception.
func (b *BailErrorStrategy) RecoverInline(recognizer Parser) Token {
	b.Recover(recognizer, NewInputMisMatchException(recognizer))

	return nil
}

// Sync makes sure we don't attempt to recover from problems in sub-rules.
func (b *BailErrorStrategy) Sync(_ Parser) {
}
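
// Illustrative sketch (editor's note, not part of the upstream file): a caller
// might implement the two-stage parsing described above by bailing first and
// retrying with the default strategy only on failure. The generated lexer,
// parser, and entry-rule names are hypothetical placeholders.
//
//	p := NewMyParser(tokens) // hypothetical generated parser
//	p.SetErrorHandler(NewBailErrorStrategy())
//	tree := p.StartRule() // hypothetical entry rule
//	if p.HasError() {
//		tokens.Seek(0) // rewind and retry with full error recovery
//		p = NewMyParser(tokens)
//		p.SetErrorHandler(NewDefaultErrorStrategy())
//		tree = p.StartRule()
//	}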
259 e2e/vendor/github.com/antlr4-go/antlr/v4/errors.go generated vendored Normal file
@@ -0,0 +1,259 @@
// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
// Use of this file is governed by the BSD 3-clause license that
// can be found in the LICENSE.txt file in the project root.

package antlr

// The root of the ANTLR exception hierarchy. In general, ANTLR tracks just
// 3 kinds of errors: prediction errors, failed predicate errors, and
// mismatched input errors. In each case, the parser knows where it is
// in the input, where it is in the ATN, the rule invocation stack,
// and what kind of problem occurred.

type RecognitionException interface {
	GetOffendingToken() Token
	GetMessage() string
	GetInputStream() IntStream
}

type BaseRecognitionException struct {
	message        string
	recognizer     Recognizer
	offendingToken Token
	offendingState int
	ctx            RuleContext
	input          IntStream
}

func NewBaseRecognitionException(message string, recognizer Recognizer, input IntStream, ctx RuleContext) *BaseRecognitionException {

	// todo
	//	Error.call(this)
	//
	//	if (!!Error.captureStackTrace) {
	//		Error.captureStackTrace(this, RecognitionException)
	//	} else {
	//		stack := NewError().stack
	//	}
	// TODO: may be able to use - "runtime" func Stack(buf []byte, all bool) int

	t := new(BaseRecognitionException)

	t.message = message
	t.recognizer = recognizer
	t.input = input
	t.ctx = ctx

	// The current Token when an error occurred. Since not all streams
	// support accessing symbols by index, we have to track the [Token]
	// instance itself.
	//
	t.offendingToken = nil

	// Get the ATN state number the parser was in at the time the error
	// occurred. For NoViableAltException and LexerNoViableAltException exceptions, this is the
	// DecisionState number. For others, it is the state whose outgoing edge we couldn't Match.
	//
	t.offendingState = -1
	if t.recognizer != nil {
		t.offendingState = t.recognizer.GetState()
	}

	return t
}

func (b *BaseRecognitionException) GetMessage() string {
	return b.message
}

func (b *BaseRecognitionException) GetOffendingToken() Token {
	return b.offendingToken
}

func (b *BaseRecognitionException) GetInputStream() IntStream {
	return b.input
}

// If the state number is not known, this method returns -1.

// getExpectedTokens gets the set of input symbols which could potentially follow the
// previously Matched symbol at the time this exception was raised.
//
// If the set of expected tokens is not known and could not be computed,
// this method returns nil.
//
// The func returns the set of token types that could potentially follow the current
// state in the [ATN], or nil if the information is not available.

func (b *BaseRecognitionException) getExpectedTokens() *IntervalSet {
	if b.recognizer != nil {
		return b.recognizer.GetATN().getExpectedTokens(b.offendingState, b.ctx)
	}

	return nil
}

func (b *BaseRecognitionException) String() string {
	return b.message
}

type LexerNoViableAltException struct {
	*BaseRecognitionException

	startIndex     int
	deadEndConfigs *ATNConfigSet
}

func NewLexerNoViableAltException(lexer Lexer, input CharStream, startIndex int, deadEndConfigs *ATNConfigSet) *LexerNoViableAltException {

	l := new(LexerNoViableAltException)

	l.BaseRecognitionException = NewBaseRecognitionException("", lexer, input, nil)

	l.startIndex = startIndex
	l.deadEndConfigs = deadEndConfigs

	return l
}

func (l *LexerNoViableAltException) String() string {
	symbol := ""
	if l.startIndex >= 0 && l.startIndex < l.input.Size() {
		symbol = l.input.(CharStream).GetTextFromInterval(NewInterval(l.startIndex, l.startIndex))
	}
	return "LexerNoViableAltException" + symbol
}

type NoViableAltException struct {
	*BaseRecognitionException

	startToken     Token
	offendingToken Token
	ctx            ParserRuleContext
	deadEndConfigs *ATNConfigSet
}

// NewNoViableAltException creates an exception indicating that the parser could not decide which of two or more paths
// to take based upon the remaining input. It tracks the starting token
// of the offending input and also knows where the parser was
// in the various paths when the error occurred.
//
// Reported by [ReportNoViableAlternative]
func NewNoViableAltException(recognizer Parser, input TokenStream, startToken Token, offendingToken Token, deadEndConfigs *ATNConfigSet, ctx ParserRuleContext) *NoViableAltException {

	if ctx == nil {
		ctx = recognizer.GetParserRuleContext()
	}

	if offendingToken == nil {
		offendingToken = recognizer.GetCurrentToken()
	}

	if startToken == nil {
		startToken = recognizer.GetCurrentToken()
	}

	if input == nil {
		input = recognizer.GetInputStream().(TokenStream)
	}

	n := new(NoViableAltException)
	n.BaseRecognitionException = NewBaseRecognitionException("", recognizer, input, ctx)

	// Which configurations did we try at input.Index() that couldn't Match
	// input.LT(1)
	n.deadEndConfigs = deadEndConfigs

	// The token object at the start index; the input stream might
	// not be buffering tokens, so get a reference to it.
	//
	// At the time the error occurred, of course the stream needs to keep a
	// buffer of all the tokens, but later we might not have access to those.
	n.startToken = startToken
	n.offendingToken = offendingToken

	return n
}

type InputMisMatchException struct {
	*BaseRecognitionException
}

// NewInputMisMatchException creates an exception that signifies any kind of mismatched input exception, such as
// when the current input does not Match the expected token.
func NewInputMisMatchException(recognizer Parser) *InputMisMatchException {

	i := new(InputMisMatchException)
	i.BaseRecognitionException = NewBaseRecognitionException("", recognizer, recognizer.GetInputStream(), recognizer.GetParserRuleContext())

	i.offendingToken = recognizer.GetCurrentToken()

	return i

}

// FailedPredicateException indicates that a semantic predicate failed during validation. Validation of predicates
// occurs when normally parsing the alternative, just like Matching a token.
// Disambiguating predicate evaluation occurs when we test a predicate during
// prediction.
type FailedPredicateException struct {
	*BaseRecognitionException

	ruleIndex      int
	predicateIndex int
	predicate      string
}

//goland:noinspection GoUnusedExportedFunction
func NewFailedPredicateException(recognizer Parser, predicate string, message string) *FailedPredicateException {

	f := new(FailedPredicateException)

	f.BaseRecognitionException = NewBaseRecognitionException(f.formatMessage(predicate, message), recognizer, recognizer.GetInputStream(), recognizer.GetParserRuleContext())

	s := recognizer.GetInterpreter().atn.states[recognizer.GetState()]
	trans := s.GetTransitions()[0]
	if trans2, ok := trans.(*PredicateTransition); ok {
		f.ruleIndex = trans2.ruleIndex
		f.predicateIndex = trans2.predIndex
	} else {
		f.ruleIndex = 0
		f.predicateIndex = 0
	}
	f.predicate = predicate
	f.offendingToken = recognizer.GetCurrentToken()

	return f
}

func (f *FailedPredicateException) formatMessage(predicate, message string) string {
	if message != "" {
		return message
	}

	return "failed predicate: {" + predicate + "}?"
}
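
// For example (editor's illustration), with an empty message the default
// formatting applies:
//
//	f.formatMessage("p.version >= 2", "") // returns: failed predicate: {p.version >= 2}?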

type ParseCancellationException struct {
}

func (p ParseCancellationException) GetOffendingToken() Token {
	//TODO implement me
	panic("implement me")
}

func (p ParseCancellationException) GetMessage() string {
	//TODO implement me
	panic("implement me")
}

func (p ParseCancellationException) GetInputStream() IntStream {
	//TODO implement me
	panic("implement me")
}

func NewParseCancellationException() *ParseCancellationException {
	//	Error.call(this)
	//	Error.captureStackTrace(this, ParseCancellationException)
	return new(ParseCancellationException)
}
67 e2e/vendor/github.com/antlr4-go/antlr/v4/file_stream.go generated vendored Normal file
@@ -0,0 +1,67 @@
// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
// Use of this file is governed by the BSD 3-clause license that
// can be found in the LICENSE.txt file in the project root.

package antlr

import (
	"bufio"
	"os"
)

// This is an InputStream that is loaded from a file all at once
// when you construct the object.

type FileStream struct {
	InputStream
	filename string
}

//goland:noinspection GoUnusedExportedFunction
func NewFileStream(fileName string) (*FileStream, error) {

	f, err := os.Open(fileName)
	if err != nil {
		return nil, err
	}

	// The file is fully read below, so an error from Close can be safely ignored.
	defer func(f *os.File) {
		_ = f.Close()
	}(f)

	reader := bufio.NewReader(f)
	fInfo, err := f.Stat()
	if err != nil {
		return nil, err
	}

	fs := &FileStream{
		InputStream: InputStream{
			index: 0,
			name:  fileName,
		},
		filename: fileName,
	}

	// Pre-build the buffer and read runes efficiently
	//
	fs.data = make([]rune, 0, fInfo.Size())
	for {
		r, _, err := reader.ReadRune()
		if err != nil {
			break
		}
		fs.data = append(fs.data, r)
	}
	fs.size = len(fs.data) // Size in runes

	// All done.
	//
	return fs, nil
}
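
// Illustrative usage (editor's note, not part of the upstream file); the file
// name is a hypothetical placeholder:
//
//	fs, err := NewFileStream("grammar_input.txt")
//	if err != nil {
//		// handle the error
//	}
//	_ = fs.LA(1) // first rune of the file, or TokenEOF if the file is empty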

func (f *FileStream) GetSourceName() string {
	return f.filename
}
157 e2e/vendor/github.com/antlr4-go/antlr/v4/input_stream.go generated vendored Normal file
@@ -0,0 +1,157 @@
// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
// Use of this file is governed by the BSD 3-clause license that
// can be found in the LICENSE.txt file in the project root.

package antlr

import (
	"bufio"
	"io"
)

type InputStream struct {
	name  string
	index int
	data  []rune
	size  int
}

// NewIoStream creates a new input stream from the given io.Reader reader.
// Note that the reader is read completely into memory, so it must actually
// have a stopping point - you cannot pass in a reader on an open-ended source such
// as a socket, for instance.
func NewIoStream(reader io.Reader) *InputStream {

	rReader := bufio.NewReader(reader)

	is := &InputStream{
		name:  "<empty>",
		index: 0,
	}

	// Pre-build the buffer and read runes reasonably efficiently given that
	// we don't exactly know how big the input is.
	//
	is.data = make([]rune, 0, 512)
	for {
		r, _, err := rReader.ReadRune()
		if err != nil {
			break
		}
		is.data = append(is.data, r)
	}
	is.size = len(is.data) // number of runes
	return is
}

// NewInputStream creates a new input stream from the given string.
func NewInputStream(data string) *InputStream {

	is := &InputStream{
		name:  "<empty>",
		index: 0,
		data:  []rune(data), // This is actually the most efficient way
	}
	is.size = len(is.data) // number of runes, but we could also use len(data), which is efficient too
	return is
}

func (is *InputStream) reset() {
	is.index = 0
}

// Consume moves the input pointer to the next character in the input stream
func (is *InputStream) Consume() {
	if is.index >= is.size {
		// assert is.LA(1) == TokenEOF
		panic("cannot consume EOF")
	}
	is.index++
}

// LA returns the character at the given offset relative to the current
// position, e.g. LA(1) is the next character and LA(-1) is the previous one.
func (is *InputStream) LA(offset int) int {

	if offset == 0 {
		return 0 // nil
	}
	if offset < 0 {
		offset++ // e.g., translate LA(-1) to use offset=0
	}
	pos := is.index + offset - 1

	if pos < 0 || pos >= is.size { // invalid
		return TokenEOF
	}

	return int(is.data[pos])
}
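
// For example (editor's illustration of the offset convention):
//
//	is := NewInputStream("ab")
//	is.Consume()
//	_ = is.LA(1)  // 'b' (98), the next character
//	_ = is.LA(-1) // 'a' (97), the character just consumed
//	_ = is.LA(2)  // TokenEOF, past the end of the input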

// LT returns the character at the given offset relative to the current
// position; for a character stream it is a synonym for LA.
func (is *InputStream) LT(offset int) int {
	return is.LA(offset)
}

// Index returns the current offset into the input stream
func (is *InputStream) Index() int {
	return is.index
}

// Size returns the total number of characters in the input stream
func (is *InputStream) Size() int {
	return is.size
}

// Mark does nothing here as we have the entire buffer
func (is *InputStream) Mark() int {
	return -1
}

// Release does nothing here as we have the entire buffer
func (is *InputStream) Release(_ int) {
}

// Seek moves the input pointer to the provided index offset
func (is *InputStream) Seek(index int) {
	if index <= is.index {
		is.index = index // just jump, don't update stream state (line, ...)
		return
	}
	// seek forward
	is.index = intMin(index, is.size)
}

// GetText returns the text from the input stream from the start to the stop index
func (is *InputStream) GetText(start int, stop int) string {
	if stop >= is.size {
		stop = is.size - 1
	}
	if start >= is.size {
		return ""
	}

	return string(is.data[start : stop+1])
}

// GetTextFromTokens returns the text from the input stream from the first character of the start token to the last
// character of the stop token
func (is *InputStream) GetTextFromTokens(start, stop Token) string {
	if start != nil && stop != nil {
		return is.GetTextFromInterval(NewInterval(start.GetTokenIndex(), stop.GetTokenIndex()))
	}

	return ""
}

func (is *InputStream) GetTextFromInterval(i Interval) string {
	return is.GetText(i.Start, i.Stop)
}

func (*InputStream) GetSourceName() string {
	return ""
}

// String returns the entire input stream as a string
func (is *InputStream) String() string {
	return string(is.data)
}
16 e2e/vendor/github.com/antlr4-go/antlr/v4/int_stream.go generated vendored Normal file
@@ -0,0 +1,16 @@
// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
// Use of this file is governed by the BSD 3-clause license that
// can be found in the LICENSE.txt file in the project root.

package antlr

type IntStream interface {
	Consume()
	LA(int) int
	Mark() int
	Release(marker int)
	Index() int
	Seek(index int)
	Size() int
	GetSourceName() string
}
330 e2e/vendor/github.com/antlr4-go/antlr/v4/interval_set.go generated vendored Normal file
@@ -0,0 +1,330 @@
// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
// Use of this file is governed by the BSD 3-clause license that
// can be found in the LICENSE.txt file in the project root.

package antlr

import (
	"strconv"
	"strings"
)

type Interval struct {
	Start int
	Stop  int
}

// NewInterval creates a new interval with the given start and stop values,
// where Start is inclusive and Stop is exclusive.
func NewInterval(start, stop int) Interval {
	return Interval{
		Start: start,
		Stop:  stop,
	}
}

// Contains returns true if the given item is contained within the interval.
func (i Interval) Contains(item int) bool {
	return item >= i.Start && item < i.Stop
}

// String generates a string representation of the interval.
func (i Interval) String() string {
	if i.Start == i.Stop-1 {
		return strconv.Itoa(i.Start)
	}

	return strconv.Itoa(i.Start) + ".." + strconv.Itoa(i.Stop-1)
}

// Length returns the length of the interval.
func (i Interval) Length() int {
	return i.Stop - i.Start
}

// IntervalSet represents a collection of [Interval] values, which may be read-only.
type IntervalSet struct {
	intervals []Interval
	readOnly  bool
}

// NewIntervalSet creates a new empty, writable, interval set.
func NewIntervalSet() *IntervalSet {

	i := new(IntervalSet)

	i.intervals = nil
	i.readOnly = false

	return i
}

func (i *IntervalSet) Equals(other *IntervalSet) bool {
	if len(i.intervals) != len(other.intervals) {
		return false
	}

	for k, v := range i.intervals {
		if v.Start != other.intervals[k].Start || v.Stop != other.intervals[k].Stop {
			return false
		}
	}

	return true
}

func (i *IntervalSet) first() int {
	if len(i.intervals) == 0 {
		return TokenInvalidType
	}

	return i.intervals[0].Start
}

func (i *IntervalSet) addOne(v int) {
	i.addInterval(NewInterval(v, v+1))
}

func (i *IntervalSet) addRange(l, h int) {
	i.addInterval(NewInterval(l, h+1))
}

func (i *IntervalSet) addInterval(v Interval) {
	if i.intervals == nil {
		i.intervals = make([]Interval, 0)
		i.intervals = append(i.intervals, v)
	} else {
		// find insert pos
		for k, interval := range i.intervals {
			// distinct range -> insert
			if v.Stop < interval.Start {
				i.intervals = append(i.intervals[0:k], append([]Interval{v}, i.intervals[k:]...)...)
				return
			} else if v.Stop == interval.Start {
				i.intervals[k].Start = v.Start
				return
			} else if v.Start <= interval.Stop {
				i.intervals[k] = NewInterval(intMin(interval.Start, v.Start), intMax(interval.Stop, v.Stop))

				// if not applying to end, merge potential overlaps
				if k < len(i.intervals)-1 {
					l := i.intervals[k]
					r := i.intervals[k+1]
					// if r contained in l
					if l.Stop >= r.Stop {
						i.intervals = append(i.intervals[0:k+1], i.intervals[k+2:]...)
					} else if l.Stop >= r.Start { // partial overlap
						i.intervals[k] = NewInterval(l.Start, r.Stop)
						i.intervals = append(i.intervals[0:k+1], i.intervals[k+2:]...)
					}
				}
				return
			}
		}
		// greater than any existing interval -> append
		i.intervals = append(i.intervals, v)
	}
}
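
// For example (editor's illustration of the merge behavior; addRange treats
// both bounds as inclusive, while the stored Stop is exclusive):
//
//	s := NewIntervalSet()
//	s.addRange(1, 3) // {1..3}
//	s.addRange(5, 7) // {1..3, 5..7}
//	s.addOne(4)      // adjacent ranges merge: s.String() is "1..7"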

func (i *IntervalSet) addSet(other *IntervalSet) *IntervalSet {
	if other.intervals != nil {
		for k := 0; k < len(other.intervals); k++ {
			i2 := other.intervals[k]
			i.addInterval(NewInterval(i2.Start, i2.Stop))
		}
	}
	return i
}

func (i *IntervalSet) complement(start int, stop int) *IntervalSet {
	result := NewIntervalSet()
	result.addInterval(NewInterval(start, stop+1))
	for j := 0; j < len(i.intervals); j++ {
		result.removeRange(i.intervals[j])
	}
	return result
}

func (i *IntervalSet) contains(item int) bool {
	if i.intervals == nil {
		return false
	}
	for k := 0; k < len(i.intervals); k++ {
		if i.intervals[k].Contains(item) {
			return true
		}
	}
	return false
}

func (i *IntervalSet) length() int {
	iLen := 0

	for _, v := range i.intervals {
		iLen += v.Length()
	}

	return iLen
}

func (i *IntervalSet) removeRange(v Interval) {
	if v.Start == v.Stop-1 {
		i.removeOne(v.Start)
	} else if i.intervals != nil {
		k := 0
		for n := 0; n < len(i.intervals); n++ {
			ni := i.intervals[k]
			// intervals are ordered
			if v.Stop <= ni.Start {
				return
			} else if v.Start > ni.Start && v.Stop < ni.Stop {
				i.intervals[k] = NewInterval(ni.Start, v.Start)
				x := NewInterval(v.Stop, ni.Stop)
				// i.intervals.splice(k, 0, x)
				i.intervals = append(i.intervals[0:k], append([]Interval{x}, i.intervals[k:]...)...)
				return
			} else if v.Start <= ni.Start && v.Stop >= ni.Stop {
				// i.intervals.splice(k, 1)
				i.intervals = append(i.intervals[0:k], i.intervals[k+1:]...)
				k = k - 1 // need another pass
			} else if v.Start < ni.Stop {
				i.intervals[k] = NewInterval(ni.Start, v.Start)
			} else if v.Stop < ni.Stop {
				i.intervals[k] = NewInterval(v.Stop, ni.Stop)
			}
			k++
		}
	}
}

func (i *IntervalSet) removeOne(v int) {
	if i.intervals != nil {
		for k := 0; k < len(i.intervals); k++ {
			ki := i.intervals[k]
			// intervals are ordered
			if v < ki.Start {
				return
			} else if v == ki.Start && v == ki.Stop-1 {
				// i.intervals.splice(k, 1)
				i.intervals = append(i.intervals[0:k], i.intervals[k+1:]...)
				return
			} else if v == ki.Start {
				i.intervals[k] = NewInterval(ki.Start+1, ki.Stop)
				return
			} else if v == ki.Stop-1 {
				i.intervals[k] = NewInterval(ki.Start, ki.Stop-1)
				return
			} else if v < ki.Stop-1 {
				x := NewInterval(ki.Start, v)
				ki.Start = v + 1
				// i.intervals.splice(k, 0, x)
				i.intervals = append(i.intervals[0:k], append([]Interval{x}, i.intervals[k:]...)...)
				return
			}
		}
	}
}

func (i *IntervalSet) String() string {
	return i.StringVerbose(nil, nil, false)
}

func (i *IntervalSet) StringVerbose(literalNames []string, symbolicNames []string, elemsAreChar bool) string {

	if i.intervals == nil {
		return "{}"
	} else if literalNames != nil || symbolicNames != nil {
		return i.toTokenString(literalNames, symbolicNames)
	} else if elemsAreChar {
		return i.toCharString()
	}

	return i.toIndexString()
}

func (i *IntervalSet) GetIntervals() []Interval {
	return i.intervals
}

func (i *IntervalSet) toCharString() string {
	// Start empty with capacity reserved; making the slice with a non-zero
	// length here would prepend empty strings to the output.
	names := make([]string, 0, len(i.intervals))

	var sb strings.Builder

	for j := 0; j < len(i.intervals); j++ {
		v := i.intervals[j]
		if v.Stop == v.Start+1 {
			if v.Start == TokenEOF {
				names = append(names, "<EOF>")
			} else {
				sb.WriteByte('\'')
				sb.WriteRune(rune(v.Start))
				sb.WriteByte('\'')
				names = append(names, sb.String())
				sb.Reset()
			}
		} else {
			sb.WriteByte('\'')
			sb.WriteRune(rune(v.Start))
			sb.WriteString("'..'")
			sb.WriteRune(rune(v.Stop - 1))
			sb.WriteByte('\'')
			names = append(names, sb.String())
			sb.Reset()
		}
	}
	if len(names) > 1 {
		return "{" + strings.Join(names, ", ") + "}"
	}

	return names[0]
}

func (i *IntervalSet) toIndexString() string {

	names := make([]string, 0)
	for j := 0; j < len(i.intervals); j++ {
		v := i.intervals[j]
		if v.Stop == v.Start+1 {
			if v.Start == TokenEOF {
				names = append(names, "<EOF>")
			} else {
				names = append(names, strconv.Itoa(v.Start))
			}
		} else {
			names = append(names, strconv.Itoa(v.Start)+".."+strconv.Itoa(v.Stop-1))
		}
	}
	if len(names) > 1 {
		return "{" + strings.Join(names, ", ") + "}"
	}

	return names[0]
}

func (i *IntervalSet) toTokenString(literalNames []string, symbolicNames []string) string {
	names := make([]string, 0)
	for _, v := range i.intervals {
		for j := v.Start; j < v.Stop; j++ {
			names = append(names, i.elementName(literalNames, symbolicNames, j))
		}
	}
	if len(names) > 1 {
		return "{" + strings.Join(names, ", ") + "}"
	}

	return names[0]
}

func (i *IntervalSet) elementName(literalNames []string, symbolicNames []string, a int) string {
	if a == TokenEOF {
		return "<EOF>"
	} else if a == TokenEpsilon {
		return "<EPSILON>"
	} else {
		if a < len(literalNames) && literalNames[a] != "" {
			return literalNames[a]
		}

		return symbolicNames[a]
	}
}
685 e2e/vendor/github.com/antlr4-go/antlr/v4/jcollect.go generated vendored Normal file
@@ -0,0 +1,685 @@
package antlr

// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
// Use of this file is governed by the BSD 3-clause license that
// can be found in the LICENSE.txt file in the project root.

import (
	"container/list"
	"runtime/debug"
	"sort"
	"sync"
)

// Collectable is an interface that a struct should implement if it is to be
// usable as a key in these collections.
type Collectable[T any] interface {
	Hash() int
	Equals(other Collectable[T]) bool
}

type Comparator[T any] interface {
	Hash1(o T) int
	Equals2(T, T) bool
}

type CollectionSource int
type CollectionDescriptor struct {
	SybolicName string
	Description string
}

const (
	UnknownCollection CollectionSource = iota
	ATNConfigLookupCollection
	ATNStateCollection
	DFAStateCollection
	ATNConfigCollection
	PredictionContextCollection
	SemanticContextCollection
	ClosureBusyCollection
	PredictionVisitedCollection
	MergeCacheCollection
	PredictionContextCacheCollection
	AltSetCollection
	ReachSetCollection
)

var CollectionDescriptors = map[CollectionSource]CollectionDescriptor{
	UnknownCollection: {
		SybolicName: "UnknownCollection",
		Description: "Unknown collection type. Only used if the target author thought it was an unimportant collection.",
	},
	ATNConfigCollection: {
		SybolicName: "ATNConfigCollection",
		Description: "ATNConfig collection. Used to store the ATNConfigs for a particular state in the ATN. " +
			"For instance, it is used to store the results of the closure() operation in the ATN.",
	},
	ATNConfigLookupCollection: {
		SybolicName: "ATNConfigLookupCollection",
		Description: "ATNConfigLookup collection. Used to store the ATNConfigs for a particular state in the ATN. " +
			"This is used to prevent duplicating equivalent states in an ATNConfigurationSet.",
	},
	ATNStateCollection: {
		SybolicName: "ATNStateCollection",
		Description: "ATNState collection. This is used to store the states of the ATN.",
	},
	DFAStateCollection: {
		SybolicName: "DFAStateCollection",
		Description: "DFAState collection. This is used to store the states of the DFA.",
	},
	PredictionContextCollection: {
		SybolicName: "PredictionContextCollection",
		Description: "PredictionContext collection. This is used to store the prediction contexts of the ATN and cache computes.",
	},
	SemanticContextCollection: {
		SybolicName: "SemanticContextCollection",
		Description: "SemanticContext collection. This is used to store the semantic contexts of the ATN.",
	},
	ClosureBusyCollection: {
		SybolicName: "ClosureBusyCollection",
		Description: "ClosureBusy collection. This is used to check for and prevent infinite recursion in right-recursive rules. " +
			"It stores ATNConfigs that are currently being processed in the closure() operation.",
	},
	PredictionVisitedCollection: {
		SybolicName: "PredictionVisitedCollection",
		Description: "A map that records whether we have visited a particular context when searching through cached entries.",
	},
	MergeCacheCollection: {
		SybolicName: "MergeCacheCollection",
		Description: "A map that records whether we have already merged two particular contexts and can save effort by not repeating it.",
	},
	PredictionContextCacheCollection: {
		SybolicName: "PredictionContextCacheCollection",
		Description: "A map that records whether we have already created a particular context and can save effort by not computing it again.",
	},
	AltSetCollection: {
		SybolicName: "AltSetCollection",
		Description: "Used to eliminate duplicate alternatives in an ATN config set.",
	},
	ReachSetCollection: {
		SybolicName: "ReachSetCollection",
		Description: "Used as a merge cache to prevent us needing to compute the merge of two states if we have already done it.",
	},
}

// JStore implements a container that allows the use of a struct to calculate the key
// for a collection of values akin to map. This is not meant to be a full-blown HashMap but just
// serve the needs of the ANTLR Go runtime.
//
// For ease of porting the logic of the runtime from the master target (Java), this collection
// operates in a similar way to Java, in that it can use any struct that supplies a Hash() and Equals()
// function as the key. The values are stored in a standard go map which internally is a form of hashmap
// itself; the key for the go map is the hash supplied by the key object. The collection is able to deal with
// hash conflicts by using a simple slice of values associated with the hash code indexed bucket. That isn't
// particularly efficient, but it is simple, and it works. As this is specifically for the ANTLR runtime, and
// we understand the requirements, this is fine - it is not a general purpose collection.
type JStore[T any, C Comparator[T]] struct {
	store      map[int][]T
	len        int
	comparator Comparator[T]
	stats      *JStatRec
}

func NewJStore[T any, C Comparator[T]](comparator Comparator[T], cType CollectionSource, desc string) *JStore[T, C] {

	if comparator == nil {
		panic("comparator cannot be nil")
	}

	s := &JStore[T, C]{
		store:      make(map[int][]T, 1),
		comparator: comparator,
	}
	if collectStats {
		s.stats = &JStatRec{
			Source:      cType,
			Description: desc,
		}

		// Track where we created it from if we are being asked to do so
		if runtimeConfig.statsTraceStacks {
			s.stats.CreateStack = debug.Stack()
		}
		Statistics.AddJStatRec(s.stats)
	}
	return s
}

// Put will store the given value in the collection. Note that the key for storage is generated from
// the value itself - this is specifically because that is what ANTLR needs - this would not be useful
// as any kind of general collection.
//
// If the key has a hash conflict, then the value will be added to the slice of values associated with the
// hash, unless the value is already in the slice, in which case the existing value is returned. Value equivalence is
// tested by calling the equals() method on the key.
//
// If the given value is already present in the store, then the existing value is returned as v and exists is set to true.
//
// If the given value is not present in the store, then the value is added to the store and returned as v and exists is set to false.
func (s *JStore[T, C]) Put(value T) (v T, exists bool) {

	if collectStats {
		s.stats.Puts++
	}
	kh := s.comparator.Hash1(value)

	var hClash bool
	for _, v1 := range s.store[kh] {
		hClash = true
		if s.comparator.Equals2(value, v1) {
			if collectStats {
				s.stats.PutHits++
				s.stats.PutHashConflicts++
			}
			return v1, true
		}
		if collectStats {
			s.stats.PutMisses++
		}
	}
	if collectStats && hClash {
		s.stats.PutHashConflicts++
	}
	s.store[kh] = append(s.store[kh], value)

	if collectStats {
		if len(s.store[kh]) > s.stats.MaxSlotSize {
			s.stats.MaxSlotSize = len(s.store[kh])
		}
	}
	s.len++
	if collectStats {
		s.stats.CurSize = s.len
		if s.len > s.stats.MaxSize {
			s.stats.MaxSize = s.len
		}
	}
	return value, false
}

// Get will return the value associated with the key - the type of the key is the same type as the value,
// which would not generally be useful, but this is a specific thing for ANTLR where the key is
// generated using the object we are going to store.
func (s *JStore[T, C]) Get(key T) (T, bool) {
	if collectStats {
		s.stats.Gets++
	}
	kh := s.comparator.Hash1(key)
	var hClash bool
	for _, v := range s.store[kh] {
		hClash = true
		if s.comparator.Equals2(key, v) {
			if collectStats {
				s.stats.GetHits++
				s.stats.GetHashConflicts++
			}
			return v, true
		}
		if collectStats {
			s.stats.GetMisses++
		}
	}
	if collectStats {
		if hClash {
			s.stats.GetHashConflicts++
		}
		s.stats.GetNoEnt++
	}
	return key, false
}

// Contains returns true if the given key is present in the store
func (s *JStore[T, C]) Contains(key T) bool {
	_, present := s.Get(key)
	return present
}
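
// Illustrative sketch (editor's note, not part of the upstream file): how the
// bucket-and-slice scheme deduplicates values. The comparator type shown is
// hypothetical; the runtime supplies its own comparators for ATN configurations
// and similar objects.
//
//	type idComparator struct{} // hypothetical comparator
//
//	func (idComparator) Hash1(o int) int       { return o % 8 } // deliberately forces conflicts
//	func (idComparator) Equals2(a, b int) bool { return a == b }
//
//	s := NewJStore[int, idComparator](idComparator{}, UnknownCollection, "demo")
//	s.Put(1)
//	s.Put(9)           // same bucket as 1; kept in the conflict slice
//	_, dup := s.Put(1) // dup == true, the existing value is returned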

func (s *JStore[T, C]) SortedSlice(less func(i, j T) bool) []T {
	vs := make([]T, 0, len(s.store))
	for _, v := range s.store {
		vs = append(vs, v...)
	}
	sort.Slice(vs, func(i, j int) bool {
		return less(vs[i], vs[j])
	})

	return vs
}

func (s *JStore[T, C]) Each(f func(T) bool) {
	for _, e := range s.store {
		for _, v := range e {
			f(v)
		}
	}
}

func (s *JStore[T, C]) Len() int {
	return s.len
}

func (s *JStore[T, C]) Values() []T {
	vs := make([]T, 0, len(s.store))
	for _, e := range s.store {
		vs = append(vs, e...)
	}
	return vs
}

type entry[K, V any] struct {
	key K
	val V
}

type JMap[K, V any, C Comparator[K]] struct {
	store      map[int][]*entry[K, V]
	len        int
	comparator Comparator[K]
	stats      *JStatRec
}

func NewJMap[K, V any, C Comparator[K]](comparator Comparator[K], cType CollectionSource, desc string) *JMap[K, V, C] {
	m := &JMap[K, V, C]{
		store:      make(map[int][]*entry[K, V], 1),
		comparator: comparator,
	}
	if collectStats {
		m.stats = &JStatRec{
			Source:      cType,
			Description: desc,
		}
		// Track where we created it from if we are being asked to do so
		if runtimeConfig.statsTraceStacks {
			m.stats.CreateStack = debug.Stack()
		}
		Statistics.AddJStatRec(m.stats)
	}
	return m
}

func (m *JMap[K, V, C]) Put(key K, val V) (V, bool) {
	if collectStats {
		m.stats.Puts++
	}
	kh := m.comparator.Hash1(key)

	var hClash bool
	for _, e := range m.store[kh] {
		hClash = true
		if m.comparator.Equals2(e.key, key) {
			if collectStats {
				m.stats.PutHits++
				m.stats.PutHashConflicts++
			}
			return e.val, true
		}
		if collectStats {
			m.stats.PutMisses++
		}
	}
	if collectStats {
		if hClash {
			m.stats.PutHashConflicts++
		}
	}
	m.store[kh] = append(m.store[kh], &entry[K, V]{key, val})
	if collectStats {
		if len(m.store[kh]) > m.stats.MaxSlotSize {
			m.stats.MaxSlotSize = len(m.store[kh])
		}
	}
	m.len++
	if collectStats {
		m.stats.CurSize = m.len
		if m.len > m.stats.MaxSize {
			m.stats.MaxSize = m.len
		}
	}
	return val, false
}

func (m *JMap[K, V, C]) Values() []V {
	vs := make([]V, 0, len(m.store))
	for _, e := range m.store {
		for _, v := range e {
			vs = append(vs, v.val)
		}
	}
	return vs
}

func (m *JMap[K, V, C]) Get(key K) (V, bool) {
	if collectStats {
		m.stats.Gets++
	}
	var none V
	kh := m.comparator.Hash1(key)
	var hClash bool
	for _, e := range m.store[kh] {
		hClash = true
		if m.comparator.Equals2(e.key, key) {
			if collectStats {
				m.stats.GetHits++
				m.stats.GetHashConflicts++
			}
			return e.val, true
		}
		if collectStats {
			m.stats.GetMisses++
		}
	}
	if collectStats {
		if hClash {
			m.stats.GetHashConflicts++
		}
		m.stats.GetNoEnt++
	}
	return none, false
}

func (m *JMap[K, V, C]) Len() int {
	return m.len
}

func (m *JMap[K, V, C]) Delete(key K) {
	kh := m.comparator.Hash1(key)
	for i, e := range m.store[kh] {
		if m.comparator.Equals2(e.key, key) {
			m.store[kh] = append(m.store[kh][:i], m.store[kh][i+1:]...)
			m.len--
			return
		}
	}
}

func (m *JMap[K, V, C]) Clear() {
	m.store = make(map[int][]*entry[K, V])
}

type JPCMap struct {
	store *JMap[*PredictionContext, *JMap[*PredictionContext, *PredictionContext, *ObjEqComparator[*PredictionContext]], *ObjEqComparator[*PredictionContext]]
	size  int
	stats *JStatRec
}

func NewJPCMap(cType CollectionSource, desc string) *JPCMap {
	m := &JPCMap{
		store: NewJMap[*PredictionContext, *JMap[*PredictionContext, *PredictionContext, *ObjEqComparator[*PredictionContext]], *ObjEqComparator[*PredictionContext]](pContextEqInst, cType, desc),
	}
	if collectStats {
		m.stats = &JStatRec{
			Source:      cType,
			Description: desc,
		}
		// Track where we created it from if we are being asked to do so
		if runtimeConfig.statsTraceStacks {
			m.stats.CreateStack = debug.Stack()
		}
		Statistics.AddJStatRec(m.stats)
	}
	return m
}

func (pcm *JPCMap) Get(k1, k2 *PredictionContext) (*PredictionContext, bool) {
	if collectStats {
		pcm.stats.Gets++
	}
	// Do we have a map stored by k1?
	//
	m2, present := pcm.store.Get(k1)
	if present {
		if collectStats {
			pcm.stats.GetHits++
		}
		// We found a map of values corresponding to k1, so now we need to look up k2 in that map
		//
		return m2.Get(k2)
	}
	if collectStats {
		pcm.stats.GetMisses++
	}
	return nil, false
}

func (pcm *JPCMap) Put(k1, k2, v *PredictionContext) {

	if collectStats {
		pcm.stats.Puts++
	}
	// First, does a map already exist for k1?
	//
	if m2, present := pcm.store.Get(k1); present {
		if collectStats {
			pcm.stats.PutHits++
		}
		_, present = m2.Put(k2, v)
		if !present {
			pcm.size++
			if collectStats {
				pcm.stats.CurSize = pcm.size
				if pcm.size > pcm.stats.MaxSize {
					pcm.stats.MaxSize = pcm.size
				}
			}
		}
	} else {
		// No map found for k1, so we create it, add in our value, then store it
		//
		if collectStats {
			pcm.stats.PutMisses++
			m2 = NewJMap[*PredictionContext, *PredictionContext, *ObjEqComparator[*PredictionContext]](pContextEqInst, pcm.stats.Source, pcm.stats.Description+" map entry")
		} else {
			m2 = NewJMap[*PredictionContext, *PredictionContext, *ObjEqComparator[*PredictionContext]](pContextEqInst, PredictionContextCacheCollection, "map entry")
		}

		m2.Put(k2, v)
		pcm.store.Put(k1, m2)
		pcm.size++
	}
}

type JPCMap2 struct {
	store map[int][]JPCEntry
	size  int
	stats *JStatRec
}

type JPCEntry struct {
	k1, k2, v *PredictionContext
}

func NewJPCMap2(cType CollectionSource, desc string) *JPCMap2 {
	m := &JPCMap2{
		store: make(map[int][]JPCEntry, 1000),
	}
	if collectStats {
		m.stats = &JStatRec{
			Source:      cType,
			Description: desc,
		}
		// Track where we created it from if we are being asked to do so
		if runtimeConfig.statsTraceStacks {
			m.stats.CreateStack = debug.Stack()
		}
		Statistics.AddJStatRec(m.stats)
	}
	return m
}

func dHash(k1, k2 *PredictionContext) int {
	return k1.cachedHash*31 + k2.cachedHash
}

func (pcm *JPCMap2) Get(k1, k2 *PredictionContext) (*PredictionContext, bool) {
	if collectStats {
		pcm.stats.Gets++
	}

	h := dHash(k1, k2)
	var hClash bool
	for _, e := range pcm.store[h] {
		hClash = true
		if e.k1.Equals(k1) && e.k2.Equals(k2) {
			if collectStats {
				pcm.stats.GetHits++
				pcm.stats.GetHashConflicts++
			}
			return e.v, true
		}
		if collectStats {
			pcm.stats.GetMisses++
		}
	}
	if collectStats {
		if hClash {
			pcm.stats.GetHashConflicts++
		}
		pcm.stats.GetNoEnt++
	}
	return nil, false
}

func (pcm *JPCMap2) Put(k1, k2, v *PredictionContext) (*PredictionContext, bool) {
	if collectStats {
		pcm.stats.Puts++
	}
	h := dHash(k1, k2)
	var hClash bool
	for _, e := range pcm.store[h] {
		hClash = true
		if e.k1.Equals(k1) && e.k2.Equals(k2) {
			if collectStats {
				pcm.stats.PutHits++
				pcm.stats.PutHashConflicts++
			}
			return e.v, true
		}
		if collectStats {
			pcm.stats.PutMisses++
		}
	}
	if collectStats {
		if hClash {
			pcm.stats.PutHashConflicts++
		}
	}
	pcm.store[h] = append(pcm.store[h], JPCEntry{k1, k2, v})
	pcm.size++
	if collectStats {
		pcm.stats.CurSize = pcm.size
		if pcm.size > pcm.stats.MaxSize {
			pcm.stats.MaxSize = pcm.size
		}
	}
	return nil, false
}

type VisitEntry struct {
	k *PredictionContext
	v *PredictionContext
}
type VisitRecord struct {
	store map[*PredictionContext]*PredictionContext
	len   int
	stats *JStatRec
}

type VisitList struct {
	cache *list.List
	lock  sync.RWMutex
}

var visitListPool = VisitList{
	cache: list.New(),
	lock:  sync.RWMutex{},
}

// NewVisitRecord returns a new VisitRecord instance from the pool if available.
// Note that this "map" uses a pointer as a key because we are emulating the behavior of
// IdentityHashMap in Java, which uses the `==` operator to compare whether the keys are equal,
// which means: is the key the same reference to an object, rather than is it .equals() to another
// object.
func NewVisitRecord() *VisitRecord {
	visitListPool.lock.Lock()
	el := visitListPool.cache.Front()
	defer visitListPool.lock.Unlock()
	var vr *VisitRecord
	if el == nil {
		vr = &VisitRecord{
			store: make(map[*PredictionContext]*PredictionContext),
		}
		if collectStats {
			vr.stats = &JStatRec{
				Source:      PredictionContextCacheCollection,
				Description: "VisitRecord",
			}
			// Track where we created it from if we are being asked to do so
			if runtimeConfig.statsTraceStacks {
				vr.stats.CreateStack = debug.Stack()
			}
		}
	} else {
		vr = el.Value.(*VisitRecord)
		visitListPool.cache.Remove(el)
		vr.store = make(map[*PredictionContext]*PredictionContext)
	}
	if collectStats {
		Statistics.AddJStatRec(vr.stats)
	}
	return vr
}

func (vr *VisitRecord) Release() {
	vr.len = 0
	vr.store = nil
	if collectStats {
		vr.stats.MaxSize = 0
		vr.stats.CurSize = 0
		vr.stats.Gets = 0
		vr.stats.GetHits = 0
		vr.stats.GetMisses = 0
		vr.stats.GetHashConflicts = 0
		vr.stats.GetNoEnt = 0
		vr.stats.Puts = 0
		vr.stats.PutHits = 0
		vr.stats.PutMisses = 0
		vr.stats.PutHashConflicts = 0
		vr.stats.MaxSlotSize = 0
	}
	visitListPool.lock.Lock()
	visitListPool.cache.PushBack(vr)
	visitListPool.lock.Unlock()
}

func (vr *VisitRecord) Get(k *PredictionContext) (*PredictionContext, bool) {
	if collectStats {
		vr.stats.Gets++
	}
	v := vr.store[k]
	if v != nil {
		if collectStats {
			vr.stats.GetHits++
		}
		return v, true
	}
	if collectStats {
		vr.stats.GetNoEnt++
	}
	return nil, false
}

func (vr *VisitRecord) Put(k, v *PredictionContext) (*PredictionContext, bool) {
	if collectStats {
		vr.stats.Puts++
	}
	vr.store[k] = v
	vr.len++
	if collectStats {
		vr.stats.CurSize = vr.len
		if vr.len > vr.stats.MaxSize {
			vr.stats.MaxSize = vr.len
		}
	}
	return v, false
}
426 e2e/vendor/github.com/antlr4-go/antlr/v4/lexer.go generated vendored Normal file
@@ -0,0 +1,426 @@
// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
|
||||
// Use of this file is governed by the BSD 3-clause license that
|
||||
// can be found in the LICENSE.txt file in the project root.
|
||||
|
||||
package antlr
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strconv"
|
||||
)
|
||||
|
||||
// A lexer is recognizer that draws input symbols from a character stream.
|
||||
// lexer grammars result in a subclass of this object. A Lexer object
|
||||
// uses simplified Match() and error recovery mechanisms in the interest
|
||||
// of speed.
|
||||
///
|
||||
|
||||
type Lexer interface {
|
||||
TokenSource
|
||||
Recognizer
|
||||
|
||||
Emit() Token
|
||||
|
||||
SetChannel(int)
|
||||
PushMode(int)
|
||||
PopMode() int
|
||||
SetType(int)
|
||||
SetMode(int)
|
||||
}
|
||||
|
||||
type BaseLexer struct {
|
||||
*BaseRecognizer
|
||||
|
||||
Interpreter ILexerATNSimulator
|
||||
TokenStartCharIndex int
|
||||
TokenStartLine int
|
||||
TokenStartColumn int
|
||||
ActionType int
|
||||
Virt Lexer // The most derived lexer implementation. Allows virtual method calls.
|
||||
|
||||
input CharStream
|
||||
factory TokenFactory
|
||||
tokenFactorySourcePair *TokenSourceCharStreamPair
|
||||
token Token
|
||||
hitEOF bool
|
||||
channel int
|
||||
thetype int
|
||||
modeStack IntStack
|
||||
mode int
|
||||
text string
|
||||
}
|
||||
|
||||
func NewBaseLexer(input CharStream) *BaseLexer {
|
||||
|
||||
lexer := new(BaseLexer)
|
||||
|
||||
lexer.BaseRecognizer = NewBaseRecognizer()
|
||||
|
||||
lexer.input = input
|
||||
lexer.factory = CommonTokenFactoryDEFAULT
|
||||
lexer.tokenFactorySourcePair = &TokenSourceCharStreamPair{lexer, input}
|
||||
|
||||
lexer.Virt = lexer
|
||||
|
||||
lexer.Interpreter = nil // child classes must populate it
|
||||
|
||||
// The goal of all lexer rules/methods is to create a token object.
|
||||
// l is an instance variable as multiple rules may collaborate to
|
||||
// create a single token. NextToken will return l object after
|
||||
// Matching lexer rule(s). If you subclass to allow multiple token
|
||||
// emissions, then set l to the last token to be Matched or
|
||||
// something non nil so that the auto token emit mechanism will not
|
||||
// emit another token.
|
||||
lexer.token = nil
|
||||
|
||||
// What character index in the stream did the current token start at?
|
||||
// Needed, for example, to get the text for current token. Set at
|
||||
// the start of NextToken.
|
||||
lexer.TokenStartCharIndex = -1
|
||||
|
||||
// The line on which the first character of the token resides///
|
||||
lexer.TokenStartLine = -1
|
||||
|
||||
// The character position of first character within the line///
|
||||
lexer.TokenStartColumn = -1
|
||||
|
||||
// Once we see EOF on char stream, next token will be EOF.
|
||||
// If you have DONE : EOF then you see DONE EOF.
|
||||
lexer.hitEOF = false
|
||||
|
||||
// The channel number for the current token///
|
||||
lexer.channel = TokenDefaultChannel
|
||||
|
||||
// The token type for the current token///
|
||||
lexer.thetype = TokenInvalidType
|
||||
|
||||
lexer.modeStack = make([]int, 0)
|
||||
lexer.mode = LexerDefaultMode
|
||||
|
||||
// You can set the text for the current token to override what is in
|
||||
// the input char buffer. Use setText() or can set l instance var.
|
||||
// /
|
||||
lexer.text = ""
|
||||
|
||||
return lexer
|
||||
}
|
||||
|
||||
const (
|
||||
LexerDefaultMode = 0
|
||||
LexerMore = -2
|
||||
LexerSkip = -3
|
||||
)
|
||||
|
||||
//goland:noinspection GoUnusedConst
|
||||
const (
|
||||
LexerDefaultTokenChannel = TokenDefaultChannel
|
||||
LexerHidden = TokenHiddenChannel
|
||||
LexerMinCharValue = 0x0000
|
||||
LexerMaxCharValue = 0x10FFFF
|
||||
)
|
||||
|
||||
func (b *BaseLexer) Reset() {
|
||||
// wack Lexer state variables
|
||||
if b.input != nil {
|
||||
b.input.Seek(0) // rewind the input
|
||||
}
|
||||
b.token = nil
|
||||
b.thetype = TokenInvalidType
|
||||
b.channel = TokenDefaultChannel
|
||||
b.TokenStartCharIndex = -1
|
||||
b.TokenStartColumn = -1
|
||||
b.TokenStartLine = -1
|
||||
b.text = ""
|
||||
|
||||
b.hitEOF = false
|
||||
b.mode = LexerDefaultMode
|
||||
b.modeStack = make([]int, 0)
|
||||
|
||||
b.Interpreter.reset()
|
||||
}
|
||||
|
||||
func (b *BaseLexer) GetInterpreter() ILexerATNSimulator {
|
||||
return b.Interpreter
|
||||
}
|
||||
|
||||
func (b *BaseLexer) GetInputStream() CharStream {
|
||||
return b.input
|
||||
}
|
||||
|
||||
func (b *BaseLexer) GetSourceName() string {
|
||||
return b.GrammarFileName
|
||||
}
|
||||
|
||||
func (b *BaseLexer) SetChannel(v int) {
|
||||
b.channel = v
|
||||
}
|
||||
|
||||
func (b *BaseLexer) GetTokenFactory() TokenFactory {
|
||||
return b.factory
|
||||
}
|
||||
|
||||
func (b *BaseLexer) setTokenFactory(f TokenFactory) {
|
||||
b.factory = f
|
||||
}
|
||||
|
||||
func (b *BaseLexer) safeMatch() (ret int) {
|
||||
defer func() {
|
||||
if e := recover(); e != nil {
|
||||
if re, ok := e.(RecognitionException); ok {
|
||||
b.notifyListeners(re) // Report error
|
||||
b.Recover(re)
|
||||
ret = LexerSkip // default
|
||||
}
|
||||
}
|
||||
}()
|
||||
|
||||
return b.Interpreter.Match(b.input, b.mode)
|
||||
}
|
||||
|
||||
// NextToken returns a token from the lexer input source i.e., Match a token on the source char stream.
|
||||
func (b *BaseLexer) NextToken() Token {
|
||||
if b.input == nil {
|
||||
panic("NextToken requires a non-nil input stream.")
|
||||
}
|
||||
|
||||
tokenStartMarker := b.input.Mark()
|
||||
|
||||
// previously in finally block
|
||||
defer func() {
|
||||
// make sure we release marker after Match or
|
||||
// unbuffered char stream will keep buffering
|
||||
b.input.Release(tokenStartMarker)
|
||||
}()
|
||||
|
||||
for {
|
||||
if b.hitEOF {
|
||||
b.EmitEOF()
|
||||
return b.token
|
||||
}
|
||||
b.token = nil
|
||||
b.channel = TokenDefaultChannel
|
||||
b.TokenStartCharIndex = b.input.Index()
|
||||
b.TokenStartColumn = b.Interpreter.GetCharPositionInLine()
|
||||
b.TokenStartLine = b.Interpreter.GetLine()
|
||||
b.text = ""
|
||||
continueOuter := false
|
||||
for {
|
||||
b.thetype = TokenInvalidType
|
||||
|
||||
ttype := b.safeMatch()
|
||||
|
||||
if b.input.LA(1) == TokenEOF {
|
||||
b.hitEOF = true
|
||||
}
|
||||
if b.thetype == TokenInvalidType {
|
||||
b.thetype = ttype
|
||||
}
|
||||
if b.thetype == LexerSkip {
|
||||
continueOuter = true
|
||||
break
|
||||
}
|
||||
if b.thetype != LexerMore {
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
if continueOuter {
|
||||
continue
|
||||
}
|
||||
if b.token == nil {
|
||||
b.Virt.Emit()
|
||||
}
|
||||
return b.token
|
||||
}
|
||||
}
|
||||
|
||||
// Skip instructs the lexer to Skip creating a token for current lexer rule
|
||||
// and look for another token. [NextToken] knows to keep looking when
|
||||
// a lexer rule finishes with token set to [SKIPTOKEN]. Recall that
|
||||
// if token==nil at end of any token rule, it creates one for you
|
||||
// and emits it.
|
||||
func (b *BaseLexer) Skip() {
|
||||
b.thetype = LexerSkip
|
||||
}
|
||||
|
||||
func (b *BaseLexer) More() {
|
||||
b.thetype = LexerMore
|
||||
}

// SetMode changes the lexer to a new mode. The lexer will use this mode from here on,
// and the rules for that mode will be in force.
func (b *BaseLexer) SetMode(m int) {
	b.mode = m
}

// PushMode saves the current lexer mode on a stack so that it can be restored later
// (see [PopMode]), then sets the current lexer mode to the supplied mode m.
func (b *BaseLexer) PushMode(m int) {
	if runtimeConfig.lexerATNSimulatorDebug {
		fmt.Println("pushMode " + strconv.Itoa(m))
	}
	b.modeStack.Push(b.mode)
	b.mode = m
}

// PopMode restores the lexer mode saved by a call to [PushMode]. It is a panic error if there is no saved mode to
// return to.
func (b *BaseLexer) PopMode() int {
	if len(b.modeStack) == 0 {
		panic("Empty Stack")
	}
	if runtimeConfig.lexerATNSimulatorDebug {
		fmt.Println("popMode back to " + fmt.Sprint(b.modeStack[0:len(b.modeStack)-1]))
	}
	i, _ := b.modeStack.Pop()
	b.mode = i
	return b.mode
}
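
// Example (illustrative sketch, not part of the vendored runtime): an
// island-grammar lexer pairs PushMode and PopMode around an embedded
// sub-language. The mode value below is hypothetical; real grammars use the
// generated mode constants.
func exampleToggleIslandMode(b *BaseLexer) {
	const templateMode = 1   // assumed: the grammar's second lexer mode
	b.PushMode(templateMode) // on the opening delimiter
	// ... tokens are now matched with templateMode rules ...
	b.PopMode() // on the closing delimiter; restores the saved mode
}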

func (b *BaseLexer) inputStream() CharStream {
	return b.input
}

// SetInputStream resets the lexer input stream and associated lexer state.
func (b *BaseLexer) SetInputStream(input CharStream) {
	b.input = nil
	b.tokenFactorySourcePair = &TokenSourceCharStreamPair{b, b.input}
	b.Reset()
	b.input = input
	b.tokenFactorySourcePair = &TokenSourceCharStreamPair{b, b.input}
}

func (b *BaseLexer) GetTokenSourceCharStreamPair() *TokenSourceCharStreamPair {
	return b.tokenFactorySourcePair
}

// EmitToken by default does not support multiple emits per [NextToken] invocation
// for efficiency reasons. Subclass and override this func, [NextToken],
// and [GetToken] (to push tokens into a list and pull from that list
// rather than a single variable as this implementation does).
func (b *BaseLexer) EmitToken(token Token) {
	b.token = token
}

// Emit is the standard method called to automatically emit a token at the
// outermost lexical rule. The token object should point into the
// char buffer start..stop. If there is a text override in 'text',
// use that to set the token's text. Override this method to emit
// custom [Token] objects or provide a new factory.
func (b *BaseLexer) Emit() Token {
	t := b.factory.Create(b.tokenFactorySourcePair, b.thetype, b.text, b.channel, b.TokenStartCharIndex, b.GetCharIndex()-1, b.TokenStartLine, b.TokenStartColumn)
	b.EmitToken(t)
	return t
}

// EmitEOF emits an EOF token. By default, this is the last token emitted.
func (b *BaseLexer) EmitEOF() Token {
	cpos := b.GetCharPositionInLine()
	lpos := b.GetLine()
	eof := b.factory.Create(b.tokenFactorySourcePair, TokenEOF, "", TokenDefaultChannel, b.input.Index(), b.input.Index()-1, lpos, cpos)
	b.EmitToken(eof)
	return eof
}

// GetCharPositionInLine returns the current position in the current line as far as the lexer is concerned.
func (b *BaseLexer) GetCharPositionInLine() int {
	return b.Interpreter.GetCharPositionInLine()
}

func (b *BaseLexer) GetLine() int {
	return b.Interpreter.GetLine()
}

func (b *BaseLexer) GetType() int {
	return b.thetype
}

func (b *BaseLexer) SetType(t int) {
	b.thetype = t
}

// GetCharIndex returns the index of the current character of lookahead.
func (b *BaseLexer) GetCharIndex() int {
	return b.input.Index()
}

// GetText returns the text Matched so far for the current token, or any text override.
func (b *BaseLexer) GetText() string {
	if b.text != "" {
		return b.text
	}

	return b.Interpreter.GetText(b.input)
}

// SetText sets the complete text of this token; it wipes any previous changes to the text.
func (b *BaseLexer) SetText(text string) {
	b.text = text
}

// GetATN returns the ATN used by the lexer.
func (b *BaseLexer) GetATN() *ATN {
	return b.Interpreter.ATN()
}

// GetAllTokens returns a list of all [Token] objects in the input char stream.
// Forces a load of all tokens that can be made from the input char stream.
//
// Does not include the EOF token.
func (b *BaseLexer) GetAllTokens() []Token {
	vl := b.Virt
	tokens := make([]Token, 0)
	t := vl.NextToken()
	for t.GetTokenType() != TokenEOF {
		tokens = append(tokens, t)
		t = vl.NextToken()
	}
	return tokens
}

func (b *BaseLexer) notifyListeners(e RecognitionException) {
	start := b.TokenStartCharIndex
	stop := b.input.Index()
	text := b.input.GetTextFromInterval(NewInterval(start, stop))
	msg := "token recognition error at: '" + text + "'"
	listener := b.GetErrorListenerDispatch()
	listener.SyntaxError(b, nil, b.TokenStartLine, b.TokenStartColumn, msg, e)
}

func (b *BaseLexer) getErrorDisplayForChar(c rune) string {
	if c == TokenEOF {
		return "<EOF>"
	} else if c == '\n' {
		return "\\n"
	} else if c == '\t' {
		return "\\t"
	} else if c == '\r' {
		return "\\r"
	} else {
		return string(c)
	}
}

func (b *BaseLexer) getCharErrorDisplay(c rune) string {
	return "'" + b.getErrorDisplayForChar(c) + "'"
}

// Recover handles a recognition error. A lexer can normally Match any char in its
// vocabulary after Matching a token, so here we do the easy thing and just kill a
// character and hope it all works out. You can instead use the rule invocation stack
// to do sophisticated error recovery if you are in a fragment rule.
//
// In general, lexers should not need to recover and should have rules that cover any eventuality, such as
// a character that makes no sense to the recognizer.
func (b *BaseLexer) Recover(re RecognitionException) {
	if b.input.LA(1) != TokenEOF {
		if _, ok := re.(*LexerNoViableAltException); ok {
			// Skip a char and try again
			b.Interpreter.Consume(b.input)
		} else {
			// TODO: Do we lose character or line position information?
			b.input.Consume()
		}
	}
}
452
e2e/vendor/github.com/antlr4-go/antlr/v4/lexer_action.go
generated
vendored
Normal file
@ -0,0 +1,452 @@
// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
// Use of this file is governed by the BSD 3-clause license that
// can be found in the LICENSE.txt file in the project root.

package antlr

import "strconv"

const (
	// LexerActionTypeChannel represents a [LexerChannelAction] action.
	LexerActionTypeChannel = 0

	// LexerActionTypeCustom represents a [LexerCustomAction] action.
	LexerActionTypeCustom = 1

	// LexerActionTypeMode represents a [LexerModeAction] action.
	LexerActionTypeMode = 2

	// LexerActionTypeMore represents a [LexerMoreAction] action.
	LexerActionTypeMore = 3

	// LexerActionTypePopMode represents a [LexerPopModeAction] action.
	LexerActionTypePopMode = 4

	// LexerActionTypePushMode represents a [LexerPushModeAction] action.
	LexerActionTypePushMode = 5

	// LexerActionTypeSkip represents a [LexerSkipAction] action.
	LexerActionTypeSkip = 6

	// LexerActionTypeType represents a [LexerTypeAction] action.
	LexerActionTypeType = 7
)

// LexerAction is implemented by all lexer actions that can execute when a lexer rule matches.
type LexerAction interface {
	getActionType() int
	getIsPositionDependent() bool
	execute(lexer Lexer)
	Hash() int
	Equals(other LexerAction) bool
}

type BaseLexerAction struct {
	actionType          int
	isPositionDependent bool
}

func NewBaseLexerAction(action int) *BaseLexerAction {
	la := new(BaseLexerAction)

	la.actionType = action
	la.isPositionDependent = false

	return la
}

func (b *BaseLexerAction) execute(_ Lexer) {
	panic("Not implemented")
}

func (b *BaseLexerAction) getActionType() int {
	return b.actionType
}

func (b *BaseLexerAction) getIsPositionDependent() bool {
	return b.isPositionDependent
}

func (b *BaseLexerAction) Hash() int {
	h := murmurInit(0)
	h = murmurUpdate(h, b.actionType)
	return murmurFinish(h, 1)
}

func (b *BaseLexerAction) Equals(other LexerAction) bool {
	return b.actionType == other.getActionType()
}
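
// Example (illustrative sketch, not part of the vendored runtime): a minimal
// custom action built on BaseLexerAction. Embedding promotes getActionType,
// getIsPositionDependent, Hash, and Equals, so only execute needs overriding.
// The type name is hypothetical.
type exampleSkipLikeAction struct {
	*BaseLexerAction
}

func newExampleSkipLikeAction() *exampleSkipLikeAction {
	return &exampleSkipLikeAction{NewBaseLexerAction(LexerActionTypeCustom)}
}

// execute reuses Lexer.Skip purely for demonstration.
func (a *exampleSkipLikeAction) execute(lexer Lexer) {
	lexer.Skip()
}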

// LexerSkipAction implements the [BaseLexerAction.Skip] lexer action by calling [Lexer.Skip].
//
// The Skip command does not have any parameters, so this action is
// implemented as a singleton instance exposed by [LexerSkipActionINSTANCE].
type LexerSkipAction struct {
	*BaseLexerAction
}

func NewLexerSkipAction() *LexerSkipAction {
	la := new(LexerSkipAction)
	la.BaseLexerAction = NewBaseLexerAction(LexerActionTypeSkip)
	return la
}

// LexerSkipActionINSTANCE provides a singleton instance of this parameterless lexer action.
var LexerSkipActionINSTANCE = NewLexerSkipAction()

func (l *LexerSkipAction) execute(lexer Lexer) {
	lexer.Skip()
}

// String returns a string representation of the current [LexerSkipAction].
func (l *LexerSkipAction) String() string {
	return "skip"
}

func (b *LexerSkipAction) Equals(other LexerAction) bool {
	return other.getActionType() == LexerActionTypeSkip
}

// LexerTypeAction implements the type lexer action by calling [Lexer.SetType]
// with the assigned type.
type LexerTypeAction struct {
	*BaseLexerAction

	thetype int
}

func NewLexerTypeAction(thetype int) *LexerTypeAction {
	l := new(LexerTypeAction)
	l.BaseLexerAction = NewBaseLexerAction(LexerActionTypeType)
	l.thetype = thetype
	return l
}

func (l *LexerTypeAction) execute(lexer Lexer) {
	lexer.SetType(l.thetype)
}

func (l *LexerTypeAction) Hash() int {
	h := murmurInit(0)
	h = murmurUpdate(h, l.actionType)
	h = murmurUpdate(h, l.thetype)
	return murmurFinish(h, 2)
}

func (l *LexerTypeAction) Equals(other LexerAction) bool {
	if l == other {
		return true
	} else if _, ok := other.(*LexerTypeAction); !ok {
		return false
	} else {
		return l.thetype == other.(*LexerTypeAction).thetype
	}
}

func (l *LexerTypeAction) String() string {
	return "actionType(" + strconv.Itoa(l.thetype) + ")"
}

// LexerPushModeAction implements the pushMode lexer action by calling
// [Lexer.PushMode] with the assigned mode.
type LexerPushModeAction struct {
	*BaseLexerAction
	mode int
}

func NewLexerPushModeAction(mode int) *LexerPushModeAction {
	l := new(LexerPushModeAction)
	l.BaseLexerAction = NewBaseLexerAction(LexerActionTypePushMode)

	l.mode = mode
	return l
}

// This action is implemented by calling [Lexer.PushMode] with the
// value supplied when the action was constructed.
func (l *LexerPushModeAction) execute(lexer Lexer) {
	lexer.PushMode(l.mode)
}

func (l *LexerPushModeAction) Hash() int {
	h := murmurInit(0)
	h = murmurUpdate(h, l.actionType)
	h = murmurUpdate(h, l.mode)
	return murmurFinish(h, 2)
}

func (l *LexerPushModeAction) Equals(other LexerAction) bool {
	if l == other {
		return true
	} else if _, ok := other.(*LexerPushModeAction); !ok {
		return false
	} else {
		return l.mode == other.(*LexerPushModeAction).mode
	}
}

func (l *LexerPushModeAction) String() string {
	return "pushMode(" + strconv.Itoa(l.mode) + ")"
}

// LexerPopModeAction implements the popMode lexer action by calling [Lexer.PopMode].
//
// The popMode command does not have any parameters, so this action is
// implemented as a singleton instance exposed by [LexerPopModeActionINSTANCE].
type LexerPopModeAction struct {
	*BaseLexerAction
}

func NewLexerPopModeAction() *LexerPopModeAction {
	l := new(LexerPopModeAction)

	l.BaseLexerAction = NewBaseLexerAction(LexerActionTypePopMode)

	return l
}

// LexerPopModeActionINSTANCE provides a singleton instance of this parameterless lexer action.
var LexerPopModeActionINSTANCE = NewLexerPopModeAction()

// This action is implemented by calling [Lexer.PopMode].
func (l *LexerPopModeAction) execute(lexer Lexer) {
	lexer.PopMode()
}

func (l *LexerPopModeAction) String() string {
	return "popMode"
}

// LexerMoreAction implements the more lexer action by calling [Lexer.More].
//
// The more command does not have any parameters, so this action is
// implemented as a singleton instance exposed by [LexerMoreActionINSTANCE].
type LexerMoreAction struct {
	*BaseLexerAction
}

func NewLexerMoreAction() *LexerMoreAction {
	l := new(LexerMoreAction)
	l.BaseLexerAction = NewBaseLexerAction(LexerActionTypeMore)

	return l
}

// LexerMoreActionINSTANCE provides a singleton instance of this parameterless lexer action.
var LexerMoreActionINSTANCE = NewLexerMoreAction()

// This action is implemented by calling [Lexer.More].
func (l *LexerMoreAction) execute(lexer Lexer) {
	lexer.More()
}

func (l *LexerMoreAction) String() string {
	return "more"
}

// LexerModeAction implements the mode lexer action by calling [Lexer.SetMode] with
// the assigned mode.
type LexerModeAction struct {
	*BaseLexerAction
	mode int
}

func NewLexerModeAction(mode int) *LexerModeAction {
	l := new(LexerModeAction)
	l.BaseLexerAction = NewBaseLexerAction(LexerActionTypeMode)
	l.mode = mode
	return l
}

// This action is implemented by calling [Lexer.SetMode] with the
// value supplied when the action was constructed.
func (l *LexerModeAction) execute(lexer Lexer) {
	lexer.SetMode(l.mode)
}

func (l *LexerModeAction) Hash() int {
	h := murmurInit(0)
	h = murmurUpdate(h, l.actionType)
	h = murmurUpdate(h, l.mode)
	return murmurFinish(h, 2)
}

func (l *LexerModeAction) Equals(other LexerAction) bool {
	if l == other {
		return true
	} else if _, ok := other.(*LexerModeAction); !ok {
		return false
	} else {
		return l.mode == other.(*LexerModeAction).mode
	}
}

func (l *LexerModeAction) String() string {
	return "mode(" + strconv.Itoa(l.mode) + ")"
}

// LexerCustomAction executes a custom lexer action by calling [Recognizer.Action] with the
// rule and action indexes assigned to the custom action. The implementation of
// a custom action is added to the generated code for the lexer in an override
// of [Recognizer.Action] when the grammar is compiled.
//
// This class may represent embedded actions created with the {...}
// syntax in ANTLR 4, as well as actions created for lexer commands where the
// command argument could not be evaluated when the grammar was compiled.
type LexerCustomAction struct {
	*BaseLexerAction
	ruleIndex, actionIndex int
}

// NewLexerCustomAction constructs a custom lexer action with the specified rule
// and action indexes, both of which are used for calls to [Recognizer.Action].
func NewLexerCustomAction(ruleIndex, actionIndex int) *LexerCustomAction {
	l := new(LexerCustomAction)
	l.BaseLexerAction = NewBaseLexerAction(LexerActionTypeCustom)
	l.ruleIndex = ruleIndex
	l.actionIndex = actionIndex
	l.isPositionDependent = true
	return l
}

// Custom actions are implemented by calling [Lexer.Action] with the
// appropriate rule and action indexes.
func (l *LexerCustomAction) execute(lexer Lexer) {
	lexer.Action(nil, l.ruleIndex, l.actionIndex)
}

func (l *LexerCustomAction) Hash() int {
	h := murmurInit(0)
	h = murmurUpdate(h, l.actionType)
	h = murmurUpdate(h, l.ruleIndex)
	h = murmurUpdate(h, l.actionIndex)
	return murmurFinish(h, 3)
}

func (l *LexerCustomAction) Equals(other LexerAction) bool {
	if l == other {
		return true
	} else if _, ok := other.(*LexerCustomAction); !ok {
		return false
	} else {
		return l.ruleIndex == other.(*LexerCustomAction).ruleIndex &&
			l.actionIndex == other.(*LexerCustomAction).actionIndex
	}
}

// LexerChannelAction implements the channel lexer action by calling
// [Lexer.SetChannel] with the assigned channel.
type LexerChannelAction struct {
	*BaseLexerAction
	channel int
}

// NewLexerChannelAction creates a channel lexer action by calling
// [Lexer.SetChannel] with the assigned channel.
//
// It constructs a new channel action with the specified channel value.
func NewLexerChannelAction(channel int) *LexerChannelAction {
	l := new(LexerChannelAction)
	l.BaseLexerAction = NewBaseLexerAction(LexerActionTypeChannel)
	l.channel = channel
	return l
}

// This action is implemented by calling [Lexer.SetChannel] with the
// value supplied when the action was constructed.
func (l *LexerChannelAction) execute(lexer Lexer) {
	lexer.SetChannel(l.channel)
}

func (l *LexerChannelAction) Hash() int {
	h := murmurInit(0)
	h = murmurUpdate(h, l.actionType)
	h = murmurUpdate(h, l.channel)
	return murmurFinish(h, 2)
}

func (l *LexerChannelAction) Equals(other LexerAction) bool {
	if l == other {
		return true
	} else if _, ok := other.(*LexerChannelAction); !ok {
		return false
	} else {
		return l.channel == other.(*LexerChannelAction).channel
	}
}

func (l *LexerChannelAction) String() string {
	return "channel(" + strconv.Itoa(l.channel) + ")"
}

// LexerIndexedCustomAction is used for tracking input offsets
// for position-dependent actions within a [LexerActionExecutor].
//
// This action is not serialized as part of the ATN, and is only required for
// position-dependent lexer actions which appear at a location other than the
// end of a rule. For more information about DFA optimizations employed for
// lexer actions, see [LexerActionExecutorappend] and
// [LexerActionExecutor.fixOffsetBeforeMatch].
type LexerIndexedCustomAction struct {
	*BaseLexerAction
	offset              int
	lexerAction         LexerAction
	isPositionDependent bool
}

// NewLexerIndexedCustomAction constructs a new indexed custom action by associating a character offset
// with a [LexerAction].
//
// Note: This class is only required for lexer actions for which
// [LexerAction.isPositionDependent] returns true.
//
// The offset points into the input [CharStream], relative to
// the token start index, at which the specified lexerAction should be
// executed.
func NewLexerIndexedCustomAction(offset int, lexerAction LexerAction) *LexerIndexedCustomAction {
	l := new(LexerIndexedCustomAction)
	l.BaseLexerAction = NewBaseLexerAction(lexerAction.getActionType())

	l.offset = offset
	l.lexerAction = lexerAction
	l.isPositionDependent = true

	return l
}

// This method calls execute on the wrapped lexerAction using the provided lexer.
func (l *LexerIndexedCustomAction) execute(lexer Lexer) {
	// assume the input stream position was properly set by the calling code
	l.lexerAction.execute(lexer)
}

func (l *LexerIndexedCustomAction) Hash() int {
	h := murmurInit(0)
	h = murmurUpdate(h, l.offset)
	h = murmurUpdate(h, l.lexerAction.Hash())
	return murmurFinish(h, 2)
}

func (l *LexerIndexedCustomAction) equals(other LexerAction) bool {
	if l == other {
		return true
	} else if _, ok := other.(*LexerIndexedCustomAction); !ok {
		return false
	} else {
		return l.offset == other.(*LexerIndexedCustomAction).offset &&
			l.lexerAction.Equals(other.(*LexerIndexedCustomAction).lexerAction)
	}
}
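
// Example (illustrative sketch, not part of the vendored runtime): manually
// wrapping a position-dependent action so it executes at a fixed offset from
// the token start. The offset value is hypothetical.
func exampleIndexedWrap(action LexerAction) *LexerIndexedCustomAction {
	return NewLexerIndexedCustomAction(2, action) // run 2 runes past the token start
}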
173
e2e/vendor/github.com/antlr4-go/antlr/v4/lexer_action_executor.go
generated
vendored
Normal file
@ -0,0 +1,173 @@
// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
// Use of this file is governed by the BSD 3-clause license that
// can be found in the LICENSE.txt file in the project root.

package antlr

import "golang.org/x/exp/slices"

// LexerActionExecutor represents an executor for a sequence of lexer actions which were traversed during
// the Matching operation of a lexer rule (token).
//
// The executor tracks position information for position-dependent lexer actions
// efficiently, ensuring that actions appearing only at the end of the rule do
// not cause bloating of the [DFA] created for the lexer.
type LexerActionExecutor struct {
	lexerActions []LexerAction
	cachedHash   int
}

func NewLexerActionExecutor(lexerActions []LexerAction) *LexerActionExecutor {
	if lexerActions == nil {
		lexerActions = make([]LexerAction, 0)
	}

	l := new(LexerActionExecutor)

	l.lexerActions = lexerActions

	// Caches the result of Hash, since the hash code is an element
	// of the performance-critical ATNConfig hashing operation.
	l.cachedHash = murmurInit(0)
	for _, a := range lexerActions {
		l.cachedHash = murmurUpdate(l.cachedHash, a.Hash())
	}
	l.cachedHash = murmurFinish(l.cachedHash, len(lexerActions))

	return l
}

// LexerActionExecutorappend creates a [LexerActionExecutor] which executes the actions for
// the input [LexerActionExecutor] followed by a specified
// [LexerAction].
// TODO: This does not match the Java code
func LexerActionExecutorappend(lexerActionExecutor *LexerActionExecutor, lexerAction LexerAction) *LexerActionExecutor {
	if lexerActionExecutor == nil {
		return NewLexerActionExecutor([]LexerAction{lexerAction})
	}

	return NewLexerActionExecutor(append(lexerActionExecutor.lexerActions, lexerAction))
}
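
// Example (illustrative sketch, not part of the vendored runtime): composing
// an executor one action at a time. Passing nil as the initial executor
// yields a single-action executor; the mode value is hypothetical.
func exampleComposeExecutor() *LexerActionExecutor {
	var ex *LexerActionExecutor
	ex = LexerActionExecutorappend(ex, LexerSkipActionINSTANCE)
	ex = LexerActionExecutorappend(ex, NewLexerPushModeAction(1))
	return ex
}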

// fixOffsetBeforeMatch creates a [LexerActionExecutor] which encodes the current offset
// for position-dependent lexer actions.
//
// Normally, when the executor encounters lexer actions where
// [LexerAction.isPositionDependent] returns true, it calls
// [IntStream.Seek] on the input [CharStream] to set the input
// position to the end of the current token. This behavior provides
// for efficient [DFA] representation of lexer actions which appear at the end
// of a lexer rule, even when the lexer rule Matches a variable number of
// characters.
//
// Prior to traversing a Match transition in the [ATN], the current offset
// from the token start index is assigned to all position-dependent lexer
// actions which have not already been assigned a fixed offset. By storing
// the offsets relative to the token start index, the [DFA] representation of
// lexer actions which appear in the middle of tokens remains efficient due
// to sharing among tokens of the same length, regardless of their absolute
// position in the input stream.
//
// If the current executor already has offsets assigned to all
// position-dependent lexer actions, the method returns this instance.
// Otherwise, the offset is assigned to all position-dependent
// lexer actions which do not already have offsets assigned.
//
// The func returns a [LexerActionExecutor] that stores input stream offsets
// for all position-dependent lexer actions.
func (l *LexerActionExecutor) fixOffsetBeforeMatch(offset int) *LexerActionExecutor {
	var updatedLexerActions []LexerAction
	for i := 0; i < len(l.lexerActions); i++ {
		_, ok := l.lexerActions[i].(*LexerIndexedCustomAction)
		if l.lexerActions[i].getIsPositionDependent() && !ok {
			if updatedLexerActions == nil {
				updatedLexerActions = make([]LexerAction, 0, len(l.lexerActions))
				updatedLexerActions = append(updatedLexerActions, l.lexerActions...)
			}
			updatedLexerActions[i] = NewLexerIndexedCustomAction(offset, l.lexerActions[i])
		}
	}
	if updatedLexerActions == nil {
		return l
	}

	return NewLexerActionExecutor(updatedLexerActions)
}
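
// Example (illustrative sketch, not part of the vendored runtime): only
// position-dependent actions that are not already indexed get wrapped. The
// custom action below is position-dependent by construction and is wrapped
// with the given offset, while the channel action is left as-is. The rule and
// action indexes, the offset, and the TokenHiddenChannel constant are
// assumptions for illustration.
func exampleFixOffsets() *LexerActionExecutor {
	ex := NewLexerActionExecutor([]LexerAction{
		NewLexerChannelAction(TokenHiddenChannel), // position-independent
		NewLexerCustomAction(0, 0),                // position-dependent
	})
	return ex.fixOffsetBeforeMatch(3) // 3 runes past the token start
}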

// execute runs the actions encapsulated by this executor within the context of a
// particular [Lexer].
//
// This method calls [IntStream.Seek] to set the position of the
// input [CharStream] prior to calling [LexerAction.execute] on a
// position-dependent action. Before the method returns, the input position
// will be restored to the same position it was in when the method was invoked.
//
// Parameter lexer is the lexer instance.
//
// Parameter input is the input stream which is the source for the current token.
// When this method is called, the current [IntStream.Index] for
// input should be the start of the following token, i.e. 1
// character past the end of the current token.
//
// Parameter startIndex is the token start index. This value may be passed to
// [IntStream.Seek] to set the input position to the beginning
// of the token.
func (l *LexerActionExecutor) execute(lexer Lexer, input CharStream, startIndex int) {
	requiresSeek := false
	stopIndex := input.Index()

	defer func() {
		if requiresSeek {
			input.Seek(stopIndex)
		}
	}()

	for i := 0; i < len(l.lexerActions); i++ {
		lexerAction := l.lexerActions[i]
		if la, ok := lexerAction.(*LexerIndexedCustomAction); ok {
			offset := la.offset
			input.Seek(startIndex + offset)
			lexerAction = la.lexerAction
			requiresSeek = (startIndex + offset) != stopIndex
		} else if lexerAction.getIsPositionDependent() {
			input.Seek(stopIndex)
			requiresSeek = false
		}
		lexerAction.execute(lexer)
	}
}

func (l *LexerActionExecutor) Hash() int {
	if l == nil {
		// TODO: Why is this here? l should not be nil
		return 61
	}

	// TODO: This is created from the action itself when the struct is created - will this be an issue at some point? Java uses the runtime assign hashcode
	return l.cachedHash
}

func (l *LexerActionExecutor) Equals(other interface{}) bool {
	if l == other {
		return true
	}
	othert, ok := other.(*LexerActionExecutor)
	if !ok {
		return false
	}
	if othert == nil {
		return false
	}
	if l.cachedHash != othert.cachedHash {
		return false
	}
	if len(l.lexerActions) != len(othert.lexerActions) {
		return false
	}
	return slices.EqualFunc(l.lexerActions, othert.lexerActions, func(i, j LexerAction) bool {
		return i.Equals(j)
	})
}
677
e2e/vendor/github.com/antlr4-go/antlr/v4/lexer_atn_simulator.go
generated
vendored
Normal file
@ -0,0 +1,677 @@
// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
// Use of this file is governed by the BSD 3-clause license that
// can be found in the LICENSE.txt file in the project root.

package antlr

import (
	"fmt"
	"strconv"
	"strings"
)

//goland:noinspection GoUnusedGlobalVariable
var (
	LexerATNSimulatorMinDFAEdge = 0
	LexerATNSimulatorMaxDFAEdge = 127 // forces unicode to stay in ATN

	LexerATNSimulatorMatchCalls = 0
)

type ILexerATNSimulator interface {
	IATNSimulator

	reset()
	Match(input CharStream, mode int) int
	GetCharPositionInLine() int
	GetLine() int
	GetText(input CharStream) string
	Consume(input CharStream)
}

type LexerATNSimulator struct {
	BaseATNSimulator

	recog              Lexer
	predictionMode     int
	mergeCache         *JPCMap2
	startIndex         int
	Line               int
	CharPositionInLine int
	mode               int
	prevAccept         *SimState
	MatchCalls         int
}

func NewLexerATNSimulator(recog Lexer, atn *ATN, decisionToDFA []*DFA, sharedContextCache *PredictionContextCache) *LexerATNSimulator {
	l := &LexerATNSimulator{
		BaseATNSimulator: BaseATNSimulator{
			atn:                atn,
			sharedContextCache: sharedContextCache,
		},
	}

	l.decisionToDFA = decisionToDFA
	l.recog = recog

	// The current token's starting index into the character stream.
	// Shared across DFA to ATN simulation in case the ATN fails and the
	// DFA did not have a previous accept state. In that case, we use the
	// ATN-generated exception object.
	l.startIndex = -1

	// line number 1..n within the input
	l.Line = 1

	// The index of the character relative to the beginning of the line
	// 0..n-1
	l.CharPositionInLine = 0

	l.mode = LexerDefaultMode

	// Used during DFA/ATN exec to record the most recent accept configuration
	// info
	l.prevAccept = NewSimState()

	return l
}

func (l *LexerATNSimulator) copyState(simulator *LexerATNSimulator) {
	l.CharPositionInLine = simulator.CharPositionInLine
	l.Line = simulator.Line
	l.mode = simulator.mode
	l.startIndex = simulator.startIndex
}

func (l *LexerATNSimulator) Match(input CharStream, mode int) int {
	l.MatchCalls++
	l.mode = mode
	mark := input.Mark()

	defer func() {
		input.Release(mark)
	}()

	l.startIndex = input.Index()
	l.prevAccept.reset()

	dfa := l.decisionToDFA[mode]

	var s0 *DFAState
	l.atn.stateMu.RLock()
	s0 = dfa.getS0()
	l.atn.stateMu.RUnlock()

	if s0 == nil {
		return l.MatchATN(input)
	}

	return l.execATN(input, s0)
}

func (l *LexerATNSimulator) reset() {
	l.prevAccept.reset()
	l.startIndex = -1
	l.Line = 1
	l.CharPositionInLine = 0
	l.mode = LexerDefaultMode
}

func (l *LexerATNSimulator) MatchATN(input CharStream) int {
	startState := l.atn.modeToStartState[l.mode]

	if runtimeConfig.lexerATNSimulatorDebug {
		fmt.Println("MatchATN mode " + strconv.Itoa(l.mode) + " start: " + startState.String())
	}
	oldMode := l.mode
	s0Closure := l.computeStartState(input, startState)
	suppressEdge := s0Closure.hasSemanticContext
	s0Closure.hasSemanticContext = false

	next := l.addDFAState(s0Closure, suppressEdge)

	predict := l.execATN(input, next)

	if runtimeConfig.lexerATNSimulatorDebug {
		fmt.Println("DFA after MatchATN: " + l.decisionToDFA[oldMode].ToLexerString())
	}
	return predict
}

func (l *LexerATNSimulator) execATN(input CharStream, ds0 *DFAState) int {
	if runtimeConfig.lexerATNSimulatorDebug {
		fmt.Println("start state closure=" + ds0.configs.String())
	}
	if ds0.isAcceptState {
		// allow zero-length tokens
		l.captureSimState(l.prevAccept, input, ds0)
	}
	t := input.LA(1)
	s := ds0 // s is the current/from DFA state

	for { // while more work
		if runtimeConfig.lexerATNSimulatorDebug {
			fmt.Println("execATN loop starting closure: " + s.configs.String())
		}

		// As we move src->trg, src->trg, we keep track of the previous trg to
		// avoid looking up the DFA state again, which is expensive.
		// If the previous target was already part of the DFA, we might
		// be able to avoid doing a reach operation upon t. If s!=nil,
		// it means that semantic predicates didn't prevent us from
		// creating a DFA state. Once we know s!=nil, we check to see if
		// the DFA state has an edge already for t. If so, we can just reuse
		// its configuration set; there's no point in re-computing it.
		// This is kind of like doing DFA simulation within the ATN
		// simulation because DFA simulation is really just a way to avoid
		// computing reach/closure sets. Technically, once we know that
		// we have a previously added DFA state, we could jump over to
		// the DFA simulator. But, that would mean popping back and forth
		// a lot and making things more complicated algorithmically.
		// This optimization makes a lot of sense for loops within DFA.
		// A character will take us back to an existing DFA state
		// that already has lots of edges out of it. e.g., .* in comments.
		target := l.getExistingTargetState(s, t)
		if target == nil {
			target = l.computeTargetState(input, s, t)
			// print("Computed:" + str(target))
		}
		if target == ATNSimulatorError {
			break
		}
		// If this is a consumable input element, make sure to consume before
		// capturing the accept state so the input index, line, and char
		// position accurately reflect the state of the interpreter at the
		// end of the token.
		if t != TokenEOF {
			l.Consume(input)
		}
		if target.isAcceptState {
			l.captureSimState(l.prevAccept, input, target)
			if t == TokenEOF {
				break
			}
		}
		t = input.LA(1)
		s = target // flip; the current DFA target becomes the new src/from state
	}

	return l.failOrAccept(l.prevAccept, input, s.configs, t)
}

// getExistingTargetState gets an existing target state for an edge in the DFA. If the target state
// for the edge has not yet been computed or is otherwise not available,
// this method returns nil.
//
// s is the current DFA state and t is the next input symbol. The func returns the existing
// target DFA state for the given input symbol t, or nil if the target state for this edge
// is not already cached.
func (l *LexerATNSimulator) getExistingTargetState(s *DFAState, t int) *DFAState {
	if t < LexerATNSimulatorMinDFAEdge || t > LexerATNSimulatorMaxDFAEdge {
		return nil
	}

	l.atn.edgeMu.RLock()
	defer l.atn.edgeMu.RUnlock()
	if s.getEdges() == nil {
		return nil
	}
	target := s.getIthEdge(t - LexerATNSimulatorMinDFAEdge)
	if runtimeConfig.lexerATNSimulatorDebug && target != nil {
		fmt.Println("reuse state " + strconv.Itoa(s.stateNumber) + " edge to " + strconv.Itoa(target.stateNumber))
	}
	return target
}

// computeTargetState computes a target state for an edge in the [DFA], and attempts to add the
// computed state and corresponding edge to the [DFA].
//
// The func returns the computed target [DFA] state for the given input symbol t.
// If this does not lead to a valid [DFA] state, this method
// returns ATNSimulatorError.
func (l *LexerATNSimulator) computeTargetState(input CharStream, s *DFAState, t int) *DFAState {
	reach := NewOrderedATNConfigSet()

	// if we don't find an existing DFA state
	// Fill reach starting from closure, following t transitions
	l.getReachableConfigSet(input, s.configs, reach, t)

	if len(reach.configs) == 0 { // we got nowhere on t from s
		if !reach.hasSemanticContext {
			// we got nowhere on t, but don't throw out this knowledge; it'd
			// cause a fail-over from DFA later.
			l.addDFAEdge(s, t, ATNSimulatorError, nil)
		}
		// stop when we can't Match any more characters
		return ATNSimulatorError
	}
	// Add an edge from s to the target DFA state found/created for reach
	return l.addDFAEdge(s, t, nil, reach)
}

func (l *LexerATNSimulator) failOrAccept(prevAccept *SimState, input CharStream, reach *ATNConfigSet, t int) int {
	if l.prevAccept.dfaState != nil {
		lexerActionExecutor := prevAccept.dfaState.lexerActionExecutor
		l.accept(input, lexerActionExecutor, l.startIndex, prevAccept.index, prevAccept.line, prevAccept.column)
		return prevAccept.dfaState.prediction
	}

	// if no accept and EOF is the first char, return EOF
	if t == TokenEOF && input.Index() == l.startIndex {
		return TokenEOF
	}

	panic(NewLexerNoViableAltException(l.recog, input, l.startIndex, reach))
}

// getReachableConfigSet, when given a starting configuration set, figures out all [ATN] configurations
// we can reach upon input t.
//
// Parameter reach is a return parameter.
func (l *LexerATNSimulator) getReachableConfigSet(input CharStream, closure *ATNConfigSet, reach *ATNConfigSet, t int) {
	// this is used to skip processing for configs which have a lower priority
	// than a config that already reached an accept state for the same rule
	SkipAlt := ATNInvalidAltNumber

	for _, cfg := range closure.configs {
		currentAltReachedAcceptState := cfg.GetAlt() == SkipAlt
		if currentAltReachedAcceptState && cfg.passedThroughNonGreedyDecision {
			continue
		}

		if runtimeConfig.lexerATNSimulatorDebug {
			fmt.Printf("testing %s at %s\n", l.GetTokenName(t), cfg.String())
		}

		for _, trans := range cfg.GetState().GetTransitions() {
			target := l.getReachableTarget(trans, t)
			if target != nil {
				lexerActionExecutor := cfg.lexerActionExecutor
				if lexerActionExecutor != nil {
					lexerActionExecutor = lexerActionExecutor.fixOffsetBeforeMatch(input.Index() - l.startIndex)
				}
				treatEOFAsEpsilon := t == TokenEOF
				config := NewLexerATNConfig3(cfg, target, lexerActionExecutor)
				if l.closure(input, config, reach,
					currentAltReachedAcceptState, true, treatEOFAsEpsilon) {
					// any remaining configs for this alt have a lower priority
					// than the one that just reached an accept state.
					SkipAlt = cfg.GetAlt()
				}
			}
		}
	}
}

func (l *LexerATNSimulator) accept(input CharStream, lexerActionExecutor *LexerActionExecutor, startIndex, index, line, charPos int) {
	if runtimeConfig.lexerATNSimulatorDebug {
		fmt.Printf("ACTION %v\n", lexerActionExecutor)
	}
	// seek to after the last char in the token
	input.Seek(index)
	l.Line = line
	l.CharPositionInLine = charPos
	if lexerActionExecutor != nil && l.recog != nil {
		lexerActionExecutor.execute(l.recog, input, startIndex)
	}
}

func (l *LexerATNSimulator) getReachableTarget(trans Transition, t int) ATNState {
	if trans.Matches(t, 0, LexerMaxCharValue) {
		return trans.getTarget()
	}

	return nil
}

func (l *LexerATNSimulator) computeStartState(input CharStream, p ATNState) *ATNConfigSet {
	configs := NewOrderedATNConfigSet()
	for i := 0; i < len(p.GetTransitions()); i++ {
		target := p.GetTransitions()[i].getTarget()
		cfg := NewLexerATNConfig6(target, i+1, BasePredictionContextEMPTY)
		l.closure(input, cfg, configs, false, false, false)
	}

	return configs
}

// closure: since the alternatives within any lexer decision are ordered by
// preference, this method stops pursuing the closure as soon as an accept
// state is reached. After the first accept state is reached by depth-first
// search from config, all other (potentially reachable) states for
// this rule would have a lower priority.
//
// The func returns true if an accept state is reached.
func (l *LexerATNSimulator) closure(input CharStream, config *ATNConfig, configs *ATNConfigSet,
	currentAltReachedAcceptState, speculative, treatEOFAsEpsilon bool) bool {

	if runtimeConfig.lexerATNSimulatorDebug {
		fmt.Println("closure(" + config.String() + ")")
	}

	_, ok := config.state.(*RuleStopState)
	if ok {

		if runtimeConfig.lexerATNSimulatorDebug {
			if l.recog != nil {
				fmt.Printf("closure at %s rule stop %s\n", l.recog.GetRuleNames()[config.state.GetRuleIndex()], config)
			} else {
				fmt.Printf("closure at rule stop %s\n", config)
			}
		}

		if config.context == nil || config.context.hasEmptyPath() {
			if config.context == nil || config.context.isEmpty() {
				configs.Add(config, nil)
				return true
			}

			configs.Add(NewLexerATNConfig2(config, config.state, BasePredictionContextEMPTY), nil)
			currentAltReachedAcceptState = true
		}
		if config.context != nil && !config.context.isEmpty() {
			for i := 0; i < config.context.length(); i++ {
				if config.context.getReturnState(i) != BasePredictionContextEmptyReturnState {
					newContext := config.context.GetParent(i) // "pop" return state
					returnState := l.atn.states[config.context.getReturnState(i)]
					cfg := NewLexerATNConfig2(config, returnState, newContext)
					currentAltReachedAcceptState = l.closure(input, cfg, configs, currentAltReachedAcceptState, speculative, treatEOFAsEpsilon)
				}
			}
		}
		return currentAltReachedAcceptState
	}
	// optimization
	if !config.state.GetEpsilonOnlyTransitions() {
		if !currentAltReachedAcceptState || !config.passedThroughNonGreedyDecision {
			configs.Add(config, nil)
		}
	}
	for j := 0; j < len(config.state.GetTransitions()); j++ {
		trans := config.state.GetTransitions()[j]
		cfg := l.getEpsilonTarget(input, config, trans, configs, speculative, treatEOFAsEpsilon)
		if cfg != nil {
			currentAltReachedAcceptState = l.closure(input, cfg, configs,
				currentAltReachedAcceptState, speculative, treatEOFAsEpsilon)
		}
	}
	return currentAltReachedAcceptState
}

// getEpsilonTarget has a side effect: it can alter configs.hasSemanticContext.
func (l *LexerATNSimulator) getEpsilonTarget(input CharStream, config *ATNConfig, trans Transition,
	configs *ATNConfigSet, speculative, treatEOFAsEpsilon bool) *ATNConfig {

	var cfg *ATNConfig

	if trans.getSerializationType() == TransitionRULE {

		rt := trans.(*RuleTransition)
		newContext := SingletonBasePredictionContextCreate(config.context, rt.followState.GetStateNumber())
		cfg = NewLexerATNConfig2(config, trans.getTarget(), newContext)

	} else if trans.getSerializationType() == TransitionPRECEDENCE {
		panic("Precedence predicates are not supported in lexers.")
	} else if trans.getSerializationType() == TransitionPREDICATE {
		// Track traversing semantic predicates. If we traverse,
		// we cannot add a DFA state for this "reach" computation
		// because the DFA would not test the predicate again in the
		// future. Rather than creating collections of semantic predicates
		// like v3 and testing them on prediction, v4 will test them on the
		// fly all the time using the ATN, not the DFA. This is slower but
		// semantically it's not used that often. One of the key elements of
		// this predicate mechanism is not adding DFA states that see
		// predicates immediately afterwards in the ATN. For example,

		// a : ID {p1}? | ID {p2}?

		// should create the start state for rule 'a' (to save start state
		// competition), but should not create the target of the ID state. The
		// collection of ATN states the following ID references includes
		// states reached by traversing predicates. Since this is when we
		// test them, we cannot cache the DFA state target of ID.

		pt := trans.(*PredicateTransition)

		if runtimeConfig.lexerATNSimulatorDebug {
			fmt.Println("EVAL rule " + strconv.Itoa(trans.(*PredicateTransition).ruleIndex) + ":" + strconv.Itoa(pt.predIndex))
		}
		configs.hasSemanticContext = true
		if l.evaluatePredicate(input, pt.ruleIndex, pt.predIndex, speculative) {
			cfg = NewLexerATNConfig4(config, trans.getTarget())
		}
	} else if trans.getSerializationType() == TransitionACTION {
		if config.context == nil || config.context.hasEmptyPath() {
			// execute actions anywhere in the start rule for a token.
			//
			// TODO: if the entry rule is invoked recursively, some
			// actions may be executed during the recursive call. The
			// problem can appear when hasEmptyPath() is true but
			// isEmpty() is false. In this case, the config needs to be
			// split into two contexts - one with just the empty path
			// and another with everything but the empty path.
			// Unfortunately, the current algorithm does not allow
			// getEpsilonTarget to return two configurations, so
			// additional modifications are needed before we can support
			// the split operation.
			lexerActionExecutor := LexerActionExecutorappend(config.lexerActionExecutor, l.atn.lexerActions[trans.(*ActionTransition).actionIndex])
			cfg = NewLexerATNConfig3(config, trans.getTarget(), lexerActionExecutor)
		} else {
			// ignore actions in referenced rules
			cfg = NewLexerATNConfig4(config, trans.getTarget())
		}
	} else if trans.getSerializationType() == TransitionEPSILON {
		cfg = NewLexerATNConfig4(config, trans.getTarget())
	} else if trans.getSerializationType() == TransitionATOM ||
		trans.getSerializationType() == TransitionRANGE ||
		trans.getSerializationType() == TransitionSET {
		if treatEOFAsEpsilon {
			if trans.Matches(TokenEOF, 0, LexerMaxCharValue) {
				cfg = NewLexerATNConfig4(config, trans.getTarget())
			}
		}
	}
	return cfg
}

// evaluatePredicate evaluates a predicate specified in the lexer.
//
// If speculative is true, this method was called before
// [Consume] for the Matched character. This method should call
// [Consume] before evaluating the predicate to ensure position-
// sensitive values, including [GetText], [GetLine],
// and [GetColumn], properly reflect the current
// lexer state. This method should restore input and the simulator
// to the original state before returning, i.e. undo the actions made by the
// call to [Consume].
//
// The func returns true if the specified predicate evaluates to true.
func (l *LexerATNSimulator) evaluatePredicate(input CharStream, ruleIndex, predIndex int, speculative bool) bool {
	// assume true if no recognizer was provided
	if l.recog == nil {
		return true
	}
	if !speculative {
		return l.recog.Sempred(nil, ruleIndex, predIndex)
	}
	savedcolumn := l.CharPositionInLine
	savedLine := l.Line
	index := input.Index()
	marker := input.Mark()

	defer func() {
		l.CharPositionInLine = savedcolumn
		l.Line = savedLine
		input.Seek(index)
		input.Release(marker)
	}()

	l.Consume(input)
	return l.recog.Sempred(nil, ruleIndex, predIndex)
}

func (l *LexerATNSimulator) captureSimState(settings *SimState, input CharStream, dfaState *DFAState) {
	settings.index = input.Index()
	settings.line = l.Line
	settings.column = l.CharPositionInLine
	settings.dfaState = dfaState
}

func (l *LexerATNSimulator) addDFAEdge(from *DFAState, tk int, to *DFAState, cfgs *ATNConfigSet) *DFAState {
	if to == nil && cfgs != nil {
		// leading up to this call, ATNConfigSet.hasSemanticContext is used as a
		// marker indicating dynamic predicate evaluation makes this edge
		// dependent on the specific input sequence, so the static edge in the
		// DFA should be omitted. The target DFAState is still created since
		// execATN has the ability to resynchronize with the DFA state cache
		// following the predicate evaluation step.
		//
		// TJP notes: next time through the DFA, we see a pred again and eval.
		// If that gets us to a previously created (but dangling) DFA
		// state, we can continue in pure DFA mode from there.
		//
		suppressEdge := cfgs.hasSemanticContext
		cfgs.hasSemanticContext = false
		to = l.addDFAState(cfgs, true)

		if suppressEdge {
			return to
		}
	}
	// add the edge
	if tk < LexerATNSimulatorMinDFAEdge || tk > LexerATNSimulatorMaxDFAEdge {
		// Only track edges within the DFA bounds
		return to
	}
	if runtimeConfig.lexerATNSimulatorDebug {
		fmt.Println("EDGE " + from.String() + " -> " + to.String() + " upon " + strconv.Itoa(tk))
	}
	l.atn.edgeMu.Lock()
	defer l.atn.edgeMu.Unlock()
	if from.getEdges() == nil {
		// make room for tokens 1..n and -1 masquerading as index 0
		from.setEdges(make([]*DFAState, LexerATNSimulatorMaxDFAEdge-LexerATNSimulatorMinDFAEdge+1))
	}
	from.setIthEdge(tk-LexerATNSimulatorMinDFAEdge, to) // connect

	return to
}

// addDFAState adds a new DFA state if there isn't one with this set of
// configurations already. This method also detects the first
// configuration containing an ATN rule stop state. Later, when
// traversing the DFA, we will know which rule to accept.
func (l *LexerATNSimulator) addDFAState(configs *ATNConfigSet, suppressEdge bool) *DFAState {

	proposed := NewDFAState(-1, configs)
	var firstConfigWithRuleStopState *ATNConfig

	for _, cfg := range configs.configs {
		_, ok := cfg.GetState().(*RuleStopState)

		if ok {
			firstConfigWithRuleStopState = cfg
			break
		}
	}
	if firstConfigWithRuleStopState != nil {
		proposed.isAcceptState = true
		proposed.lexerActionExecutor = firstConfigWithRuleStopState.lexerActionExecutor
		proposed.setPrediction(l.atn.ruleToTokenType[firstConfigWithRuleStopState.GetState().GetRuleIndex()])
	}
	dfa := l.decisionToDFA[l.mode]

	l.atn.stateMu.Lock()
	defer l.atn.stateMu.Unlock()
	existing, present := dfa.Get(proposed)
	if present {

		// This state was already present, so just return it.
		//
		proposed = existing
	} else {

		// We need to add the new state
		//
		proposed.stateNumber = dfa.Len()
		configs.readOnly = true
		configs.configLookup = nil // Not needed now
		proposed.configs = configs
		dfa.Put(proposed)
	}
	if !suppressEdge {
		dfa.setS0(proposed)
	}
	return proposed
}

func (l *LexerATNSimulator) getDFA(mode int) *DFA {
	return l.decisionToDFA[mode]
}

// GetText returns the text [Match]ed so far for the current token.
func (l *LexerATNSimulator) GetText(input CharStream) string {
	// index is first lookahead char, don't include.
	return input.GetTextFromInterval(NewInterval(l.startIndex, input.Index()-1))
}

func (l *LexerATNSimulator) Consume(input CharStream) {
	curChar := input.LA(1)
	if curChar == int('\n') {
		l.Line++
		l.CharPositionInLine = 0
	} else {
		l.CharPositionInLine++
	}
	input.Consume()
}

func (l *LexerATNSimulator) GetCharPositionInLine() int {
	return l.CharPositionInLine
}

func (l *LexerATNSimulator) GetLine() int {
	return l.Line
}

func (l *LexerATNSimulator) GetTokenName(tt int) string {
	if tt == -1 {
		return "EOF"
	}

	var sb strings.Builder
	sb.Grow(6)
	sb.WriteByte('\'')
	sb.WriteRune(rune(tt))
	sb.WriteByte('\'')

	return sb.String()
}

func resetSimState(sim *SimState) {
	sim.index = -1
	sim.line = 0
	sim.column = -1
	sim.dfaState = nil
}

type SimState struct {
	index    int
	line     int
	column   int
	dfaState *DFAState
}

func NewSimState() *SimState {
	s := new(SimState)
	resetSimState(s)
	return s
}

func (s *SimState) reset() {
	resetSimState(s)
}
218
e2e/vendor/github.com/antlr4-go/antlr/v4/ll1_analyzer.go
generated
vendored
Normal file
@ -0,0 +1,218 @@
// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
// Use of this file is governed by the BSD 3-clause license that
// can be found in the LICENSE.txt file in the project root.

package antlr

type LL1Analyzer struct {
	atn *ATN
}

func NewLL1Analyzer(atn *ATN) *LL1Analyzer {
	la := new(LL1Analyzer)
	la.atn = atn
	return la
}

const (
	// LL1AnalyzerHitPred is a special value added to the lookahead sets to indicate that we hit
	// a predicate during analysis if
	//
	//	seeThruPreds==false
	LL1AnalyzerHitPred = TokenInvalidType
)

// *
// Calculates the SLL(1) expected lookahead set for each outgoing transition
// of an {@link ATNState}. The returned array has one element for each
// outgoing transition in {@code s}. If the closure from transition
// <em>i</em> leads to a semantic predicate before Matching a symbol, the
// element at index <em>i</em> of the result will be {@code nil}.
//
// @param s the ATN state
// @return the expected symbols for each outgoing transition of {@code s}.
func (la *LL1Analyzer) getDecisionLookahead(s ATNState) []*IntervalSet {
	if s == nil {
		return nil
	}
	count := len(s.GetTransitions())
	look := make([]*IntervalSet, count)
	for alt := 0; alt < count; alt++ {

		look[alt] = NewIntervalSet()
		lookBusy := NewJStore[*ATNConfig, Comparator[*ATNConfig]](aConfEqInst, ClosureBusyCollection, "LL1Analyzer.getDecisionLookahead for lookBusy")
		la.look1(s.GetTransitions()[alt].getTarget(), nil, BasePredictionContextEMPTY, look[alt], lookBusy, NewBitSet(), false, false)

		// Wipe out lookahead for the alternative if we found nothing,
		// or we had a predicate when we !seeThruPreds
		if look[alt].length() == 0 || look[alt].contains(LL1AnalyzerHitPred) {
			look[alt] = nil
		}
	}
	return look
}

// Look computes the set of tokens that can follow s in the [ATN] in the
// specified ctx.
//
// If ctx is nil and the end of the rule containing
// s is reached, [EPSILON] is added to the result set.
//
// If ctx is not nil and the end of the outermost rule is
// reached, [EOF] is added to the result set.
//
// Parameter s is the ATN state, and stopState is the ATN state to stop at. This can be a
// [BlockEndState] to detect epsilon paths through a closure.
//
// Parameter ctx is the complete parser context, or nil if the context
// should be ignored.
//
// The func returns the set of tokens that can follow s in the [ATN] in the
// specified ctx.
func (la *LL1Analyzer) Look(s, stopState ATNState, ctx RuleContext) *IntervalSet {
	r := NewIntervalSet()
	var lookContext *PredictionContext
	if ctx != nil {
		lookContext = predictionContextFromRuleContext(s.GetATN(), ctx)
	}
	la.look1(s, stopState, lookContext, r, NewJStore[*ATNConfig, Comparator[*ATNConfig]](aConfEqInst, ClosureBusyCollection, "LL1Analyzer.Look for la.look1()"),
		NewBitSet(), true, true)
	return r
}

//*
// Compute set of tokens that can follow {@code s} in the ATN in the
// specified {@code ctx}.
//
// <p>If {@code ctx} is {@code nil} and {@code stopState} or the end of the
// rule containing {@code s} is reached, {@link Token//EPSILON} is added to
// the result set. If {@code ctx} is not {@code nil} and {@code addEOF} is
// {@code true} and {@code stopState} or the end of the outermost rule is
// reached, {@link Token//EOF} is added to the result set.</p>
//
// @param s the ATN state.
// @param stopState the ATN state to stop at. This can be a
// {@link BlockEndState} to detect epsilon paths through a closure.
// @param ctx The outer context, or {@code nil} if the outer context should
// not be used.
// @param look The result lookahead set.
// @param lookBusy A set used for preventing epsilon closures in the ATN
// from causing a stack overflow. Outside code should pass
// {@code NewSet<ATNConfig>} for this argument.
// @param calledRuleStack A set used for preventing left recursion in the
// ATN from causing a stack overflow. Outside code should pass
// {@code NewBitSet()} for this argument.
// @param seeThruPreds {@code true} to treat semantic predicates as
// implicitly {@code true} and "see through them", otherwise {@code false}
// to treat semantic predicates as opaque and add {@link //HitPred} to the
// result if one is encountered.
// @param addEOF Add {@link Token//EOF} to the result if the end of the
// outermost context is reached. This parameter has no effect if {@code ctx}
// is {@code nil}.

func (la *LL1Analyzer) look2(_, stopState ATNState, ctx *PredictionContext, look *IntervalSet, lookBusy *JStore[*ATNConfig, Comparator[*ATNConfig]],
	calledRuleStack *BitSet, seeThruPreds, addEOF bool, i int) {

	returnState := la.atn.states[ctx.getReturnState(i)]
	la.look1(returnState, stopState, ctx.GetParent(i), look, lookBusy, calledRuleStack, seeThruPreds, addEOF)
}

func (la *LL1Analyzer) look1(s, stopState ATNState, ctx *PredictionContext, look *IntervalSet, lookBusy *JStore[*ATNConfig, Comparator[*ATNConfig]], calledRuleStack *BitSet, seeThruPreds, addEOF bool) {

	c := NewATNConfig6(s, 0, ctx)

	if lookBusy.Contains(c) {
		return
	}

	_, present := lookBusy.Put(c)
	if present {
		return
	}
	if s == stopState {
		if ctx == nil {
			look.addOne(TokenEpsilon)
			return
		} else if ctx.isEmpty() && addEOF {
			look.addOne(TokenEOF)
			return
		}
	}

	_, ok := s.(*RuleStopState)

	if ok {
		if ctx == nil {
			look.addOne(TokenEpsilon)
			return
		} else if ctx.isEmpty() && addEOF {
			look.addOne(TokenEOF)
			return
		}

		if ctx.pcType != PredictionContextEmpty {
			removed := calledRuleStack.contains(s.GetRuleIndex())
			defer func() {
				if removed {
					calledRuleStack.add(s.GetRuleIndex())
				}
			}()
			calledRuleStack.remove(s.GetRuleIndex())
			// run thru all possible stack tops in ctx
			for i := 0; i < ctx.length(); i++ {
				returnState := la.atn.states[ctx.getReturnState(i)]
				la.look2(returnState, stopState, ctx, look, lookBusy, calledRuleStack, seeThruPreds, addEOF, i)
			}
			return
		}
	}

	n := len(s.GetTransitions())

	for i := 0; i < n; i++ {
		t := s.GetTransitions()[i]

		if t1, ok := t.(*RuleTransition); ok {
			if calledRuleStack.contains(t1.getTarget().GetRuleIndex()) {
				continue
			}

			newContext := SingletonBasePredictionContextCreate(ctx, t1.followState.GetStateNumber())
			la.look3(stopState, newContext, look, lookBusy, calledRuleStack, seeThruPreds, addEOF, t1)
		} else if t2, ok := t.(AbstractPredicateTransition); ok {
			if seeThruPreds {
				la.look1(t2.getTarget(), stopState, ctx, look, lookBusy, calledRuleStack, seeThruPreds, addEOF)
			} else {
				look.addOne(LL1AnalyzerHitPred)
			}
		} else if t.getIsEpsilon() {
			la.look1(t.getTarget(), stopState, ctx, look, lookBusy, calledRuleStack, seeThruPreds, addEOF)
		} else if _, ok := t.(*WildcardTransition); ok {
			look.addRange(TokenMinUserTokenType, la.atn.maxTokenType)
		} else {
			set := t.getLabel()
			if set != nil {
				if _, ok := t.(*NotSetTransition); ok {
					set = set.complement(TokenMinUserTokenType, la.atn.maxTokenType)
				}
				look.addSet(set)
			}
		}
	}
}

func (la *LL1Analyzer) look3(stopState ATNState, ctx *PredictionContext, look *IntervalSet, lookBusy *JStore[*ATNConfig, Comparator[*ATNConfig]],
	calledRuleStack *BitSet, seeThruPreds, addEOF bool, t1 *RuleTransition) {

	newContext := SingletonBasePredictionContextCreate(ctx, t1.followState.GetStateNumber())

	defer func() {
		calledRuleStack.remove(t1.getTarget().GetRuleIndex())
	}()

	calledRuleStack.add(t1.getTarget().GetRuleIndex())
	la.look1(t1.getTarget(), stopState, newContext, look, lookBusy, calledRuleStack, seeThruPreds, addEOF)
}
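The analyzer above backs the parser's public expected-token queries. A hedged sketch, not part of the vendored file, of the typical consumer: an error listener on an ANTLR-generated recognizer that prints the follow set, which GetExpectedTokens computes via the ATN lookahead routines in this file.

package main

import (
	"fmt"

	antlr "github.com/antlr4-go/antlr/v4"
)

// reportingListener prints the tokens the parser would have accepted at the
// point of a syntax error.
type reportingListener struct {
	*antlr.DefaultErrorListener
}

func (l *reportingListener) SyntaxError(recognizer antlr.Recognizer, offendingSymbol interface{},
	line, column int, msg string, e antlr.RecognitionException) {
	if p, ok := recognizer.(antlr.Parser); ok {
		fmt.Printf("%d:%d %s (expected one of %v)\n", line, column, msg, p.GetExpectedTokens())
	}
}

Such a listener would be registered with parser.RemoveErrorListeners() followed by parser.AddErrorListener(&reportingListener{antlr.NewDefaultErrorListener()}).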
47
e2e/vendor/github.com/antlr4-go/antlr/v4/nostatistics.go
generated
vendored
Normal file
@ -0,0 +1,47 @@
//go:build !antlr.stats

package antlr

// This file is compiled when the build configuration antlr.stats is not enabled,
// which then allows the compiler to optimize out all the code that is not used.
const collectStats = false

// goRunStats is a dummy struct used when build configuration antlr.stats is not enabled.
type goRunStats struct {
}

var Statistics = &goRunStats{}

func (s *goRunStats) AddJStatRec(_ *JStatRec) {
	// Do nothing - compiler will optimize this out (hopefully)
}

func (s *goRunStats) CollectionAnomalies() {
	// Do nothing - compiler will optimize this out (hopefully)
}

func (s *goRunStats) Reset() {
	// Do nothing - compiler will optimize this out (hopefully)
}

func (s *goRunStats) Report(dir string, prefix string) error {
	// Do nothing - compiler will optimize this out (hopefully)
	return nil
}

func (s *goRunStats) Analyze() {
	// Do nothing - compiler will optimize this out (hopefully)
}

type statsOption func(*goRunStats) error

func (s *goRunStats) Configure(options ...statsOption) error {
	// Do nothing - compiler will optimize this out (hopefully)
	return nil
}

func WithTopN(topN int) statsOption {
	return func(s *goRunStats) error {
		return nil
	}
}
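nostatistics.go is the no-op half of a build-tag pair: the `!antlr.stats` constraint selects these stubs by default, and building with `-tags antlr.stats` swaps in the real collectors. A minimal self-contained sketch of the same pattern; the package and tag names below are hypothetical:

// stats_off.go (compiled by default)
//go:build !mypkg.stats

package mypkg

const collectStats = false

func record(event string) {} // no-op stub

// stats_on.go (built with: go build -tags mypkg.stats)
//go:build mypkg.stats

package mypkg

const collectStats = true

func record(event string) { /* real collection */ }

Call sites then guard the hot path with `if collectStats { record(...) }`; in default builds the constant-false condition lets the compiler drop both the branch and the call.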
700
e2e/vendor/github.com/antlr4-go/antlr/v4/parser.go
generated
vendored
Normal file
@ -0,0 +1,700 @@
// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
// Use of this file is governed by the BSD 3-clause license that
// can be found in the LICENSE.txt file in the project root.

package antlr

import (
	"fmt"
	"strconv"
)

type Parser interface {
	Recognizer

	GetInterpreter() *ParserATNSimulator

	GetTokenStream() TokenStream
	GetTokenFactory() TokenFactory
	GetParserRuleContext() ParserRuleContext
	SetParserRuleContext(ParserRuleContext)
	Consume() Token
	GetParseListeners() []ParseTreeListener

	GetErrorHandler() ErrorStrategy
	SetErrorHandler(ErrorStrategy)
	GetInputStream() IntStream
	GetCurrentToken() Token
	GetExpectedTokens() *IntervalSet
	NotifyErrorListeners(string, Token, RecognitionException)
	IsExpectedToken(int) bool
	GetPrecedence() int
	GetRuleInvocationStack(ParserRuleContext) []string
}

type BaseParser struct {
	*BaseRecognizer

	Interpreter     *ParserATNSimulator
	BuildParseTrees bool

	input           TokenStream
	errHandler      ErrorStrategy
	precedenceStack IntStack
	ctx             ParserRuleContext

	tracer         *TraceListener
	parseListeners []ParseTreeListener
	_SyntaxErrors  int
}

// NewBaseParser contains all the parsing support code to embed in parsers. Essentially most of it is error
// recovery stuff.
//
//goland:noinspection GoUnusedExportedFunction
func NewBaseParser(input TokenStream) *BaseParser {

	p := new(BaseParser)

	p.BaseRecognizer = NewBaseRecognizer()

	// The input stream.
	p.input = nil

	// The error handling strategy for the parser. The default value is a new
	// instance of {@link DefaultErrorStrategy}.
	p.errHandler = NewDefaultErrorStrategy()
	p.precedenceStack = make([]int, 0)
	p.precedenceStack.Push(0)

	// The ParserRuleContext object for the currently executing rule.
	// This is always non-nil during the parsing process.
	p.ctx = nil

	// Specifies whether the parser should construct a parse tree during
	// the parsing process. The default value is {@code true}.
	p.BuildParseTrees = true

	// When setTrace(true) is called, a reference to the
	// TraceListener is stored here, so it can be easily removed in a
	// later call to setTrace(false). The listener itself is
	// implemented as a parser listener, so this field is not directly used by
	// other parser methods.
	p.tracer = nil

	// The list of ParseTreeListener listeners registered to receive
	// events during the parse.
	p.parseListeners = nil

	// The number of syntax errors Reported during parsing. This value is
	// incremented each time NotifyErrorListeners is called.
	p._SyntaxErrors = 0
	p.SetInputStream(input)

	return p
}

// This field maps from the serialized ATN string to the deserialized [ATN] with
// bypass alternatives.
//
// [ATNDeserializationOptions.isGenerateRuleBypassTransitions]
//
//goland:noinspection GoUnusedGlobalVariable
var bypassAltsAtnCache = make(map[string]int)

// reset the parser's state
func (p *BaseParser) reset() {
	if p.input != nil {
		p.input.Seek(0)
	}
	p.errHandler.reset(p)
	p.ctx = nil
	p._SyntaxErrors = 0
	p.SetTrace(nil)
	p.precedenceStack = make([]int, 0)
	p.precedenceStack.Push(0)
	if p.Interpreter != nil {
		p.Interpreter.reset()
	}
}

func (p *BaseParser) GetErrorHandler() ErrorStrategy {
	return p.errHandler
}

func (p *BaseParser) SetErrorHandler(e ErrorStrategy) {
	p.errHandler = e
}

// Match current input symbol against {@code ttype}. If the symbol type
// Matches, {@link ANTLRErrorStrategy//ReportMatch} and {@link //consume} are
// called to complete the Match process.
//
// <p>If the symbol type does not Match,
// {@link ANTLRErrorStrategy//recoverInline} is called on the current error
// strategy to attempt recovery. If {@link //getBuildParseTree} is
// {@code true} and the token index of the symbol returned by
// {@link ANTLRErrorStrategy//recoverInline} is -1, the symbol is added to
// the parse tree by calling {@link ParserRuleContext//addErrorNode}.</p>
//
// @param ttype the token type to Match
// @return the Matched symbol
// @panics RecognitionException if the current input symbol did not Match
// {@code ttype} and the error strategy could not recover from the
// mismatched symbol

func (p *BaseParser) Match(ttype int) Token {

	t := p.GetCurrentToken()

	if t.GetTokenType() == ttype {
		p.errHandler.ReportMatch(p)
		p.Consume()
	} else {
		t = p.errHandler.RecoverInline(p)
		if p.HasError() {
			return nil
		}
		if p.BuildParseTrees && t.GetTokenIndex() == -1 {

			// we must have conjured up a new token during single token
			// insertion if it's not the current symbol
			p.ctx.AddErrorNode(t)
		}
	}

	return t
}

// MatchWildcard matches the current input symbol as a wildcard. If the symbol type Matches
// (i.e. has a value greater than 0), {@link ANTLRErrorStrategy//ReportMatch}
// and {@link //consume} are called to complete the Match process.
//
// <p>If the symbol type does not Match,
// {@link ANTLRErrorStrategy//recoverInline} is called on the current error
// strategy to attempt recovery. If {@link //getBuildParseTree} is
// {@code true} and the token index of the symbol returned by
// {@link ANTLRErrorStrategy//recoverInline} is -1, the symbol is added to
// the parse tree by calling {@link ParserRuleContext//addErrorNode}.</p>
//
// @return the Matched symbol
// @panics RecognitionException if the current input symbol did not Match
// a wildcard and the error strategy could not recover from the mismatched
// symbol

func (p *BaseParser) MatchWildcard() Token {
	t := p.GetCurrentToken()
	if t.GetTokenType() > 0 {
		p.errHandler.ReportMatch(p)
		p.Consume()
	} else {
		t = p.errHandler.RecoverInline(p)
		if p.BuildParseTrees && t.GetTokenIndex() == -1 {
			// we must have conjured up a new token during single token
			// insertion if it's not the current symbol
			p.ctx.AddErrorNode(t)
		}
	}
	return t
}

func (p *BaseParser) GetParserRuleContext() ParserRuleContext {
	return p.ctx
}

func (p *BaseParser) SetParserRuleContext(v ParserRuleContext) {
	p.ctx = v
}

func (p *BaseParser) GetParseListeners() []ParseTreeListener {
	if p.parseListeners == nil {
		return make([]ParseTreeListener, 0)
	}
	return p.parseListeners
}

// AddParseListener registers listener to receive events during the parsing process.
//
// To support output-preserving grammar transformations (including but not
// limited to left-recursion removal, automated left-factoring, and
// optimized code generation), calls to listener methods during the parse
// may differ substantially from calls made by
// [ParseTreeWalker.DEFAULT] used after the parse is complete. In
// particular, rule entry and exit events may occur in a different order
// during the parse than after the parse. In addition, calls to certain
// rule entry methods may be omitted.
//
// With the following specific exceptions, calls to listener events are
// deterministic, i.e. for identical input the calls to listener
// methods will be the same.
//
//   - Alterations to the grammar used to generate code may change the
//     behavior of the listener calls.
//   - Alterations to the command line options passed to ANTLR 4 when
//     generating the parser may change the behavior of the listener calls.
//   - Changing the version of the ANTLR Tool used to generate the parser
//     may change the behavior of the listener calls.
func (p *BaseParser) AddParseListener(listener ParseTreeListener) {
	if listener == nil {
		panic("listener")
	}
	if p.parseListeners == nil {
		p.parseListeners = make([]ParseTreeListener, 0)
	}
	p.parseListeners = append(p.parseListeners, listener)
}

// RemoveParseListener removes listener from the list of parse listeners.
//
// If listener is nil or has not been added as a parse
// listener, this func does nothing.
func (p *BaseParser) RemoveParseListener(listener ParseTreeListener) {

	if p.parseListeners != nil {

		idx := -1
		for i, v := range p.parseListeners {
			if v == listener {
				idx = i
				break
			}
		}

		if idx == -1 {
			return
		}

		// remove the listener from the slice
		p.parseListeners = append(p.parseListeners[0:idx], p.parseListeners[idx+1:]...)

		if len(p.parseListeners) == 0 {
			p.parseListeners = nil
		}
	}
}

// removeParseListeners removes all parse listeners.
func (p *BaseParser) removeParseListeners() {
	p.parseListeners = nil
}

// TriggerEnterRuleEvent notifies all parse listeners of an enter rule event.
func (p *BaseParser) TriggerEnterRuleEvent() {
	if p.parseListeners != nil {
		ctx := p.ctx
		for _, listener := range p.parseListeners {
			listener.EnterEveryRule(ctx)
			ctx.EnterRule(listener)
		}
	}
}

// TriggerExitRuleEvent notifies any parse listeners of an exit rule event.
func (p *BaseParser) TriggerExitRuleEvent() {
	if p.parseListeners != nil {
		// reverse order walk of listeners
		ctx := p.ctx
		l := len(p.parseListeners) - 1

		for i := range p.parseListeners {
			listener := p.parseListeners[l-i]
			ctx.ExitRule(listener)
			listener.ExitEveryRule(ctx)
		}
	}
}

func (p *BaseParser) GetInterpreter() *ParserATNSimulator {
	return p.Interpreter
}

func (p *BaseParser) GetATN() *ATN {
	return p.Interpreter.atn
}

func (p *BaseParser) GetTokenFactory() TokenFactory {
	return p.input.GetTokenSource().GetTokenFactory()
}

// setTokenFactory is used to tell our token source and error strategy about a new way to create tokens.
func (p *BaseParser) setTokenFactory(factory TokenFactory) {
	p.input.GetTokenSource().setTokenFactory(factory)
}

// GetATNWithBypassAlts - the ATN with bypass alternatives is expensive to create, so we create it
// lazily.
func (p *BaseParser) GetATNWithBypassAlts() {

	// TODO - Implement this?
	panic("Not implemented!")

	// serializedAtn := p.getSerializedATN()
	// if (serializedAtn == nil) {
	// 	panic("The current parser does not support an ATN with bypass alternatives.")
	// }
	// result := p.bypassAltsAtnCache[serializedAtn]
	// if (result == nil) {
	// 	deserializationOptions := NewATNDeserializationOptions(nil)
	// 	deserializationOptions.generateRuleBypassTransitions = true
	// 	result = NewATNDeserializer(deserializationOptions).deserialize(serializedAtn)
	// 	p.bypassAltsAtnCache[serializedAtn] = result
	// }
	// return result
}

// The preferred method of getting a tree pattern. For example, here's a
// sample use:
//
// <pre>
// ParseTree t = parser.expr()
// ParseTreePattern p = parser.compileParseTreePattern("<ID>+0",
// MyParser.RULE_expr)
// ParseTreeMatch m = p.Match(t)
// String id = m.Get("ID")
// </pre>

//goland:noinspection GoUnusedParameter
func (p *BaseParser) compileParseTreePattern(pattern, patternRuleIndex, lexer Lexer) {

	panic("NewParseTreePatternMatcher not implemented!")
	//
	// if (lexer == nil) {
	// 	if (p.GetTokenStream() != nil) {
	// 		tokenSource := p.GetTokenStream().GetTokenSource()
	// 		if _, ok := tokenSource.(ILexer); ok {
	// 			lexer = tokenSource
	// 		}
	// 	}
	// }
	// if (lexer == nil) {
	// 	panic("Parser can't discover a lexer to use")
	// }

	// m := NewParseTreePatternMatcher(lexer, p)
	// return m.compile(pattern, patternRuleIndex)
}

func (p *BaseParser) GetInputStream() IntStream {
	return p.GetTokenStream()
}

func (p *BaseParser) SetInputStream(input TokenStream) {
	p.SetTokenStream(input)
}

func (p *BaseParser) GetTokenStream() TokenStream {
	return p.input
}

// SetTokenStream installs input as the token stream and resets the parser.
func (p *BaseParser) SetTokenStream(input TokenStream) {
	p.input = nil
	p.reset()
	p.input = input
}

// GetCurrentToken returns the current token at LT(1).
//
// [Match] needs to return the current input symbol, which gets put
// into the label for the associated token ref e.g., x=ID.
func (p *BaseParser) GetCurrentToken() Token {
	return p.input.LT(1)
}

func (p *BaseParser) NotifyErrorListeners(msg string, offendingToken Token, err RecognitionException) {
	if offendingToken == nil {
		offendingToken = p.GetCurrentToken()
	}
	p._SyntaxErrors++
	line := offendingToken.GetLine()
	column := offendingToken.GetColumn()
	listener := p.GetErrorListenerDispatch()
	listener.SyntaxError(p, offendingToken, line, column, msg, err)
}

func (p *BaseParser) Consume() Token {
	o := p.GetCurrentToken()
	if o.GetTokenType() != TokenEOF {
		p.GetInputStream().Consume()
	}
	hasListener := p.parseListeners != nil && len(p.parseListeners) > 0
	if p.BuildParseTrees || hasListener {
		if p.errHandler.InErrorRecoveryMode(p) {
			node := p.ctx.AddErrorNode(o)
			if p.parseListeners != nil {
				for _, l := range p.parseListeners {
					l.VisitErrorNode(node)
				}
			}

		} else {
			node := p.ctx.AddTokenNode(o)
			if p.parseListeners != nil {
				for _, l := range p.parseListeners {
					l.VisitTerminal(node)
				}
			}
		}
		// node.invokingState = p.state
	}

	return o
}

func (p *BaseParser) addContextToParseTree() {
	// add current context to parent if we have a parent
	if p.ctx.GetParent() != nil {
		p.ctx.GetParent().(ParserRuleContext).AddChild(p.ctx)
	}
}

func (p *BaseParser) EnterRule(localctx ParserRuleContext, state, _ int) {
	p.SetState(state)
	p.ctx = localctx
	p.ctx.SetStart(p.input.LT(1))
	if p.BuildParseTrees {
		p.addContextToParseTree()
	}
	if p.parseListeners != nil {
		p.TriggerEnterRuleEvent()
	}
}

func (p *BaseParser) ExitRule() {
	p.ctx.SetStop(p.input.LT(-1))
	// trigger event on ctx, before it reverts to parent
	if p.parseListeners != nil {
		p.TriggerExitRuleEvent()
	}
	p.SetState(p.ctx.GetInvokingState())
	if p.ctx.GetParent() != nil {
		p.ctx = p.ctx.GetParent().(ParserRuleContext)
	} else {
		p.ctx = nil
	}
}

func (p *BaseParser) EnterOuterAlt(localctx ParserRuleContext, altNum int) {
	localctx.SetAltNumber(altNum)
	// if we have a new localctx, make sure we replace existing ctx
	// that is previous child of parse tree
	if p.BuildParseTrees && p.ctx != localctx {
		if p.ctx.GetParent() != nil {
			p.ctx.GetParent().(ParserRuleContext).RemoveLastChild()
			p.ctx.GetParent().(ParserRuleContext).AddChild(localctx)
		}
	}
	p.ctx = localctx
}

// GetPrecedence returns the precedence level for the topmost precedence rule, or -1 if
// the parser context is not nested within a precedence rule.
func (p *BaseParser) GetPrecedence() int {
	if len(p.precedenceStack) == 0 {
		return -1
	}

	return p.precedenceStack[len(p.precedenceStack)-1]
}

func (p *BaseParser) EnterRecursionRule(localctx ParserRuleContext, state, _, precedence int) {
	p.SetState(state)
	p.precedenceStack.Push(precedence)
	p.ctx = localctx
	p.ctx.SetStart(p.input.LT(1))
	if p.parseListeners != nil {
		p.TriggerEnterRuleEvent() // simulates rule entry for
		// left-recursive rules
	}
}

// PushNewRecursionContext is like [EnterRule] but for recursive rules.
func (p *BaseParser) PushNewRecursionContext(localctx ParserRuleContext, state, _ int) {
	previous := p.ctx
	previous.SetParent(localctx)
	previous.SetInvokingState(state)
	previous.SetStop(p.input.LT(-1))

	p.ctx = localctx
	p.ctx.SetStart(previous.GetStart())
	if p.BuildParseTrees {
		p.ctx.AddChild(previous)
	}
	if p.parseListeners != nil {
		p.TriggerEnterRuleEvent() // simulates rule entry for
		// left-recursive rules
	}
}

func (p *BaseParser) UnrollRecursionContexts(parentCtx ParserRuleContext) {
	_, _ = p.precedenceStack.Pop()
	p.ctx.SetStop(p.input.LT(-1))
	retCtx := p.ctx // save current ctx (return value)
	// unroll so ctx is as it was before call to recursive method
	if p.parseListeners != nil {
		for p.ctx != parentCtx {
			p.TriggerExitRuleEvent()
			p.ctx = p.ctx.GetParent().(ParserRuleContext)
		}
	} else {
		p.ctx = parentCtx
	}
	// hook into tree
	retCtx.SetParent(parentCtx)
	if p.BuildParseTrees && parentCtx != nil {
		// add return ctx into invoking rule's tree
		parentCtx.AddChild(retCtx)
	}
}

func (p *BaseParser) GetInvokingContext(ruleIndex int) ParserRuleContext {
	ctx := p.ctx
	for ctx != nil {
		if ctx.GetRuleIndex() == ruleIndex {
			return ctx
		}
		ctx = ctx.GetParent().(ParserRuleContext)
	}
	return nil
}

func (p *BaseParser) Precpred(_ RuleContext, precedence int) bool {
	return precedence >= p.precedenceStack[len(p.precedenceStack)-1]
}
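// Editor's note: an illustrative sketch, not part of the vendored file, of how
// ANTLR-generated code drives the precedence stack above. For a hypothetical
// left-recursive rule
//
//	expr : expr '*' expr   // precedence 2
//	     | expr '+' expr   // precedence 1
//	     | INT ;
//
// the generated loop guards each left-recursive alternative with Precpred, so
// a lower-precedence continuation is rejected while a higher-precedence
// subexpression is still being parsed:
//
//	if p.Precpred(p.GetParserRuleContext(), 2) { /* continue with '*' expr */ }
//	if p.Precpred(p.GetParserRuleContext(), 1) { /* continue with '+' expr */ }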

//goland:noinspection GoUnusedParameter
func (p *BaseParser) inContext(context ParserRuleContext) bool {
	// TODO: useful in parser?
	return false
}

// IsExpectedToken checks whether symbol can follow the current state in the
// [ATN]. The behavior of this method is equivalent to the following, but is
// implemented such that the complete context-sensitive follow set does not
// need to be explicitly constructed.
//
//	return getExpectedTokens().contains(symbol)
func (p *BaseParser) IsExpectedToken(symbol int) bool {
	atn := p.Interpreter.atn
	ctx := p.ctx
	s := atn.states[p.state]
	following := atn.NextTokens(s, nil)
	if following.contains(symbol) {
		return true
	}
	if !following.contains(TokenEpsilon) {
		return false
	}
	for ctx != nil && ctx.GetInvokingState() >= 0 && following.contains(TokenEpsilon) {
		invokingState := atn.states[ctx.GetInvokingState()]
		rt := invokingState.GetTransitions()[0]
		following = atn.NextTokens(rt.(*RuleTransition).followState, nil)
		if following.contains(symbol) {
			return true
		}
		ctx = ctx.GetParent().(ParserRuleContext)
	}
	if following.contains(TokenEpsilon) && symbol == TokenEOF {
		return true
	}

	return false
}

// GetExpectedTokens returns the set of input symbols which could follow the current parser
// state and context, as given by [GetState] and [GetContext],
// respectively.
func (p *BaseParser) GetExpectedTokens() *IntervalSet {
	return p.Interpreter.atn.getExpectedTokens(p.state, p.ctx)
}

func (p *BaseParser) GetExpectedTokensWithinCurrentRule() *IntervalSet {
	atn := p.Interpreter.atn
	s := atn.states[p.state]
	return atn.NextTokens(s, nil)
}

// GetRuleIndex gets a rule's index (i.e., the RULE_ruleName field) or -1 if not found.
func (p *BaseParser) GetRuleIndex(ruleName string) int {
	var ruleIndex, ok = p.GetRuleIndexMap()[ruleName]
	if ok {
		return ruleIndex
	}

	return -1
}

// GetRuleInvocationStack returns a list of the rule names in your parser instance
// leading up to a call to the current rule. You could override if
// you want more details such as the file/line info of where
// in the ATN a rule is invoked.
func (p *BaseParser) GetRuleInvocationStack(c ParserRuleContext) []string {
	if c == nil {
		c = p.ctx
	}
	stack := make([]string, 0)
	for c != nil {
		// compute what follows who invoked us
		ruleIndex := c.GetRuleIndex()
		if ruleIndex < 0 {
			stack = append(stack, "n/a")
		} else {
			stack = append(stack, p.GetRuleNames()[ruleIndex])
		}

		vp := c.GetParent()

		if vp == nil {
			break
		}

		c = vp.(ParserRuleContext)
	}
	return stack
}

// GetDFAStrings returns a list of all DFA states used for debugging purposes.
func (p *BaseParser) GetDFAStrings() string {
	return fmt.Sprint(p.Interpreter.decisionToDFA)
}

// DumpDFA prints the whole of the DFA for debugging.
func (p *BaseParser) DumpDFA() {
	seenOne := false
	for _, dfa := range p.Interpreter.decisionToDFA {
		if dfa.Len() > 0 {
			if seenOne {
				fmt.Println()
			}
			fmt.Println("Decision " + strconv.Itoa(dfa.decision) + ":")
			fmt.Print(dfa.String(p.LiteralNames, p.SymbolicNames))
			seenOne = true
		}
	}
}

func (p *BaseParser) GetSourceName() string {
	return p.GrammarFileName
}

// SetTrace installs a trace listener for the parse.
//
// During a parse it is sometimes useful to listen in on the rule entry and exit
// events as well as token Matches. This is for quick and dirty debugging.
func (p *BaseParser) SetTrace(trace *TraceListener) {
	if trace == nil {
		p.RemoveParseListener(p.tracer)
		p.tracer = nil
	} else {
		if p.tracer != nil {
			p.RemoveParseListener(p.tracer)
		}
		p.tracer = NewTraceListener(p)
		p.AddParseListener(p.tracer)
	}
}
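Taken together, the BaseParser plumbing above is what a generated parser drives. A hedged end-to-end sketch, not part of the vendored diff: NewMyLexer, NewMyParser, and the Expr start rule are hypothetical ANTLR-generated artifacts, while the antlr calls are the runtime entry points defined in this file.

package main

import (
	"fmt"

	antlr "github.com/antlr4-go/antlr/v4"
)

func main() {
	input := antlr.NewInputStream("1 + 2 * 3")
	lexer := NewMyLexer(input) // hypothetical generated lexer
	tokens := antlr.NewCommonTokenStream(lexer, antlr.TokenDefaultChannel)
	parser := NewMyParser(tokens) // hypothetical generated parser

	// BuildParseTrees defaults to true (see NewBaseParser), so the start
	// rule returns a parse tree assembled by EnterRule/Consume/ExitRule above.
	tree := parser.Expr() // hypothetical start rule
	fmt.Println(tree.ToStringTree(parser.GetRuleNames(), parser))
}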
1668
e2e/vendor/github.com/antlr4-go/antlr/v4/parser_atn_simulator.go
generated
vendored
Normal file
File diff suppressed because it is too large
421
e2e/vendor/github.com/antlr4-go/antlr/v4/parser_rule_context.go
generated
vendored
Normal file
@ -0,0 +1,421 @@
// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
// Use of this file is governed by the BSD 3-clause license that
// can be found in the LICENSE.txt file in the project root.

package antlr

import (
	"reflect"
	"strconv"
)

type ParserRuleContext interface {
	RuleContext

	SetException(RecognitionException)

	AddTokenNode(token Token) *TerminalNodeImpl
	AddErrorNode(badToken Token) *ErrorNodeImpl

	EnterRule(listener ParseTreeListener)
	ExitRule(listener ParseTreeListener)

	SetStart(Token)
	GetStart() Token

	SetStop(Token)
	GetStop() Token

	AddChild(child RuleContext) RuleContext
	RemoveLastChild()
}

type BaseParserRuleContext struct {
	parentCtx     RuleContext
	invokingState int
	RuleIndex     int

	start, stop Token
	exception   RecognitionException
	children    []Tree
}

func NewBaseParserRuleContext(parent ParserRuleContext, invokingStateNumber int) *BaseParserRuleContext {
	prc := new(BaseParserRuleContext)
	InitBaseParserRuleContext(prc, parent, invokingStateNumber)
	return prc
}

func InitBaseParserRuleContext(prc *BaseParserRuleContext, parent ParserRuleContext, invokingStateNumber int) {
	// What context invoked this rule?
	prc.parentCtx = parent

	// What state invoked the rule associated with this context?
	// The "return address" is the followState of invokingState.
	// If parent is nil, this should be -1.
	if parent == nil {
		prc.invokingState = -1
	} else {
		prc.invokingState = invokingStateNumber
	}

	prc.RuleIndex = -1
	// If we are debugging or building a parse tree for a Visitor,
	// we need to track all of the tokens and rule invocations associated
	// with this rule's context. This is empty for parsing w/o tree construction
	// because we don't need to track the details about how we parse this rule.
	prc.children = nil
	prc.start = nil
	prc.stop = nil
	// The exception that forced this rule to return. If the rule successfully
	// completed, this is nil.
	prc.exception = nil
}

func (prc *BaseParserRuleContext) SetException(e RecognitionException) {
	prc.exception = e
}

func (prc *BaseParserRuleContext) GetChildren() []Tree {
	return prc.children
}

func (prc *BaseParserRuleContext) CopyFrom(ctx *BaseParserRuleContext) {
	// from RuleContext
	prc.parentCtx = ctx.parentCtx
	prc.invokingState = ctx.invokingState
	prc.children = nil
	prc.start = ctx.start
	prc.stop = ctx.stop
}

func (prc *BaseParserRuleContext) GetText() string {
	if prc.GetChildCount() == 0 {
		return ""
	}

	var s string
	for _, child := range prc.children {
		s += child.(ParseTree).GetText()
	}

	return s
}

// EnterRule is called when any rule is entered.
func (prc *BaseParserRuleContext) EnterRule(_ ParseTreeListener) {
}

// ExitRule is called when any rule is exited.
func (prc *BaseParserRuleContext) ExitRule(_ ParseTreeListener) {
}

// addTerminalNodeChild does not set the parent link; the other add methods do that.
func (prc *BaseParserRuleContext) addTerminalNodeChild(child TerminalNode) TerminalNode {
	if prc.children == nil {
		prc.children = make([]Tree, 0)
	}
	if child == nil {
		panic("Child may not be null")
	}
	prc.children = append(prc.children, child)
	return child
}

func (prc *BaseParserRuleContext) AddChild(child RuleContext) RuleContext {
	if prc.children == nil {
		prc.children = make([]Tree, 0)
	}
	if child == nil {
		panic("Child may not be null")
	}
	prc.children = append(prc.children, child)
	return child
}

// RemoveLastChild is used by [EnterOuterAlt] to toss out a [RuleContext] previously added as
// we entered a rule. If we have a label, we will need to remove
// the generic ruleContext object.
func (prc *BaseParserRuleContext) RemoveLastChild() {
	if prc.children != nil && len(prc.children) > 0 {
		prc.children = prc.children[0 : len(prc.children)-1]
	}
}

func (prc *BaseParserRuleContext) AddTokenNode(token Token) *TerminalNodeImpl {
	node := NewTerminalNodeImpl(token)
	prc.addTerminalNodeChild(node)
	node.parentCtx = prc
	return node
}

func (prc *BaseParserRuleContext) AddErrorNode(badToken Token) *ErrorNodeImpl {
	node := NewErrorNodeImpl(badToken)
	prc.addTerminalNodeChild(node)
	node.parentCtx = prc
	return node
}

func (prc *BaseParserRuleContext) GetChild(i int) Tree {
	// note: the bounds check must be strict, or i == len(children) would panic
	if prc.children != nil && len(prc.children) > i {
		return prc.children[i]
	}

	return nil
}

func (prc *BaseParserRuleContext) GetChildOfType(i int, childType reflect.Type) RuleContext {
	if childType == nil {
		return prc.GetChild(i).(RuleContext)
	}

	for j := 0; j < len(prc.children); j++ {
		child := prc.children[j]
		if reflect.TypeOf(child) == childType {
			if i == 0 {
				return child.(RuleContext)
			}

			i--
		}
	}

	return nil
}

func (prc *BaseParserRuleContext) ToStringTree(ruleNames []string, recog Recognizer) string {
	return TreesStringTree(prc, ruleNames, recog)
}

func (prc *BaseParserRuleContext) GetRuleContext() RuleContext {
	return prc
}

func (prc *BaseParserRuleContext) Accept(visitor ParseTreeVisitor) interface{} {
	return visitor.VisitChildren(prc)
}

func (prc *BaseParserRuleContext) SetStart(t Token) {
	prc.start = t
}

func (prc *BaseParserRuleContext) GetStart() Token {
	return prc.start
}

func (prc *BaseParserRuleContext) SetStop(t Token) {
	prc.stop = t
}

func (prc *BaseParserRuleContext) GetStop() Token {
	return prc.stop
}

func (prc *BaseParserRuleContext) GetToken(ttype int, i int) TerminalNode {

	for j := 0; j < len(prc.children); j++ {
		child := prc.children[j]
		if c2, ok := child.(TerminalNode); ok {
			if c2.GetSymbol().GetTokenType() == ttype {
				if i == 0 {
					return c2
				}

				i--
			}
		}
	}
	return nil
}

func (prc *BaseParserRuleContext) GetTokens(ttype int) []TerminalNode {
	if prc.children == nil {
		return make([]TerminalNode, 0)
	}

	tokens := make([]TerminalNode, 0)

	for j := 0; j < len(prc.children); j++ {
		child := prc.children[j]
		if tchild, ok := child.(TerminalNode); ok {
			if tchild.GetSymbol().GetTokenType() == ttype {
				tokens = append(tokens, tchild)
			}
		}
	}

	return tokens
}

func (prc *BaseParserRuleContext) GetPayload() interface{} {
	return prc
}

func (prc *BaseParserRuleContext) getChild(ctxType reflect.Type, i int) RuleContext {
	if prc.children == nil || i < 0 || i >= len(prc.children) {
		return nil
	}

	j := -1 // what element have we found with ctxType?
	for _, o := range prc.children {

		childType := reflect.TypeOf(o)

		if childType.Implements(ctxType) {
			j++
			if j == i {
				return o.(RuleContext)
			}
		}
	}
	return nil
}

// Go lacks generics, so it's not possible for us to return the child with the correct type, but we do
// check for convertibility

func (prc *BaseParserRuleContext) GetTypedRuleContext(ctxType reflect.Type, i int) RuleContext {
	return prc.getChild(ctxType, i)
}

func (prc *BaseParserRuleContext) GetTypedRuleContexts(ctxType reflect.Type) []RuleContext {
	if prc.children == nil {
		return make([]RuleContext, 0)
	}

	contexts := make([]RuleContext, 0)

	for _, child := range prc.children {
		childType := reflect.TypeOf(child)

		if childType.ConvertibleTo(ctxType) {
			contexts = append(contexts, child.(RuleContext))
		}
	}
	return contexts
}

func (prc *BaseParserRuleContext) GetChildCount() int {
	if prc.children == nil {
		return 0
	}

	return len(prc.children)
}

func (prc *BaseParserRuleContext) GetSourceInterval() Interval {
	if prc.start == nil || prc.stop == nil {
		return TreeInvalidInterval
	}

	return NewInterval(prc.start.GetTokenIndex(), prc.stop.GetTokenIndex())
}

// need to manage circular dependencies, so export now

// String prints out a whole tree, not just a node, in LISP format
// (root child1 .. childN), or just the node if this is a leaf.
func (prc *BaseParserRuleContext) String(ruleNames []string, stop RuleContext) string {

	var p ParserRuleContext = prc
	s := "["
	for p != nil && p != stop {
		if ruleNames == nil {
			if !p.IsEmpty() {
				s += strconv.Itoa(p.GetInvokingState())
			}
		} else {
			ri := p.GetRuleIndex()
			var ruleName string
			if ri >= 0 && ri < len(ruleNames) {
				ruleName = ruleNames[ri]
			} else {
				ruleName = strconv.Itoa(ri)
			}
			s += ruleName
		}
		if p.GetParent() != nil && (ruleNames != nil || !p.GetParent().(ParserRuleContext).IsEmpty()) {
			s += " "
		}
		pi := p.GetParent()
		if pi != nil {
			p = pi.(ParserRuleContext)
		} else {
			p = nil
		}
	}
	s += "]"
	return s
}

func (prc *BaseParserRuleContext) SetParent(v Tree) {
	if v == nil {
		prc.parentCtx = nil
	} else {
		prc.parentCtx = v.(RuleContext)
	}
}

func (prc *BaseParserRuleContext) GetInvokingState() int {
	return prc.invokingState
}

func (prc *BaseParserRuleContext) SetInvokingState(t int) {
	prc.invokingState = t
}

func (prc *BaseParserRuleContext) GetRuleIndex() int {
	return prc.RuleIndex
}

func (prc *BaseParserRuleContext) GetAltNumber() int {
	return ATNInvalidAltNumber
}

func (prc *BaseParserRuleContext) SetAltNumber(_ int) {}

// IsEmpty returns true if the context of this node is empty.
//
// A context is empty if there is no invoking state, meaning nobody calls the
// current context.
func (prc *BaseParserRuleContext) IsEmpty() bool {
	return prc.invokingState == -1
}

// GetParent returns the parent context of this context, or nil if this is the
// root of the context tree.
func (prc *BaseParserRuleContext) GetParent() Tree {
	return prc.parentCtx
}

var ParserRuleContextEmpty = NewBaseParserRuleContext(nil, -1)

type InterpreterRuleContext interface {
	ParserRuleContext
}

type BaseInterpreterRuleContext struct {
	*BaseParserRuleContext
}

//goland:noinspection GoUnusedExportedFunction
func NewBaseInterpreterRuleContext(parent BaseInterpreterRuleContext, invokingStateNumber, ruleIndex int) *BaseInterpreterRuleContext {

	prc := new(BaseInterpreterRuleContext)

	prc.BaseParserRuleContext = NewBaseParserRuleContext(parent, invokingStateNumber)

	prc.RuleIndex = ruleIndex

	return prc
}
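A short, hedged sketch of consuming the context API above after a parse; ctx is assumed to be the ParserRuleContext returned by a hypothetical generated rule method, and ruleNames comes from the recognizer's GetRuleNames().

func dump(ctx antlr.ParserRuleContext, ruleNames []string) {
	// Children are either TerminalNode wrappers added by AddTokenNode or
	// nested rule contexts added by AddChild.
	for i := 0; i < ctx.GetChildCount(); i++ {
		switch child := ctx.GetChild(i).(type) {
		case antlr.TerminalNode:
			fmt.Println("token:", child.GetText())
		case antlr.ParserRuleContext:
			fmt.Println("rule :", ruleNames[child.GetRuleIndex()], "->", child.GetText())
		}
	}
}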
727
e2e/vendor/github.com/antlr4-go/antlr/v4/prediction_context.go
generated
vendored
Normal file
@ -0,0 +1,727 @@
// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
// Use of this file is governed by the BSD 3-clause license that
// can be found in the LICENSE.txt file in the project root.

package antlr

import (
	"fmt"
	"strconv"

	"golang.org/x/exp/slices"
)

var _emptyPredictionContextHash int

func init() {
	_emptyPredictionContextHash = murmurInit(1)
	_emptyPredictionContextHash = murmurFinish(_emptyPredictionContextHash, 0)
}

func calculateEmptyHash() int {
	return _emptyPredictionContextHash
}

const (
	// BasePredictionContextEmptyReturnState represents {@code $} in an array in full context mode, $
	// doesn't mean wildcard:
	//
	//	$ + x = [$,x]
	//
	// Here,
	//
	//	$ = EmptyReturnState
	BasePredictionContextEmptyReturnState = 0x7FFFFFFF
)

// TODO: JI These are meant to be atomics - this does not seem to match the Java runtime here
//
//goland:noinspection GoUnusedGlobalVariable
var (
	BasePredictionContextglobalNodeCount = 1
	BasePredictionContextid             = BasePredictionContextglobalNodeCount
)

const (
	PredictionContextEmpty = iota
	PredictionContextSingleton
	PredictionContextArray
)

// PredictionContext is a Go idiomatic implementation of PredictionContext that does not try to
// emulate inheritance from Java, and can be used without an interface definition. An interface
// is not required because no user code will ever need to implement this interface.
type PredictionContext struct {
	cachedHash   int
	pcType       int
	parentCtx    *PredictionContext
	returnState  int
	parents      []*PredictionContext
	returnStates []int
}

func NewEmptyPredictionContext() *PredictionContext {
	nep := &PredictionContext{}
	nep.cachedHash = calculateEmptyHash()
	nep.pcType = PredictionContextEmpty
	nep.returnState = BasePredictionContextEmptyReturnState
	return nep
}

func NewBaseSingletonPredictionContext(parent *PredictionContext, returnState int) *PredictionContext {
	pc := &PredictionContext{}
	pc.pcType = PredictionContextSingleton
	pc.returnState = returnState
	pc.parentCtx = parent
	if parent != nil {
		pc.cachedHash = calculateHash(parent, returnState)
	} else {
		pc.cachedHash = calculateEmptyHash()
	}
	return pc
}

func SingletonBasePredictionContextCreate(parent *PredictionContext, returnState int) *PredictionContext {
	if returnState == BasePredictionContextEmptyReturnState && parent == nil {
		// someone can pass in the bits of an array ctx that mean $
		return BasePredictionContextEMPTY
	}
	return NewBaseSingletonPredictionContext(parent, returnState)
}

func NewArrayPredictionContext(parents []*PredictionContext, returnStates []int) *PredictionContext {
	// Parent can be nil only if full ctx mode and we make an array
	// from {@link //EMPTY} and non-empty. We merge {@link //EMPTY} by using
	// nil parent and
	// returnState == {@link //EmptyReturnState}.
	hash := murmurInit(1)
	for _, parent := range parents {
		hash = murmurUpdate(hash, parent.Hash())
	}
	for _, returnState := range returnStates {
		hash = murmurUpdate(hash, returnState)
	}
	hash = murmurFinish(hash, len(parents)<<1)

	nec := &PredictionContext{}
	nec.cachedHash = hash
	nec.pcType = PredictionContextArray
	nec.parents = parents
	nec.returnStates = returnStates
	return nec
}

func (p *PredictionContext) Hash() int {
	return p.cachedHash
}

func (p *PredictionContext) Equals(other Collectable[*PredictionContext]) bool {
	switch p.pcType {
	case PredictionContextEmpty:
		// A comma-ok assertion keeps a nil or foreign Collectable from panicking here.
		otherP, _ := other.(*PredictionContext)
		return otherP == nil || otherP.isEmpty()
	case PredictionContextSingleton:
		return p.SingletonEquals(other)
	case PredictionContextArray:
		return p.ArrayEquals(other)
	}
	return false
}

func (p *PredictionContext) ArrayEquals(o Collectable[*PredictionContext]) bool {
	if o == nil {
		return false
	}
	other := o.(*PredictionContext)
	if other == nil || other.pcType != PredictionContextArray {
		return false
	}
	if p.cachedHash != other.Hash() {
		return false // can't be same if hash is different
	}

	// Must compare the actual array elements and not just the array address
	//
	return slices.Equal(p.returnStates, other.returnStates) &&
		slices.EqualFunc(p.parents, other.parents, func(x, y *PredictionContext) bool {
			return x.Equals(y)
		})
}

func (p *PredictionContext) SingletonEquals(other Collectable[*PredictionContext]) bool {
	if other == nil {
		return false
	}
	otherP := other.(*PredictionContext)
	if otherP == nil {
		return false
	}

	if p.cachedHash != otherP.Hash() {
		return false // Can't be same if hash is different
	}

	if p.returnState != otherP.getReturnState(0) {
		return false
	}

	// Both parents must be nil if one is
	if p.parentCtx == nil {
		return otherP.parentCtx == nil
	}

	return p.parentCtx.Equals(otherP.parentCtx)
}

func (p *PredictionContext) GetParent(i int) *PredictionContext {
	switch p.pcType {
	case PredictionContextEmpty:
		return nil
	case PredictionContextSingleton:
		return p.parentCtx
	case PredictionContextArray:
		return p.parents[i]
	}
	return nil
}

func (p *PredictionContext) getReturnState(i int) int {
	switch p.pcType {
	case PredictionContextArray:
		return p.returnStates[i]
	default:
		return p.returnState
	}
}

func (p *PredictionContext) GetReturnStates() []int {
	switch p.pcType {
	case PredictionContextArray:
		return p.returnStates
	default:
		return []int{p.returnState}
	}
}

func (p *PredictionContext) length() int {
	switch p.pcType {
	case PredictionContextArray:
		return len(p.returnStates)
	default:
		return 1
	}
}

func (p *PredictionContext) hasEmptyPath() bool {
	switch p.pcType {
	case PredictionContextSingleton:
		return p.returnState == BasePredictionContextEmptyReturnState
	}
	return p.getReturnState(p.length()-1) == BasePredictionContextEmptyReturnState
}

func (p *PredictionContext) String() string {
	switch p.pcType {
	case PredictionContextEmpty:
		return "$"
	case PredictionContextSingleton:
		var up string

		if p.parentCtx == nil {
			up = ""
		} else {
			up = p.parentCtx.String()
		}

		if len(up) == 0 {
			if p.returnState == BasePredictionContextEmptyReturnState {
				return "$"
			}

			return strconv.Itoa(p.returnState)
		}

		return strconv.Itoa(p.returnState) + " " + up
	case PredictionContextArray:
		if p.isEmpty() {
			return "[]"
		}

		s := "["
		for i := 0; i < len(p.returnStates); i++ {
			if i > 0 {
				s = s + ", "
			}
			if p.returnStates[i] == BasePredictionContextEmptyReturnState {
				s = s + "$"
				continue
			}
			s = s + strconv.Itoa(p.returnStates[i])
			if !p.parents[i].isEmpty() {
				s = s + " " + p.parents[i].String()
			} else {
				s = s + "nil"
			}
		}
		return s + "]"

	default:
		return "unknown"
	}
}

func (p *PredictionContext) isEmpty() bool {
	switch p.pcType {
	case PredictionContextEmpty:
		return true
	case PredictionContextArray:
		// since EmptyReturnState can only appear in the last position, we
		// don't need to verify that size==1
		return p.returnStates[0] == BasePredictionContextEmptyReturnState
	default:
		return false
	}
}

func (p *PredictionContext) Type() int {
	return p.pcType
}

func calculateHash(parent *PredictionContext, returnState int) int {
	h := murmurInit(1)
	h = murmurUpdate(h, parent.Hash())
	h = murmurUpdate(h, returnState)
	return murmurFinish(h, 2)
}

// Convert a {@link RuleContext} tree to a {@link BasePredictionContext} graph.
// Return {@link //EMPTY} if {@code outerContext} is empty or nil.
func predictionContextFromRuleContext(a *ATN, outerContext RuleContext) *PredictionContext {
	if outerContext == nil {
		outerContext = ParserRuleContextEmpty
	}
	// if we are in RuleContext of start rule, s, then BasePredictionContext
	// is EMPTY. Nobody called us. (if we are empty, return empty)
	if outerContext.GetParent() == nil || outerContext == ParserRuleContextEmpty {
		return BasePredictionContextEMPTY
	}
	// If we have a parent, convert it to a BasePredictionContext graph
	parent := predictionContextFromRuleContext(a, outerContext.GetParent().(RuleContext))
	state := a.states[outerContext.GetInvokingState()]
	transition := state.GetTransitions()[0]

	return SingletonBasePredictionContextCreate(parent, transition.(*RuleTransition).followState.GetStateNumber())
}

func merge(a, b *PredictionContext, rootIsWildcard bool, mergeCache *JPCMap) *PredictionContext {

	// Share same graph if both same
	//
	if a == b || a.Equals(b) {
		return a
	}

	if a.pcType == PredictionContextSingleton && b.pcType == PredictionContextSingleton {
		return mergeSingletons(a, b, rootIsWildcard, mergeCache)
	}
	// At least one of a or b is array.
	// If one is $ and rootIsWildcard, return $ as wildcard.
	if rootIsWildcard {
		if a.isEmpty() {
			return a
		}
		if b.isEmpty() {
			return b
		}
	}

	// Convert either Singleton or Empty to arrays, so that we can merge them
	//
	ara := convertToArray(a)
	arb := convertToArray(b)
	return mergeArrays(ara, arb, rootIsWildcard, mergeCache)
}
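// Editor's note: an illustrative, in-package sketch (it would live in a _test.go
// file, since merge is unexported) of merge's cheapest path: identical or equal
// graphs are shared rather than merged.
//
//	func TestMergeSharesIdenticalGraphs(t *testing.T) {
//		empty := NewEmptyPredictionContext()
//		x := SingletonBasePredictionContextCreate(empty, 7)
//		if merge(x, x, true, nil) != x {
//			t.Fatal("expected the same graph back, not a copy")
//		}
//	}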

func convertToArray(pc *PredictionContext) *PredictionContext {
	switch pc.Type() {
	case PredictionContextEmpty:
		return NewArrayPredictionContext([]*PredictionContext{}, []int{})
	case PredictionContextSingleton:
		return NewArrayPredictionContext([]*PredictionContext{pc.GetParent(0)}, []int{pc.getReturnState(0)})
	default:
		// Already an array
	}
	return pc
}

// mergeSingletons merges two Singleton [PredictionContext] instances.
//
// <p>Stack tops equal, parents merge is same, return left graph.<br>
// <embed src="images/SingletonMerge_SameRootSamePar.svg"
// type="image/svg+xml"/></p>
//
// <p>Same stack top, parents differ merge parents giving array node, then
// remainders of those graphs. A new root node is created to point to the
// merged parents.<br>
// <embed src="images/SingletonMerge_SameRootDiffPar.svg"
// type="image/svg+xml"/></p>
//
// <p>Different stack tops pointing to same parent. Make array node for the
// root where both elements in the root point to the same (original)
// parent.<br>
// <embed src="images/SingletonMerge_DiffRootSamePar.svg"
// type="image/svg+xml"/></p>
//
// <p>Different stack tops pointing to different parents. Make array node for
// the root where each element points to the corresponding original
// parent.<br>
// <embed src="images/SingletonMerge_DiffRootDiffPar.svg"
// type="image/svg+xml"/></p>
//
// @param a the first {@link SingletonBasePredictionContext}
// @param b the second {@link SingletonBasePredictionContext}
// @param rootIsWildcard {@code true} if this is a local-context merge,
// otherwise false to indicate a full-context merge
// @param mergeCache
func mergeSingletons(a, b *PredictionContext, rootIsWildcard bool, mergeCache *JPCMap) *PredictionContext {
	if mergeCache != nil {
		previous, present := mergeCache.Get(a, b)
		if present {
			return previous
		}
		previous, present = mergeCache.Get(b, a)
		if present {
			return previous
		}
	}

	rootMerge := mergeRoot(a, b, rootIsWildcard)
	if rootMerge != nil {
		if mergeCache != nil {
			mergeCache.Put(a, b, rootMerge)
		}
		return rootMerge
	}
	if a.returnState == b.returnState {
		parent := merge(a.parentCtx, b.parentCtx, rootIsWildcard, mergeCache)
		// if parent is same as existing a or b parent or reduced to a parent,
		// return it
		if parent.Equals(a.parentCtx) {
			return a // ax + bx = ax, if a=b
		}
		if parent.Equals(b.parentCtx) {
			return b // ax + bx = bx, if a=b
		}
		// else: ax + ay = a'[x,y]
		// merge parents x and y, giving array node with x,y then remainders
		// of those graphs. dup a, a' points at merged array.
		// New joined parent so create a new singleton pointing to it, a'
		spc := SingletonBasePredictionContextCreate(parent, a.returnState)
		if mergeCache != nil {
			mergeCache.Put(a, b, spc)
		}
		return spc
	}
	// a != b payloads differ
	// see if we can collapse parents due to $+x parents if local ctx
	var singleParent *PredictionContext
	if a.Equals(b) || (a.parentCtx != nil && a.parentCtx.Equals(b.parentCtx)) { // ax +
		// bx =
		// [a,b]x
		singleParent = a.parentCtx
	}
	if singleParent != nil { // parents are same
		// sort payloads and use same parent
		payloads := []int{a.returnState, b.returnState}
		if a.returnState > b.returnState {
|
||||
payloads[0] = b.returnState
|
||||
payloads[1] = a.returnState
|
||||
}
|
||||
parents := []*PredictionContext{singleParent, singleParent}
|
||||
apc := NewArrayPredictionContext(parents, payloads)
|
||||
if mergeCache != nil {
|
||||
mergeCache.Put(a, b, apc)
|
||||
}
|
||||
return apc
|
||||
}
|
||||
// parents differ and can't merge them. Just pack together
|
||||
// into array can't merge.
|
||||
// ax + by = [ax,by]
|
||||
payloads := []int{a.returnState, b.returnState}
|
||||
parents := []*PredictionContext{a.parentCtx, b.parentCtx}
|
||||
if a.returnState > b.returnState { // sort by payload
|
||||
payloads[0] = b.returnState
|
||||
payloads[1] = a.returnState
|
||||
parents = []*PredictionContext{b.parentCtx, a.parentCtx}
|
||||
}
|
||||
apc := NewArrayPredictionContext(parents, payloads)
|
||||
if mergeCache != nil {
|
||||
mergeCache.Put(a, b, apc)
|
||||
}
|
||||
return apc
|
||||
}
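
// For example, a sketch of the "same parent, different stack tops" case above:
// the two singletons collapse into a single array node that shares the parent,
// with the payloads sorted ascending.
//
//	p := SingletonBasePredictionContextCreate(BasePredictionContextEMPTY, 3)
//	ax := SingletonBasePredictionContextCreate(p, 9)
//	ay := SingletonBasePredictionContextCreate(p, 7)
//	_ = mergeSingletons(ax, ay, false, nil) // array context: returnStates [7, 9], both parents p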

// Handle the case where at least one of {@code a} or {@code b} is
// {@link //EMPTY}. In the following diagrams, the symbol {@code $} is used
// to represent {@link //EMPTY}.
//
// <h2>Local-Context Merges</h2>
//
// <p>These local-context merge operations are used when {@code rootIsWildcard}
// is true.</p>
//
// <p>{@link //EMPTY} is a superset of any graph; return {@link //EMPTY}.<br>
// <embed src="images/LocalMerge_EmptyRoot.svg" type="image/svg+xml"/></p>
//
// <p>{@link //EMPTY} and anything is {@code //EMPTY}, so the merged parent is
// {@code //EMPTY}; return the left graph.<br>
// <embed src="images/LocalMerge_EmptyParent.svg" type="image/svg+xml"/></p>
//
// <p>Special case of last merge if local context.<br>
// <embed src="images/LocalMerge_DiffRoots.svg" type="image/svg+xml"/></p>
//
// <h2>Full-Context Merges</h2>
//
// <p>These full-context merge operations are used when {@code rootIsWildcard}
// is false.</p>
//
// <p><embed src="images/FullMerge_EmptyRoots.svg" type="image/svg+xml"/></p>
//
// <p>Must keep all contexts; {@link //EMPTY} in array is a special value (and
// nil parent).<br>
// <embed src="images/FullMerge_EmptyRoot.svg" type="image/svg+xml"/></p>
//
// <p><embed src="images/FullMerge_SameRoot.svg" type="image/svg+xml"/></p>
//
// @param a the first {@link SingletonBasePredictionContext}
// @param b the second {@link SingletonBasePredictionContext}
// @param rootIsWildcard {@code true} if this is a local-context merge,
// otherwise false to indicate a full-context merge
func mergeRoot(a, b *PredictionContext, rootIsWildcard bool) *PredictionContext {
	if rootIsWildcard {
		if a.pcType == PredictionContextEmpty {
			return BasePredictionContextEMPTY // * + b = *
		}
		if b.pcType == PredictionContextEmpty {
			return BasePredictionContextEMPTY // a + * = *
		}
	} else {
		if a.isEmpty() && b.isEmpty() {
			return BasePredictionContextEMPTY // $ + $ = $
		} else if a.isEmpty() { // $ + x = [x,$]
			payloads := []int{b.getReturnState(-1), BasePredictionContextEmptyReturnState}
			parents := []*PredictionContext{b.GetParent(-1), nil}
			return NewArrayPredictionContext(parents, payloads)
		} else if b.isEmpty() { // x + $ = [x,$] ($ is always last if present)
			payloads := []int{a.getReturnState(-1), BasePredictionContextEmptyReturnState}
			parents := []*PredictionContext{a.GetParent(-1), nil}
			return NewArrayPredictionContext(parents, payloads)
		}
	}
	return nil
}
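
// A compact summary of the cases above (sketch):
//
//	rootIsWildcard == true  (local context):  * + b = * ; a + * = *
//	rootIsWildcard == false (full context):   $ + $ = $ ; $ + x = [x,$] ; x + $ = [x,$]
//	anything else: mergeRoot returns nil and the caller falls through to the
//	general singleton/array merge.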

// Merge two {@link ArrayBasePredictionContext} instances.
//
// <p>Different tops, different parents.<br>
// <embed src="images/ArrayMerge_DiffTopDiffPar.svg" type="image/svg+xml"/></p>
//
// <p>Shared top, same parents.<br>
// <embed src="images/ArrayMerge_ShareTopSamePar.svg" type="image/svg+xml"/></p>
//
// <p>Shared top, different parents.<br>
// <embed src="images/ArrayMerge_ShareTopDiffPar.svg" type="image/svg+xml"/></p>
//
// <p>Shared top, all shared parents.<br>
// <embed src="images/ArrayMerge_ShareTopSharePar.svg"
// type="image/svg+xml"/></p>
//
// <p>Equal tops, merge parents and reduce top to
// {@link SingletonBasePredictionContext}.<br>
// <embed src="images/ArrayMerge_EqualTop.svg" type="image/svg+xml"/></p>
//
//goland:noinspection GoBoolExpressions
func mergeArrays(a, b *PredictionContext, rootIsWildcard bool, mergeCache *JPCMap) *PredictionContext {
	if mergeCache != nil {
		previous, present := mergeCache.Get(a, b)
		if present {
			if runtimeConfig.parserATNSimulatorTraceATNSim {
				fmt.Println("mergeArrays a=" + a.String() + ",b=" + b.String() + " -> previous")
			}
			return previous
		}
		previous, present = mergeCache.Get(b, a)
		if present {
			if runtimeConfig.parserATNSimulatorTraceATNSim {
				fmt.Println("mergeArrays a=" + a.String() + ",b=" + b.String() + " -> previous")
			}
			return previous
		}
	}
	// merge sorted payloads a + b => M
	i := 0 // walks a
	j := 0 // walks b
	k := 0 // walks target M array

	mergedReturnStates := make([]int, len(a.returnStates)+len(b.returnStates))
	mergedParents := make([]*PredictionContext, len(a.returnStates)+len(b.returnStates))
	// walk and merge to yield mergedParents, mergedReturnStates
	for i < len(a.returnStates) && j < len(b.returnStates) {
		aParent := a.parents[i]
		bParent := b.parents[j]
		if a.returnStates[i] == b.returnStates[j] {
			// same payload (stack tops are equal), must yield merged singleton
			payload := a.returnStates[i]
			// $+$ = $
			bothDollars := payload == BasePredictionContextEmptyReturnState && aParent == nil && bParent == nil
			axAX := aParent != nil && bParent != nil && aParent.Equals(bParent) // ax+ax -> ax
			if bothDollars || axAX {
				mergedParents[k] = aParent // choose left
				mergedReturnStates[k] = payload
			} else { // ax+ay -> a'[x,y]
				mergedParent := merge(aParent, bParent, rootIsWildcard, mergeCache)
				mergedParents[k] = mergedParent
				mergedReturnStates[k] = payload
			}
			i++ // hop over left one as usual
			j++ // but also skip one in right side since we merge
		} else if a.returnStates[i] < b.returnStates[j] { // copy a[i] to M
			mergedParents[k] = aParent
			mergedReturnStates[k] = a.returnStates[i]
			i++
		} else { // b > a, copy b[j] to M
			mergedParents[k] = bParent
			mergedReturnStates[k] = b.returnStates[j]
			j++
		}
		k++
	}
	// copy over any payloads remaining in either array
	if i < len(a.returnStates) {
		for p := i; p < len(a.returnStates); p++ {
			mergedParents[k] = a.parents[p]
			mergedReturnStates[k] = a.returnStates[p]
			k++
		}
	} else {
		for p := j; p < len(b.returnStates); p++ {
			mergedParents[k] = b.parents[p]
			mergedReturnStates[k] = b.returnStates[p]
			k++
		}
	}
	// trim merged if we combined a few that had same stack tops
	if k < len(mergedParents) { // write index < last position; trim
		if k == 1 { // for just one merged element, return singleton top
			pc := SingletonBasePredictionContextCreate(mergedParents[0], mergedReturnStates[0])
			if mergeCache != nil {
				mergeCache.Put(a, b, pc)
			}
			return pc
		}
		mergedParents = mergedParents[0:k]
		mergedReturnStates = mergedReturnStates[0:k]
	}

	M := NewArrayPredictionContext(mergedParents, mergedReturnStates)

	// if we created same array as a or b, return that instead
	// TODO: JI track whether this is possible above during merge sort for speed and possibly avoid an allocation
	if M.Equals(a) {
		if mergeCache != nil {
			mergeCache.Put(a, b, a)
		}
		if runtimeConfig.parserATNSimulatorTraceATNSim {
			fmt.Println("mergeArrays a=" + a.String() + ",b=" + b.String() + " -> a")
		}
		return a
	}
	if M.Equals(b) {
		if mergeCache != nil {
			mergeCache.Put(a, b, b)
		}
		if runtimeConfig.parserATNSimulatorTraceATNSim {
			fmt.Println("mergeArrays a=" + a.String() + ",b=" + b.String() + " -> b")
		}
		return b
	}
	combineCommonParents(&mergedParents)

	if mergeCache != nil {
		mergeCache.Put(a, b, M)
	}
	if runtimeConfig.parserATNSimulatorTraceATNSim {
		fmt.Println("mergeArrays a=" + a.String() + ",b=" + b.String() + " -> " + M.String())
	}
	return M
}

// Make a pass over all M parents and merge any Equals() ones.
// Note that we pass a pointer to the slice as we want to modify it in place.
//
//goland:noinspection GoUnusedFunction
func combineCommonParents(parents *[]*PredictionContext) {
	uniqueParents := NewJStore[*PredictionContext, Comparator[*PredictionContext]](pContextEqInst, PredictionContextCollection, "combineCommonParents for PredictionContext")

	for p := 0; p < len(*parents); p++ {
		parent := (*parents)[p]
		_, _ = uniqueParents.Put(parent)
	}
	for q := 0; q < len(*parents); q++ {
		pc, _ := uniqueParents.Get((*parents)[q])
		(*parents)[q] = pc
	}
}

func getCachedBasePredictionContext(context *PredictionContext, contextCache *PredictionContextCache, visited *VisitRecord) *PredictionContext {
	if context.isEmpty() {
		return context
	}
	existing, present := visited.Get(context)
	if present {
		return existing
	}

	existing, present = contextCache.Get(context)
	if present {
		visited.Put(context, existing)
		return existing
	}
	changed := false
	parents := make([]*PredictionContext, context.length())
	for i := 0; i < len(parents); i++ {
		parent := getCachedBasePredictionContext(context.GetParent(i), contextCache, visited)
		if changed || !parent.Equals(context.GetParent(i)) {
			if !changed {
				parents = make([]*PredictionContext, context.length())
				for j := 0; j < context.length(); j++ {
					parents[j] = context.GetParent(j)
				}
				changed = true
			}
			parents[i] = parent
		}
	}
	if !changed {
		contextCache.add(context)
		visited.Put(context, context)
		return context
	}
	var updated *PredictionContext
	if len(parents) == 0 {
		updated = BasePredictionContextEMPTY
	} else if len(parents) == 1 {
		updated = SingletonBasePredictionContextCreate(parents[0], context.getReturnState(0))
	} else {
		updated = NewArrayPredictionContext(parents, context.GetReturnStates())
	}
	contextCache.add(updated)
	visited.Put(updated, updated)
	visited.Put(context, updated)

	return updated
}
48
e2e/vendor/github.com/antlr4-go/antlr/v4/prediction_context_cache.go
generated
vendored
Normal file
@ -0,0 +1,48 @@
package antlr

var BasePredictionContextEMPTY = &PredictionContext{
	cachedHash:  calculateEmptyHash(),
	pcType:      PredictionContextEmpty,
	returnState: BasePredictionContextEmptyReturnState,
}

// PredictionContextCache is used to cache [PredictionContext] objects. It is used for the shared
// context cache associated with contexts in DFA states. This cache
// can be used for both lexers and parsers.
type PredictionContextCache struct {
	cache *JMap[*PredictionContext, *PredictionContext, Comparator[*PredictionContext]]
}

func NewPredictionContextCache() *PredictionContextCache {
	return &PredictionContextCache{
		cache: NewJMap[*PredictionContext, *PredictionContext, Comparator[*PredictionContext]](pContextEqInst, PredictionContextCacheCollection, "NewPredictionContextCache()"),
	}
}

// Add a context to the cache and return it. If the context already exists,
// return that one instead and do not add a new context to the cache.
// Protect shared cache from unsafe thread access.
func (p *PredictionContextCache) add(ctx *PredictionContext) *PredictionContext {
	if ctx.isEmpty() {
		return BasePredictionContextEMPTY
	}

	// Return the existing entry if one is present (note this is done via Equals,
	// not whether it is the same pointer), otherwise add the new entry and return that.
	//
	existing, present := p.cache.Get(ctx)
	if present {
		return existing
	}
	p.cache.Put(ctx, ctx)
	return ctx
}
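
// A usage sketch, using only names from this file (in the runtime the cache is
// normally owned by the ATN simulator): repeated adds of equal contexts yield
// the shared instance.
//
//	cache := NewPredictionContextCache()
//	c1 := cache.add(SingletonBasePredictionContextCreate(BasePredictionContextEMPTY, 5))
//	c2 := cache.add(SingletonBasePredictionContextCreate(BasePredictionContextEMPTY, 5))
//	// c1 == c2: the second add returns the entry already in the cache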

func (p *PredictionContextCache) Get(ctx *PredictionContext) (*PredictionContext, bool) {
	pc, exists := p.cache.Get(ctx)
	return pc, exists
}

func (p *PredictionContextCache) length() int {
	return p.cache.Len()
}
536
e2e/vendor/github.com/antlr4-go/antlr/v4/prediction_mode.go
generated
vendored
Normal file
@ -0,0 +1,536 @@
// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
// Use of this file is governed by the BSD 3-clause license that
// can be found in the LICENSE.txt file in the project root.

package antlr

// This enumeration defines the prediction modes available in ANTLR 4 along with
// utility methods for analyzing configuration sets for conflicts and/or
// ambiguities.

const (
	// PredictionModeSLL represents the SLL(*) prediction mode.
	// This prediction mode ignores the current
	// parser context when making predictions. This is the fastest prediction
	// mode, and provides correct results for many grammars. This prediction
	// mode is more powerful than the prediction mode provided by ANTLR 3, but
	// may result in syntax errors for grammar and input combinations which are
	// not SLL.
	//
	// When using this prediction mode, the parser will either return a correct
	// parse tree (i.e. the same parse tree that would be returned with the
	// [PredictionModeLL] prediction mode), or it will report a syntax error. If a
	// syntax error is encountered when using the SLL prediction mode,
	// it may be due to either an actual syntax error in the input or indicate
	// that the particular combination of grammar and input requires the more
	// powerful LL prediction abilities to complete successfully.
	//
	// This prediction mode does not provide any guarantees for prediction
	// behavior for syntactically-incorrect inputs.
	//
	PredictionModeSLL = 0

	// PredictionModeLL represents the LL(*) prediction mode.
	// This prediction mode allows the current parser
	// context to be used for resolving SLL conflicts that occur during
	// prediction. This is the fastest prediction mode that guarantees correct
	// parse results for all combinations of grammars with syntactically correct
	// inputs.
	//
	// When using this prediction mode, the parser will make correct decisions
	// for all syntactically-correct grammar and input combinations. However, in
	// cases where the grammar is truly ambiguous this prediction mode might not
	// report a precise answer for exactly which alternatives are
	// ambiguous.
	//
	// This prediction mode does not provide any guarantees for prediction
	// behavior for syntactically-incorrect inputs.
	//
	PredictionModeLL = 1

	// PredictionModeLLExactAmbigDetection represents the LL(*) prediction mode
	// with exact ambiguity detection.
	//
	// In addition to the correctness guarantees provided by the [PredictionModeLL] prediction mode,
	// this prediction mode instructs the prediction algorithm to determine the
	// complete and exact set of ambiguous alternatives for every ambiguous
	// decision encountered while parsing.
	//
	// This prediction mode may be used for diagnosing ambiguities during
	// grammar development. Due to the performance overhead of calculating sets
	// of ambiguous alternatives, this prediction mode should be avoided when
	// the exact results are not necessary.
	//
	// This prediction mode does not provide any guarantees for prediction
	// behavior for syntactically-incorrect inputs.
	//
	PredictionModeLLExactAmbigDetection = 2
)
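
// A typical usage sketch. This assumes a generated parser and the runtime's
// ParserATNSimulator (whose SetPredictionMode accepts these constants); the
// parser constructor name is hypothetical:
//
//	// p := NewMyGrammarParser(tokenStream)                    // hypothetical generated parser
//	// p.GetInterpreter().SetPredictionMode(PredictionModeSLL) // fast first pass
//	// ... on syntax error, retry the decision with PredictionModeLL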

// PredictionModehasSLLConflictTerminatingPrediction computes the SLL prediction termination condition.
//
// This method computes the SLL prediction termination condition for both of
// the following cases:
//
//   - The usual SLL+LL fallback upon SLL conflict
//   - Pure SLL without LL fallback
//
// # Combined SLL+LL Parsing
//
// When LL-fallback is enabled upon SLL conflict, correct predictions are
// ensured regardless of how the termination condition is computed by this
// method. Due to the substantially higher cost of LL prediction, the
// prediction should only fall back to LL when the additional lookahead
// cannot lead to a unique SLL prediction.
//
// Assuming combined SLL+LL parsing, an SLL configuration set with only
// conflicting subsets should fall back to full LL, even if the
// configuration sets don't resolve to the same alternative, e.g.
//
//	{1,2} and {3,4}
//
// If there is at least one non-conflicting
// configuration, SLL could continue with the hopes that more lookahead will
// resolve via one of those non-conflicting configurations.
//
// Here's the prediction termination rule, then: SLL (for SLL+LL parsing)
// stops when it sees only conflicting configuration subsets. In contrast,
// full LL keeps going when there is uncertainty.
//
// # Heuristic
//
// As a heuristic, we stop prediction when we see any conflicting subset
// unless we see a state that only has one alternative associated with it.
// The single-alt-state thing lets prediction continue upon rules like
// (otherwise, it would admit defeat too soon):
//
//	[12|1|[], 6|2|[], 12|2|[]]. s : (ID | ID ID?) ;
//
// When the [ATN] simulation reaches the state before ';', it has a
// [DFA] state that looks like:
//
//	[12|1|[], 6|2|[], 12|2|[]]
//
// Naturally
//
//	12|1|[] and 12|2|[]
//
// conflict, but we cannot stop processing this node because alternative two has another way to continue,
// via
//
//	[6|2|[]]
//
// It also lets us continue for this rule:
//
//	[1|1|[], 1|2|[], 8|3|[]] a : A | A | A B ;
//
// After matching input A, we reach the stop state for rule A, state 1.
// State 8 is the state immediately before B. Clearly alternatives 1 and 2
// conflict and no amount of further lookahead will separate the two.
// However, alternative 3 will be able to continue, and so we do not stop
// working on this state. In the previous example, we're concerned with
// states associated with the conflicting alternatives. Here alt 3 is not
// associated with the conflicting configs, but since we can continue
// looking for input reasonably, don't declare the state done.
//
// # Pure SLL Parsing
//
// To handle pure SLL parsing, all we have to do is make sure that we
// combine stack contexts for configurations that differ only by semantic
// predicate. From there, we can do the usual SLL termination heuristic.
//
// # Predicates in SLL+LL Parsing
//
// SLL decisions don't evaluate predicates until after they reach [DFA] stop
// states because they need to create the [DFA] cache that works in all
// semantic situations. In contrast, full LL evaluates predicates collected
// during start state computation, so it can ignore predicates thereafter.
// This means that SLL termination detection can totally ignore semantic
// predicates.
//
// Implementation-wise, [ATNConfigSet] combines stack contexts but not
// semantic predicate contexts, so we might see two configurations like the
// following:
//
//	(s, 1, x, {}), (s, 1, x', {p})
//
// Before testing these configurations against others, we have to merge
// x and x' (without modifying the existing configurations).
// For example, we test (x+x')==x'' when looking for conflicts in
// the following configurations:
//
//	(s, 1, x, {}), (s, 1, x', {p}), (s, 2, x'', {})
//
// If the configuration set has predicates (as indicated by
// [ATNConfigSet.hasSemanticContext]), this algorithm makes a copy of
// the configurations to strip out all the predicates so that a standard
// [ATNConfigSet] will merge everything ignoring predicates.
func PredictionModehasSLLConflictTerminatingPrediction(mode int, configs *ATNConfigSet) bool {

	// Configs in rule stop states indicate reaching the end of the decision
	// rule (local context) or end of start rule (full context). If all
	// configs meet this condition, then none of the configurations is able
	// to match additional input, so we terminate prediction.
	//
	if PredictionModeallConfigsInRuleStopStates(configs) {
		return true
	}

	// pure SLL mode parsing
	if mode == PredictionModeSLL {
		// Don't bother with combining configs from different semantic
		// contexts if we can fail over to full LL; it costs more time
		// since we'll often fail over anyway.
		if configs.hasSemanticContext {
			// dup configs, tossing out semantic predicates
			dup := NewATNConfigSet(false)
			for _, c := range configs.configs {

				// NewATNConfig({semanticContext:}, c)
				c = NewATNConfig2(c, SemanticContextNone)
				dup.Add(c, nil)
			}
			configs = dup
		}
		// now we have combined contexts for configs with dissimilar predicates
	}
	// pure SLL or combined SLL+LL mode parsing
	altsets := PredictionModegetConflictingAltSubsets(configs)
	return PredictionModehasConflictingAltSet(altsets) && !PredictionModehasStateAssociatedWithOneAlt(configs)
}
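
// A sketch of the final termination test on alt subsets (as computed by
// PredictionModegetConflictingAltSubsets below):
//
//	altsets [{1,2}, {1,2}] -> some subset conflicts: stop SLL prediction, unless
//	                          some state still has only one alt associated with it
//	altsets [{1}, {2}]     -> no conflicting subset: keep consuming lookahead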

// PredictionModehasConfigInRuleStopState checks if any configuration in the given configs is in a
// [RuleStopState]. Configurations meeting this condition have reached
// the end of the decision rule (local context) or end of start rule (full
// context).
//
// The func returns true if any configuration in the supplied configs is in a [RuleStopState]
func PredictionModehasConfigInRuleStopState(configs *ATNConfigSet) bool {
	for _, c := range configs.configs {
		if _, ok := c.GetState().(*RuleStopState); ok {
			return true
		}
	}
	return false
}

// PredictionModeallConfigsInRuleStopStates checks if all configurations in configs are in a
// [RuleStopState]. Configurations meeting this condition have reached
// the end of the decision rule (local context) or end of start rule (full
// context).
//
// The func returns true if all configurations in configs are in a
// [RuleStopState]
func PredictionModeallConfigsInRuleStopStates(configs *ATNConfigSet) bool {

	for _, c := range configs.configs {
		if _, ok := c.GetState().(*RuleStopState); !ok {
			return false
		}
	}
	return true
}

// PredictionModeresolvesToJustOneViableAlt checks full LL prediction termination.
//
// Can we stop looking ahead during [ATN] simulation or is there some
// uncertainty as to which alternative we will ultimately pick, after
// consuming more input? Even if there are partial conflicts, we might know
// that everything is going to resolve to the same minimum alternative. That
// means we can stop since no more lookahead will change that fact. On the
// other hand, there might be multiple conflicts that resolve to different
// minimums. That means we need more look ahead to decide which of those
// alternatives we should predict.
//
// The basic idea is to split the set of configurations 'C' into
// conflicting subsets (s, _, ctx, _) and singleton subsets with
// non-conflicting configurations. Two configurations conflict if they have
// identical [ATNConfig].state and [ATNConfig].context values
// but a different [ATNConfig].alt value, e.g.
//
//	(s, i, ctx, _)
//
// and
//
//	(s, j, ctx, _) ; for i != j
//
// Reduce these configuration subsets to the set of possible alternatives.
// You can compute the alternative subsets in one pass as follows:
//
//	A_s,ctx = {i | (s, i, ctx, _)}
//
// for each configuration in C holding s and ctx fixed.
//
// Or in pseudo-code:
//
//	for each configuration c in C:
//	  map[c] U= c.ATNConfig.alt  // map hash/equals uses s and x, not alt and not pred
//
// The values in map are the set of
//
//	A_s,ctx
//
// sets.
//
// If
//
//	|A_s,ctx| = 1
//
// then there is no conflict associated with s and ctx.
//
// Reduce the subsets to singletons by choosing a minimum of each subset. If
// the union of these alternative subsets is a singleton, then no amount of
// further lookahead will help us. We will always pick that alternative. If,
// however, there is more than one alternative, then we are uncertain which
// alternative to predict and must continue looking for resolution. We may
// or may not discover an ambiguity in the future, even if there are no
// conflicting subsets this round.
//
// The biggest sin is to terminate early because it means we've made a
// decision but were uncertain as to the eventual outcome. We haven't used
// enough lookahead. On the other hand, announcing a conflict too late is no
// big deal; you will still have the conflict. It's just inefficient. It
// might even look until the end of file.
//
// No special consideration for semantic predicates is required because
// predicates are evaluated on-the-fly for full LL prediction, ensuring that
// no configuration contains a semantic context during the termination
// check.
//
// # Conflicting Configs
//
// Two configurations:
//
//	(s, i, x) and (s, j, x')
//
// conflict when i != j but x = x'. Because we merge all
// (s, i, _) configurations together, that means that there are at
// most n configurations associated with state s for
// n possible alternatives in the decision. The merged stacks
// complicate the comparison of configuration contexts x and x'.
//
// Sam checks to see if one is a subset of the other by calling
// merge and checking to see if the merged result is either x or x'.
// If the x associated with lowest alternative i
// is the superset, then i is the only possible prediction since the
// others resolve to min(i) as well. However, if x is
// associated with j > i then at least one stack configuration for
// j is not in conflict with alternative i. The algorithm
// should keep going, looking for more lookahead due to the uncertainty.
//
// For simplicity, I'm doing an equality check between x and
// x', which lets the algorithm continue to consume lookahead longer
// than necessary. The reason I like the equality is of course the
// simplicity but also because that is the test you need to detect the
// alternatives that are actually in conflict.
//
// # Continue/Stop Rule
//
// Continue if the union of resolved alternative sets from non-conflicting and
// conflicting alternative subsets has more than one alternative. We are
// uncertain about which alternative to predict.
//
// The complete set of alternatives,
//
//	[i for (_, i, _)]
//
// tells us which alternatives are still in the running for the amount of input we've
// consumed at this point. The conflicting sets let us strip away
// configurations that won't lead to more states because we resolve
// conflicts to the configuration with a minimum alternate for the
// conflicting set.
//
// Cases
//
//   - no conflicts and more than 1 alternative in set => continue
//   - (s, 1, x), (s, 2, x), (s, 3, z), (s', 1, y), (s', 2, y) yields non-conflicting set
//     {3} ∪ conflicting sets min({1,2}) ∪ min({1,2}) = {1,3} => continue
//   - (s, 1, x), (s, 2, x), (s', 1, y), (s', 2, y), (s'', 1, z) yields non-conflicting set
//     {1} ∪ conflicting sets min({1,2}) ∪ min({1,2}) = {1} => stop and predict 1
//   - (s, 1, x), (s, 2, x), (s', 1, y), (s', 2, y) yields conflicting, reduced sets
//     {1} ∪ {1} = {1} => stop and predict 1, can announce ambiguity {1,2}
//   - (s, 1, x), (s, 2, x), (s', 2, y), (s', 3, y) yields conflicting, reduced sets
//     {1} ∪ {2} = {1,2} => continue
//   - (s, 1, x), (s, 2, x), (s', 3, y), (s', 4, y) yields conflicting, reduced sets
//     {1} ∪ {3} = {1,3} => continue
//
// # Exact Ambiguity Detection
//
// If all states report the same conflicting set of alternatives, then we
// know we have the exact ambiguity set:
//
//	|A_i| > 1
//
// and
//
//	A_i = A_j ; for all i, j
//
// In other words, we continue examining lookahead until all A_i
// have more than one alternative and all A_i are the same. If
//
//	A={{1,2}, {1,3}}
//
// then regular LL prediction would terminate because the resolved set is {1}.
// To determine what the real ambiguity is, we have to know whether the ambiguity is between one and
// two or one and three so we keep going. We can only stop prediction when
// we need exact ambiguity detection when the sets look like:
//
//	A={{1,2}}
//
// or
//
//	{{1,2},{1,2}}, etc...
func PredictionModeresolvesToJustOneViableAlt(altsets []*BitSet) int {
	return PredictionModegetSingleViableAlt(altsets)
}
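
// For example (a sketch of the reduction performed by
// PredictionModegetSingleViableAlt below): each subset is reduced to its
// minimum alternative, and prediction stops only when all minimums agree.
//
//	altsets [{1,2}, {1,3}] -> 1                   (min of each subset is 1)
//	altsets [{1,2}, {2,3}] -> ATNInvalidAltNumber (mins 1 and 2 disagree)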

// PredictionModeallSubsetsConflict determines if every alternative subset in altsets contains more
// than one alternative.
//
// The func returns true if every [BitSet] in altsets has
// [BitSet].cardinality cardinality > 1
func PredictionModeallSubsetsConflict(altsets []*BitSet) bool {
	return !PredictionModehasNonConflictingAltSet(altsets)
}

// PredictionModehasNonConflictingAltSet determines if any single alternative subset in altsets contains
// exactly one alternative.
//
// The func returns true if altsets contains at least one [BitSet] with
// [BitSet].cardinality cardinality 1
func PredictionModehasNonConflictingAltSet(altsets []*BitSet) bool {
	for i := 0; i < len(altsets); i++ {
		alts := altsets[i]
		if alts.length() == 1 {
			return true
		}
	}
	return false
}

// PredictionModehasConflictingAltSet determines if any single alternative subset in altsets contains
// more than one alternative.
//
// The func returns true if altsets contains a [BitSet] with
// [BitSet].cardinality cardinality > 1, otherwise false
func PredictionModehasConflictingAltSet(altsets []*BitSet) bool {
	for i := 0; i < len(altsets); i++ {
		alts := altsets[i]
		if alts.length() > 1 {
			return true
		}
	}
	return false
}

// PredictionModeallSubsetsEqual determines if every alternative subset in altsets is equivalent.
//
// The func returns true if every member of altsets is equal to the others.
func PredictionModeallSubsetsEqual(altsets []*BitSet) bool {
	var first *BitSet

	for i := 0; i < len(altsets); i++ {
		alts := altsets[i]
		if first == nil {
			first = alts
		} else if alts != first {
			return false
		}
	}

	return true
}

// PredictionModegetUniqueAlt returns the unique alternative predicted by all alternative subsets in
// altsets. If no such alternative exists, this method returns
// [ATNInvalidAltNumber].
//
// @param altsets a collection of alternative subsets
func PredictionModegetUniqueAlt(altsets []*BitSet) int {
	all := PredictionModeGetAlts(altsets)
	if all.length() == 1 {
		return all.minValue()
	}

	return ATNInvalidAltNumber
}

// PredictionModeGetAlts returns the complete set of represented alternatives for a collection of
// alternative subsets. This method returns the union of each [BitSet]
// in altsets, being the set of represented alternatives in altsets.
func PredictionModeGetAlts(altsets []*BitSet) *BitSet {
	all := NewBitSet()
	for _, alts := range altsets {
		all.or(alts)
	}
	return all
}

// PredictionModegetConflictingAltSubsets gets the conflicting alt subsets from a configuration set.
//
//	for each configuration c in configs:
//	  map[c] U= c.ATNConfig.alt  // map hash/equals uses s and x, not alt and not pred
func PredictionModegetConflictingAltSubsets(configs *ATNConfigSet) []*BitSet {
	configToAlts := NewJMap[*ATNConfig, *BitSet, *ATNAltConfigComparator[*ATNConfig]](atnAltCfgEqInst, AltSetCollection, "PredictionModegetConflictingAltSubsets()")

	for _, c := range configs.configs {

		alts, ok := configToAlts.Get(c)
		if !ok {
			alts = NewBitSet()
			configToAlts.Put(c, alts)
		}
		alts.add(c.GetAlt())
	}

	return configToAlts.Values()
}
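
// A worked sketch of the grouping: configurations are keyed by (state, context),
// ignoring alt and predicate, and the alts seen under each key form one subset.
//
//	configs: (s, 1, x), (s, 2, x), (s', 3, y)
//	keys:    (s, x) -> {1, 2} ; (s', y) -> {3}
//	result:  [{1,2}, {3}]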

// PredictionModeGetStateToAltMap gets a map from state to alt subset from a configuration set.
//
//	for each configuration c in configs:
//	  map[c.ATNConfig.state] U= c.ATNConfig.alt
func PredictionModeGetStateToAltMap(configs *ATNConfigSet) *AltDict {
	m := NewAltDict()

	for _, c := range configs.configs {
		alts := m.Get(c.GetState().String())
		if alts == nil {
			alts = NewBitSet()
			m.put(c.GetState().String(), alts)
		}
		alts.(*BitSet).add(c.GetAlt())
	}
	return m
}

func PredictionModehasStateAssociatedWithOneAlt(configs *ATNConfigSet) bool {
	values := PredictionModeGetStateToAltMap(configs).values()
	for i := 0; i < len(values); i++ {
		if values[i].(*BitSet).length() == 1 {
			return true
		}
	}
	return false
}

// PredictionModegetSingleViableAlt gets the single alternative predicted by all alternative subsets in altsets
// if there is one.
//
// TODO: JI - Review this code - it does not seem to do the same thing as the Java code - maybe because [BitSet] is not like the Java utils BitSet
func PredictionModegetSingleViableAlt(altsets []*BitSet) int {
	result := ATNInvalidAltNumber

	for i := 0; i < len(altsets); i++ {
		alts := altsets[i]
		minAlt := alts.minValue()
		if result == ATNInvalidAltNumber {
			result = minAlt
		} else if result != minAlt { // more than 1 viable alt
			return ATNInvalidAltNumber
		}
	}
	return result
}
241
e2e/vendor/github.com/antlr4-go/antlr/v4/recognizer.go
generated
vendored
Normal file
@ -0,0 +1,241 @@
// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
// Use of this file is governed by the BSD 3-clause license that
// can be found in the LICENSE.txt file in the project root.

package antlr

import (
	"fmt"
	"strconv"
	"strings"
)

type Recognizer interface {
	GetLiteralNames() []string
	GetSymbolicNames() []string
	GetRuleNames() []string

	Sempred(RuleContext, int, int) bool
	Precpred(RuleContext, int) bool

	GetState() int
	SetState(int)
	Action(RuleContext, int, int)
	AddErrorListener(ErrorListener)
	RemoveErrorListeners()
	GetATN() *ATN
	GetErrorListenerDispatch() ErrorListener
	HasError() bool
	GetError() RecognitionException
	SetError(RecognitionException)
}

type BaseRecognizer struct {
	listeners []ErrorListener
	state     int

	RuleNames       []string
	LiteralNames    []string
	SymbolicNames   []string
	GrammarFileName string
	SynErr          RecognitionException
}

func NewBaseRecognizer() *BaseRecognizer {
	rec := new(BaseRecognizer)
	rec.listeners = []ErrorListener{ConsoleErrorListenerINSTANCE}
	rec.state = -1
	return rec
}

//goland:noinspection GoUnusedGlobalVariable
var tokenTypeMapCache = make(map[string]int)

//goland:noinspection GoUnusedGlobalVariable
var ruleIndexMapCache = make(map[string]int)

func (b *BaseRecognizer) checkVersion(toolVersion string) {
	runtimeVersion := "4.12.0"
	if runtimeVersion != toolVersion {
		fmt.Println("ANTLR runtime and generated code versions disagree: " + runtimeVersion + "!=" + toolVersion)
	}
}

func (b *BaseRecognizer) SetError(err RecognitionException) {
	b.SynErr = err
}

func (b *BaseRecognizer) HasError() bool {
	return b.SynErr != nil
}

func (b *BaseRecognizer) GetError() RecognitionException {
	return b.SynErr
}

func (b *BaseRecognizer) Action(_ RuleContext, _, _ int) {
	panic("action not implemented on Recognizer!")
}

func (b *BaseRecognizer) AddErrorListener(listener ErrorListener) {
	b.listeners = append(b.listeners, listener)
}

func (b *BaseRecognizer) RemoveErrorListeners() {
	b.listeners = make([]ErrorListener, 0)
}

func (b *BaseRecognizer) GetRuleNames() []string {
	return b.RuleNames
}

func (b *BaseRecognizer) GetTokenNames() []string {
	return b.LiteralNames
}

func (b *BaseRecognizer) GetSymbolicNames() []string {
	return b.SymbolicNames
}

func (b *BaseRecognizer) GetLiteralNames() []string {
	return b.LiteralNames
}

func (b *BaseRecognizer) GetState() int {
	return b.state
}

func (b *BaseRecognizer) SetState(v int) {
	b.state = v
}

//func (b *Recognizer) GetTokenTypeMap() {
//	var tokenNames = b.GetTokenNames()
//	if (tokenNames==nil) {
//		panic("The current recognizer does not provide a list of token names.")
//	}
//	var result = tokenTypeMapCache[tokenNames]
//	if(result==nil) {
//		result = tokenNames.reduce(function(o, k, i) { o[k] = i })
//		result.EOF = TokenEOF
//		tokenTypeMapCache[tokenNames] = result
//	}
//	return result
//}

// GetRuleIndexMap gets a map from rule names to rule indexes.
//
// Used for XPath and tree pattern compilation.
//
// TODO: JI This is not yet implemented in the Go runtime. Maybe not needed.
func (b *BaseRecognizer) GetRuleIndexMap() map[string]int {

	panic("Method not defined!")
	//	var ruleNames = b.GetRuleNames()
	//	if (ruleNames==nil) {
	//		panic("The current recognizer does not provide a list of rule names.")
	//	}
	//
	//	var result = ruleIndexMapCache[ruleNames]
	//	if(result==nil) {
	//		result = ruleNames.reduce(function(o, k, i) { o[k] = i })
	//		ruleIndexMapCache[ruleNames] = result
	//	}
	//	return result
}

// GetTokenType gets the token type based upon its name
func (b *BaseRecognizer) GetTokenType(_ string) int {
	panic("Method not defined!")
	//	var ttype = b.GetTokenTypeMap()[tokenName]
	//	if (ttype !=nil) {
	//		return ttype
	//	} else {
	//		return TokenInvalidType
	//	}
}

//func (b *Recognizer) GetTokenTypeMap() map[string]int {
//	Vocabulary vocabulary = getVocabulary()
//
//	Synchronized (tokenTypeMapCache) {
//		Map<String, Integer> result = tokenTypeMapCache.Get(vocabulary)
//		if (result == null) {
//			result = new HashMap<String, Integer>()
//			for (int i = 0; i < GetATN().maxTokenType; i++) {
//				String literalName = vocabulary.getLiteralName(i)
//				if (literalName != null) {
//					result.put(literalName, i)
//				}
//
//				String symbolicName = vocabulary.GetSymbolicName(i)
//				if (symbolicName != null) {
//					result.put(symbolicName, i)
//				}
//			}
//
//			result.put("EOF", Token.EOF)
//			result = Collections.unmodifiableMap(result)
//			tokenTypeMapCache.put(vocabulary, result)
//		}
//
//		return result
//	}
//}

// GetErrorHeader returns the error header, normally line/character position information.
//
// Can be overridden in sub structs embedding BaseRecognizer.
func (b *BaseRecognizer) GetErrorHeader(e RecognitionException) string {
	line := e.GetOffendingToken().GetLine()
	column := e.GetOffendingToken().GetColumn()
	return "line " + strconv.Itoa(line) + ":" + strconv.Itoa(column)
}
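
// For example, for an offending token at line 3, column 14 this produces the
// header "line 3:14" used as the prefix of a reported syntax error.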

// GetTokenErrorDisplay shows how a token should be displayed in an error message.
//
// The default is to display just the text, but during development you might
// want to have a lot of information spit out. Override in that case
// to use t.String() (which, for CommonToken, dumps everything about
// the token). This is better than forcing you to override a method in
// your token objects because you don't have to go modify your lexer
// so that it creates a new Java type.
//
// Deprecated: This method is not called by the ANTLR 4 Runtime. Specific
// implementations of [ANTLRErrorStrategy] may provide a similar
// feature when necessary. For example, see [DefaultErrorStrategy].GetTokenErrorDisplay()
func (b *BaseRecognizer) GetTokenErrorDisplay(t Token) string {
	if t == nil {
		return "<no token>"
	}
	s := t.GetText()
	if s == "" {
		if t.GetTokenType() == TokenEOF {
			s = "<EOF>"
		} else {
			s = "<" + strconv.Itoa(t.GetTokenType()) + ">"
		}
	}
	s = strings.Replace(s, "\t", "\\t", -1)
	s = strings.Replace(s, "\n", "\\n", -1)
	s = strings.Replace(s, "\r", "\\r", -1)

	return "'" + s + "'"
}
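
// A few example renderings (sketch): a token with text "id" displays as 'id';
// a token whose text contains a newline displays with the escape visible,
// e.g. 'a\nb'; and the end-of-file token displays as '<EOF>'.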

func (b *BaseRecognizer) GetErrorListenerDispatch() ErrorListener {
	return NewProxyErrorListener(b.listeners)
}

// Sempred embedding structs need to override this if there are sempreds or actions
// that the ATN interpreter needs to execute
func (b *BaseRecognizer) Sempred(_ RuleContext, _ int, _ int) bool {
	return true
}

// Precpred embedding structs need to override this if there are precedence
// predicates that the ATN interpreter needs to execute
func (b *BaseRecognizer) Precpred(_ RuleContext, _ int) bool {
	return true
}
40
e2e/vendor/github.com/antlr4-go/antlr/v4/rule_context.go
generated
vendored
Normal file
@ -0,0 +1,40 @@
// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
// Use of this file is governed by the BSD 3-clause license that
// can be found in the LICENSE.txt file in the project root.

package antlr

// RuleContext is a record of a single rule invocation. It knows
// which context invoked it, if any. If there is no parent context, then
// naturally the invoking state is not valid. The parent link
// provides a chain upwards from the current rule invocation to the root
// of the invocation tree, forming a stack.
//
// We actually carry no information about the rule associated with this context (except
// when parsing). We keep only the state number of the invoking state from
// the [ATN] submachine that invoked this. Contrast this with the s
// pointer inside [ParserRuleContext] that tracks the current state
// being "executed" for the current rule.
//
// The parent contexts are useful for computing lookahead sets and
// getting error information.
//
// These objects are used during parsing and prediction.
// For the special case of parsers, we use the struct
// [ParserRuleContext], which embeds a RuleContext.
//
// @see ParserRuleContext
type RuleContext interface {
	RuleNode

	GetInvokingState() int
	SetInvokingState(int)

	GetRuleIndex() int
	IsEmpty() bool

	GetAltNumber() int
	SetAltNumber(altNumber int)

	String([]string, RuleContext) string
}
464
e2e/vendor/github.com/antlr4-go/antlr/v4/semantic_context.go
generated
vendored
Normal file
@ -0,0 +1,464 @@
// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
// Use of this file is governed by the BSD 3-clause license that
// can be found in the LICENSE.txt file in the project root.

package antlr

import (
	"fmt"
	"strconv"
)

// SemanticContext is a tree structure used to record the semantic context in which
// an ATN configuration is valid. It's either a single predicate,
// a conjunction p1 && p2, or a sum of products p1 || p2.
//
// I have scoped the AND, OR, and Predicate subclasses of
// [SemanticContext] within the scope of this outer "class"
type SemanticContext interface {
	Equals(other Collectable[SemanticContext]) bool
	Hash() int

	evaluate(parser Recognizer, outerContext RuleContext) bool
	evalPrecedence(parser Recognizer, outerContext RuleContext) SemanticContext

	String() string
}

func SemanticContextandContext(a, b SemanticContext) SemanticContext {
	if a == nil || a == SemanticContextNone {
		return b
	}
	if b == nil || b == SemanticContextNone {
		return a
	}
	result := NewAND(a, b)
	if len(result.opnds) == 1 {
		return result.opnds[0]
	}

	return result
}

func SemanticContextorContext(a, b SemanticContext) SemanticContext {
	if a == nil {
		return b
	}
	if b == nil {
		return a
	}
	if a == SemanticContextNone || b == SemanticContextNone {
		return SemanticContextNone
	}
	result := NewOR(a, b)
	if len(result.opnds) == 1 {
		return result.opnds[0]
	}

	return result
}
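
// The combinators above treat SemanticContextNone (the always-true predicate)
// as the identity for AND and the absorbing element for OR (sketch, for two
// distinct predicates p and q):
//
//	SemanticContextandContext(SemanticContextNone, p) // -> p
//	SemanticContextorContext(SemanticContextNone, p)  // -> SemanticContextNone
//	SemanticContextandContext(p, q)                   // -> AND(p, q)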

type Predicate struct {
	ruleIndex      int
	predIndex      int
	isCtxDependent bool
}

func NewPredicate(ruleIndex, predIndex int, isCtxDependent bool) *Predicate {
	p := new(Predicate)

	p.ruleIndex = ruleIndex
	p.predIndex = predIndex
	p.isCtxDependent = isCtxDependent // e.g., $i ref in pred
	return p
}

// SemanticContextNone is the default [SemanticContext], which is semantically
// equivalent to a predicate of the form {true}?.
var SemanticContextNone = NewPredicate(-1, -1, false)

func (p *Predicate) evalPrecedence(_ Recognizer, _ RuleContext) SemanticContext {
	return p
}

func (p *Predicate) evaluate(parser Recognizer, outerContext RuleContext) bool {

	var localctx RuleContext

	if p.isCtxDependent {
		localctx = outerContext
	}

	return parser.Sempred(localctx, p.ruleIndex, p.predIndex)
}

func (p *Predicate) Equals(other Collectable[SemanticContext]) bool {
	if p == other {
		return true
	} else if _, ok := other.(*Predicate); !ok {
		return false
	} else {
		return p.ruleIndex == other.(*Predicate).ruleIndex &&
			p.predIndex == other.(*Predicate).predIndex &&
			p.isCtxDependent == other.(*Predicate).isCtxDependent
	}
}

func (p *Predicate) Hash() int {
	h := murmurInit(0)
	h = murmurUpdate(h, p.ruleIndex)
	h = murmurUpdate(h, p.predIndex)
	if p.isCtxDependent {
		h = murmurUpdate(h, 1)
	} else {
		h = murmurUpdate(h, 0)
	}
	return murmurFinish(h, 3)
}

func (p *Predicate) String() string {
	return "{" + strconv.Itoa(p.ruleIndex) + ":" + strconv.Itoa(p.predIndex) + "}?"
}
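
// For instance, a predicate for rule index 3 and predicate index 1 renders in
// the grammar-style form below:
//
//	NewPredicate(3, 1, false).String() // -> "{3:1}?"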

type PrecedencePredicate struct {
	precedence int
}

func NewPrecedencePredicate(precedence int) *PrecedencePredicate {

	p := new(PrecedencePredicate)
	p.precedence = precedence

	return p
}

func (p *PrecedencePredicate) evaluate(parser Recognizer, outerContext RuleContext) bool {
	return parser.Precpred(outerContext, p.precedence)
}

func (p *PrecedencePredicate) evalPrecedence(parser Recognizer, outerContext RuleContext) SemanticContext {
	if parser.Precpred(outerContext, p.precedence) {
		return SemanticContextNone
	}

	return nil
}

func (p *PrecedencePredicate) compareTo(other *PrecedencePredicate) int {
	return p.precedence - other.precedence
}

func (p *PrecedencePredicate) Equals(other Collectable[SemanticContext]) bool {

	var op *PrecedencePredicate
	var ok bool
	if op, ok = other.(*PrecedencePredicate); !ok {
		return false
	}

	if p == op {
		return true
	}

	return p.precedence == other.(*PrecedencePredicate).precedence
}

func (p *PrecedencePredicate) Hash() int {
	h := uint32(1)
	h = 31*h + uint32(p.precedence)
	return int(h)
}

func (p *PrecedencePredicate) String() string {
	return "{" + strconv.Itoa(p.precedence) + ">=prec}?"
}

func PrecedencePredicatefilterPrecedencePredicates(set *JStore[SemanticContext, Comparator[SemanticContext]]) []*PrecedencePredicate {
	result := make([]*PrecedencePredicate, 0)

	set.Each(func(v SemanticContext) bool {
		if c2, ok := v.(*PrecedencePredicate); ok {
			result = append(result, c2)
		}
		return true
	})

	return result
}

// AND is a semantic context which is true whenever none of the contained contexts
// is false.
type AND struct {
	opnds []SemanticContext
}

func NewAND(a, b SemanticContext) *AND {

	operands := NewJStore[SemanticContext, Comparator[SemanticContext]](semctxEqInst, SemanticContextCollection, "NewAND() operands")
	if aa, ok := a.(*AND); ok {
		for _, o := range aa.opnds {
			operands.Put(o)
		}
	} else {
		operands.Put(a)
	}

	if ba, ok := b.(*AND); ok {
		for _, o := range ba.opnds {
			operands.Put(o)
		}
	} else {
		operands.Put(b)
	}
	precedencePredicates := PrecedencePredicatefilterPrecedencePredicates(operands)
	if len(precedencePredicates) > 0 {
		// interested in the transition with the lowest precedence
		var reduced *PrecedencePredicate

		for _, p := range precedencePredicates {
			if reduced == nil || p.precedence < reduced.precedence {
				reduced = p
			}
		}

		operands.Put(reduced)
	}

	vs := operands.Values()
	opnds := make([]SemanticContext, len(vs))
	copy(opnds, vs)

	and := new(AND)
	and.opnds = opnds

	return and
}

func (a *AND) Equals(other Collectable[SemanticContext]) bool {
	if a == other {
		return true
	}
	if _, ok := other.(*AND); !ok {
		return false
	} else {
		for i, v := range other.(*AND).opnds {
			if !a.opnds[i].Equals(v) {
				return false
			}
		}
		return true
	}
}

// evaluate the AND context.
//
// The evaluation of predicates by this context is short-circuiting, but
// unordered.
func (a *AND) evaluate(parser Recognizer, outerContext RuleContext) bool {
	for i := 0; i < len(a.opnds); i++ {
		if !a.opnds[i].evaluate(parser, outerContext) {
			return false
		}
	}
	return true
}

func (a *AND) evalPrecedence(parser Recognizer, outerContext RuleContext) SemanticContext {
	differs := false
	operands := make([]SemanticContext, 0)

	for i := 0; i < len(a.opnds); i++ {
		context := a.opnds[i]
		evaluated := context.evalPrecedence(parser, outerContext)
		differs = differs || (evaluated != context)
		if evaluated == nil {
			// The AND context is false if any element is false
			return nil
		} else if evaluated != SemanticContextNone {
			// Reduce the result by skipping true elements
			operands = append(operands, evaluated)
		}
	}
	if !differs {
		return a
	}

	if len(operands) == 0 {
		// all elements were true, so the AND context is true
		return SemanticContextNone
	}

	var result SemanticContext

	for _, o := range operands {
		if result == nil {
			result = o
		} else {
			result = SemanticContextandContext(result, o)
		}
	}

	return result
}

func (a *AND) Hash() int {
	h := murmurInit(37) // Init with a value different from OR
	for _, op := range a.opnds {
		h = murmurUpdate(h, op.Hash())
	}
	return murmurFinish(h, len(a.opnds))
}

func (o *OR) Hash() int {
	h := murmurInit(41) // Init with a value different from AND
|
||||
for _, op := range o.opnds {
|
||||
h = murmurUpdate(h, op.Hash())
|
||||
}
|
||||
return murmurFinish(h, len(o.opnds))
|
||||
}
|
||||
|
||||
func (a *AND) String() string {
|
||||
s := ""
|
||||
|
||||
for _, o := range a.opnds {
|
||||
s += "&& " + fmt.Sprint(o)
|
||||
}
|
||||
|
||||
if len(s) > 3 {
|
||||
return s[0:3]
|
||||
}
|
||||
|
||||
return s
|
||||
}
|
||||
|
||||
//
|
||||
// A semantic context which is true whenever at least one of the contained
|
||||
// contexts is true.
|
||||
//
|
||||
|
||||
type OR struct {
|
||||
opnds []SemanticContext
|
||||
}
|
||||
|
||||
func NewOR(a, b SemanticContext) *OR {
|
||||
|
||||
operands := NewJStore[SemanticContext, Comparator[SemanticContext]](semctxEqInst, SemanticContextCollection, "NewOR() operands")
|
||||
if aa, ok := a.(*OR); ok {
|
||||
for _, o := range aa.opnds {
|
||||
operands.Put(o)
|
||||
}
|
||||
} else {
|
||||
operands.Put(a)
|
||||
}
|
||||
|
||||
if ba, ok := b.(*OR); ok {
|
||||
for _, o := range ba.opnds {
|
||||
operands.Put(o)
|
||||
}
|
||||
} else {
|
||||
operands.Put(b)
|
||||
}
|
||||
precedencePredicates := PrecedencePredicatefilterPrecedencePredicates(operands)
|
||||
if len(precedencePredicates) > 0 {
|
||||
// interested in the transition with the lowest precedence
|
||||
var reduced *PrecedencePredicate
|
||||
|
||||
for _, p := range precedencePredicates {
|
||||
if reduced == nil || p.precedence > reduced.precedence {
|
||||
reduced = p
|
||||
}
|
||||
}
|
||||
|
||||
operands.Put(reduced)
|
||||
}
|
||||
|
||||
vs := operands.Values()
|
||||
|
||||
opnds := make([]SemanticContext, len(vs))
|
||||
copy(opnds, vs)
|
||||
|
||||
o := new(OR)
|
||||
o.opnds = opnds
|
||||
|
||||
return o
|
||||
}
|
||||
|
||||
func (o *OR) Equals(other Collectable[SemanticContext]) bool {
|
||||
if o == other {
|
||||
return true
|
||||
} else if _, ok := other.(*OR); !ok {
|
||||
return false
|
||||
} else {
|
||||
for i, v := range other.(*OR).opnds {
|
||||
if !o.opnds[i].Equals(v) {
|
||||
return false
|
||||
}
|
||||
}
|
||||
return true
|
||||
}
|
||||
}
|
||||
|
||||
// <p>
|
||||
// The evaluation of predicates by o context is short-circuiting, but
|
||||
// unordered.</p>
|
||||
func (o *OR) evaluate(parser Recognizer, outerContext RuleContext) bool {
|
||||
for i := 0; i < len(o.opnds); i++ {
|
||||
if o.opnds[i].evaluate(parser, outerContext) {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func (o *OR) evalPrecedence(parser Recognizer, outerContext RuleContext) SemanticContext {
|
||||
differs := false
|
||||
operands := make([]SemanticContext, 0)
|
||||
for i := 0; i < len(o.opnds); i++ {
|
||||
context := o.opnds[i]
|
||||
evaluated := context.evalPrecedence(parser, outerContext)
|
||||
differs = differs || (evaluated != context)
|
||||
if evaluated == SemanticContextNone {
|
||||
// The OR context is true if any element is true
|
||||
return SemanticContextNone
|
||||
} else if evaluated != nil {
|
||||
// Reduce the result by Skipping false elements
|
||||
operands = append(operands, evaluated)
|
||||
}
|
||||
}
|
||||
if !differs {
|
||||
return o
|
||||
}
|
||||
if len(operands) == 0 {
|
||||
// all elements were false, so the OR context is false
|
||||
return nil
|
||||
}
|
||||
var result SemanticContext
|
||||
|
||||
for _, o := range operands {
|
||||
if result == nil {
|
||||
result = o
|
||||
} else {
|
||||
result = SemanticContextorContext(result, o)
|
||||
}
|
||||
}
|
||||
|
||||
return result
|
||||
}
|
||||
|
||||
func (o *OR) String() string {
|
||||
s := ""
|
||||
|
||||
for _, o := range o.opnds {
|
||||
s += "|| " + fmt.Sprint(o)
|
||||
}
|
||||
|
||||
if len(s) > 3 {
|
||||
return s[0:3]
|
||||
}
|
||||
|
||||
return s
|
||||
}
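
// A hypothetical, package-internal sketch of how the combinators above
// reduce their operands; exampleCombine is not part of the vendored API.
// NewAND folds nested ANDs and keeps only the lowest precedence predicate
// among its operands.
func exampleCombine() SemanticContext {
    p := NewPredicate(0, 0, false)  // prints as {0:0}?
    lo := NewPrecedencePredicate(3) // prints as {3>=prec}?
    hi := NewPrecedencePredicate(5) // prints as {5>=prec}?
    // The inner AND is flattened into the outer one, and of the two
    // precedence predicates only the lowest (3) is kept alongside p.
    return NewAND(NewAND(p, lo), hi)
}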
281 e2e/vendor/github.com/antlr4-go/antlr/v4/statistics.go generated vendored Normal file
@@ -0,0 +1,281 @@
//go:build antlr.stats

package antlr

import (
    "fmt"
    "log"
    "os"
    "path/filepath"
    "sort"
    "strconv"
    "sync"
)

// This file allows the user to collect statistics about the runtime performance of the ANTLR runtime. It is not
// enabled by default and so incurs no time penalty. To enable it, you must build the runtime with the antlr.stats
// build tag.
//

// Tells various components to collect statistics - because it is only true when this file is included, it will
// allow the compiler to completely eliminate all the code that is only used when collecting statistics.
const collectStats = true

// goRunStats is a collection of all the various data the ANTLR runtime has collected about a particular run.
// It is exported so that it can be used by others to look for things that are not already looked for in the
// runtime statistics.
type goRunStats struct {

    // jStats is a slice of all the [JStatRec] records that have been created, which is one for EVERY collection created
    // during a run. It is exported so that it can be used by others to look for things that are not already looked for
    // within this package.
    //
    jStats            []*JStatRec
    jStatsLock        sync.RWMutex
    topN              int
    topNByMax         []*JStatRec
    topNByUsed        []*JStatRec
    unusedCollections map[CollectionSource]int
    counts            map[CollectionSource]int
}

const (
    collectionsFile = "collections"
)

var (
    Statistics = &goRunStats{
        topN: 10,
    }
)

type statsOption func(*goRunStats) error

// Configure allows the statistics system to be configured as the user wants and override the defaults.
func (s *goRunStats) Configure(options ...statsOption) error {
    for _, option := range options {
        err := option(s)
        if err != nil {
            return err
        }
    }
    return nil
}

// WithTopN sets the number of things to list in the report when we are concerned with the top N things.
//
// For example, if you want to see the top 20 collections by size, you can do:
//
//	antlr.Statistics.Configure(antlr.WithTopN(20))
func WithTopN(topN int) statsOption {
    return func(s *goRunStats) error {
        s.topN = topN
        return nil
    }
}

// Analyze looks through all the statistical records and computes all the outputs that might be useful to the user.
//
// The function gathers and analyzes a number of statistics about any particular run of
// an ANTLR generated recognizer. In the vast majority of cases, the statistics are only
// useful to maintainers of ANTLR itself, but they can be useful to users as well. They may be
// especially useful in tracking down bugs or performance problems when an ANTLR user could
// supply the output from this package, but cannot supply the grammar file(s) they are using, even
// privately to the maintainers.
//
// The statistics are gathered by the runtime itself, and are not gathered by the parser or lexer, but the user
// must call this function themselves to analyze the statistics. This is because none of the infrastructure is
// extant unless the calling program is built with the antlr.stats tag like so:
//
//	go build -tags antlr.stats .
//
// When a program is built with the antlr.stats tag, the Statistics object is created and available outside
// the package. The user can then call the [Statistics.Analyze] function to analyze the statistics and then call the
// [Statistics.Report] function to report the statistics.
//
// Please forward any questions about this package to the ANTLR discussion groups on GitHub or send them to
// me [Jim Idle] directly at jimi@idle.ws
//
// [Jim Idle]: https://github.com/jim-idle
func (s *goRunStats) Analyze() {

    // Look for anything that looks strange and record it in our local maps etc for the report to present it
    //
    s.CollectionAnomalies()
    s.TopNCollections()
}
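
// A minimal usage sketch for the statistics facility above. It assumes the
// program is built with `go build -tags antlr.stats` and that the output
// directory already exists (Report verifies it with isDirectory). The
// helper name and paths below are hypothetical:
//
//	func runStats() error {
//		if err := antlr.Statistics.Configure(antlr.WithTopN(20)); err != nil {
//			return err
//		}
//		// ... construct and run the generated lexer/parser here ...
//		antlr.Statistics.Analyze()
//		return antlr.Statistics.Report("/tmp/antlr-stats", "myparser")
//	}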

// TopNCollections looks through all the statistical records and gathers the top ten collections by size.
func (s *goRunStats) TopNCollections() {

    // Let's sort the stat records by MaxSize
    //
    sort.Slice(s.jStats, func(i, j int) bool {
        return s.jStats[i].MaxSize > s.jStats[j].MaxSize
    })

    for i := 0; i < len(s.jStats) && i < s.topN; i++ {
        s.topNByMax = append(s.topNByMax, s.jStats[i])
    }

    // Sort by the number of times used
    //
    sort.Slice(s.jStats, func(i, j int) bool {
        return s.jStats[i].Gets+s.jStats[i].Puts > s.jStats[j].Gets+s.jStats[j].Puts
    })
    for i := 0; i < len(s.jStats) && i < s.topN; i++ {
        s.topNByUsed = append(s.topNByUsed, s.jStats[i])
    }
}

// Report dumps an asciidoc formatted report of all the statistics collected during a run to the given dir output
// path, which should represent a directory. Generated files will be prefixed with the given prefix and will be
// given a type name such as `collections` and an .adoc suffix.
func (s *goRunStats) Report(dir string, prefix string) error {

    isDir, err := isDirectory(dir)
    switch {
    case err != nil:
        return err
    case !isDir:
        return fmt.Errorf("output directory `%s` is not a directory", dir)
    }
    s.reportCollections(dir, prefix)

    // Clean out any old data in case the user forgets
    //
    s.Reset()
    return nil
}

func (s *goRunStats) Reset() {
    s.jStats = nil
    s.topNByUsed = nil
    s.topNByMax = nil
}

func (s *goRunStats) reportCollections(dir, prefix string) {
    cname := filepath.Join(dir, ".asciidoctor")
    // If the file doesn't exist, create it, or append to the file
    f, err := os.OpenFile(cname, os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0644)
    if err != nil {
        log.Fatal(err)
    }
    _, _ = f.WriteString(`// .asciidoctorconfig
++++
<style>
body {
font-family: "Quicksand", "Montserrat", "Helvetica";
background-color: black;
}
</style>
++++`)
    _ = f.Close()

    fname := filepath.Join(dir, prefix+"_"+"_"+collectionsFile+"_"+".adoc")
    // If the file doesn't exist, create it, or append to the file
    f, err = os.OpenFile(fname, os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0644)
    if err != nil {
        log.Fatal(err)
    }
    defer func(f *os.File) {
        err := f.Close()
        if err != nil {
            log.Fatal(err)
        }
    }(f)
    _, _ = f.WriteString("= Collections for " + prefix + "\n\n")

    _, _ = f.WriteString("== Summary\n")

    if s.unusedCollections != nil {
        _, _ = f.WriteString("=== Unused Collections\n")
        _, _ = f.WriteString("Unused collections incur a penalty for allocation that makes them a candidate for either\n")
        _, _ = f.WriteString(" removal or optimization. If you are using a collection that is not used, you should\n")
        _, _ = f.WriteString(" consider removing it. If you are using a collection that is used, but not very often,\n")
        _, _ = f.WriteString(" you should consider using lazy initialization to defer the allocation until it is\n")
        _, _ = f.WriteString(" actually needed.\n\n")

        _, _ = f.WriteString("\n.Unused collections\n")
        _, _ = f.WriteString(`[cols="<3,>1"]` + "\n\n")
        _, _ = f.WriteString("|===\n")
        _, _ = f.WriteString("| Type | Count\n")

        for k, v := range s.unusedCollections {
            _, _ = f.WriteString("| " + CollectionDescriptors[k].SybolicName + " | " + strconv.Itoa(v) + "\n")
        }
        _, _ = f.WriteString("|===\n\n")
    }

    _, _ = f.WriteString("\n.Summary of Collections\n")
    _, _ = f.WriteString(`[cols="<3,>1"]` + "\n\n")
    _, _ = f.WriteString("|===\n")
    _, _ = f.WriteString("| Type | Count\n")
    for k, v := range s.counts {
        _, _ = f.WriteString("| " + CollectionDescriptors[k].SybolicName + " | " + strconv.Itoa(v) + "\n")
    }
    _, _ = f.WriteString("| Total | " + strconv.Itoa(len(s.jStats)) + "\n")
    _, _ = f.WriteString("|===\n\n")

    _, _ = f.WriteString("\n.Summary of Top " + strconv.Itoa(s.topN) + " Collections by MaxSize\n")
    _, _ = f.WriteString(`[cols="<1,<3,>1,>1,>1,>1"]` + "\n\n")
    _, _ = f.WriteString("|===\n")
    _, _ = f.WriteString("| Source | Description | MaxSize | EndSize | Puts | Gets\n")
    for _, c := range s.topNByMax {
        _, _ = f.WriteString("| " + CollectionDescriptors[c.Source].SybolicName + "\n")
        _, _ = f.WriteString("| " + c.Description + "\n")
        _, _ = f.WriteString("| " + strconv.Itoa(c.MaxSize) + "\n")
        _, _ = f.WriteString("| " + strconv.Itoa(c.CurSize) + "\n")
        _, _ = f.WriteString("| " + strconv.Itoa(c.Puts) + "\n")
        _, _ = f.WriteString("| " + strconv.Itoa(c.Gets) + "\n")
        _, _ = f.WriteString("\n")
    }
    _, _ = f.WriteString("|===\n\n")

    _, _ = f.WriteString("\n.Summary of Top " + strconv.Itoa(s.topN) + " Collections by Access\n")
    _, _ = f.WriteString(`[cols="<1,<3,>1,>1,>1,>1,>1"]` + "\n\n")
    _, _ = f.WriteString("|===\n")
    _, _ = f.WriteString("| Source | Description | MaxSize | EndSize | Puts | Gets | P+G\n")
    for _, c := range s.topNByUsed {
        _, _ = f.WriteString("| " + CollectionDescriptors[c.Source].SybolicName + "\n")
        _, _ = f.WriteString("| " + c.Description + "\n")
        _, _ = f.WriteString("| " + strconv.Itoa(c.MaxSize) + "\n")
        _, _ = f.WriteString("| " + strconv.Itoa(c.CurSize) + "\n")
        _, _ = f.WriteString("| " + strconv.Itoa(c.Puts) + "\n")
        _, _ = f.WriteString("| " + strconv.Itoa(c.Gets) + "\n")
        _, _ = f.WriteString("| " + strconv.Itoa(c.Gets+c.Puts) + "\n")
        _, _ = f.WriteString("\n")
    }
    _, _ = f.WriteString("|===\n\n")
}

// AddJStatRec adds a [JStatRec] record to the [goRunStats] collection when the antlr.stats build tag is enabled.
func (s *goRunStats) AddJStatRec(rec *JStatRec) {
    s.jStatsLock.Lock()
    defer s.jStatsLock.Unlock()
    s.jStats = append(s.jStats, rec)
}

// CollectionAnomalies looks through all the statistical records and gathers any anomalies that have been found.
func (s *goRunStats) CollectionAnomalies() {
    s.jStatsLock.RLock()
    defer s.jStatsLock.RUnlock()
    s.counts = make(map[CollectionSource]int, len(s.jStats))
    for _, c := range s.jStats {

        // Accumulate raw counts
        //
        s.counts[c.Source]++

        // Look for allocated but unused collections and count them
        if c.MaxSize == 0 && c.Puts == 0 {
            if s.unusedCollections == nil {
                s.unusedCollections = make(map[CollectionSource]int)
            }
            s.unusedCollections[c.Source]++
        }
        if c.MaxSize > 6000 {
            fmt.Println("Collection ", c.Description, "accumulated a max size of ", c.MaxSize, " - this is probably too large and indicates a poorly formed grammar")
        }
    }

}
23 e2e/vendor/github.com/antlr4-go/antlr/v4/stats_data.go generated vendored Normal file
@@ -0,0 +1,23 @@
package antlr

// A JStatRec is a record of a particular use of a [JStore], [JMap] or [JPCMap] collection. Typically, it will be
// used to look for unused collections that were allocated anyway, problems with hash bucket clashes, and anomalies
// such as huge numbers of Gets with no entries found (GetNoEnt). You can refer to the CollectionAnomalies() function
// for ideas on what can be gleaned from these statistics about collections.
type JStatRec struct {
    Source           CollectionSource
    MaxSize          int
    CurSize          int
    Gets             int
    GetHits          int
    GetMisses        int
    GetHashConflicts int
    GetNoEnt         int
    Puts             int
    PutHits          int
    PutMisses        int
    PutHashConflicts int
    MaxSlotSize      int
    Description      string
    CreateStack      []byte
}
213 e2e/vendor/github.com/antlr4-go/antlr/v4/token.go generated vendored Normal file
@@ -0,0 +1,213 @@
// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
// Use of this file is governed by the BSD 3-clause license that
// can be found in the LICENSE.txt file in the project root.

package antlr

import (
    "strconv"
    "strings"
)

type TokenSourceCharStreamPair struct {
    tokenSource TokenSource
    charStream  CharStream
}

// A token has properties: text, type, line, character position in the line
// (so we can ignore tabs), token channel, index, and source from which
// we obtained this token.
type Token interface {
    GetSource() *TokenSourceCharStreamPair
    GetTokenType() int
    GetChannel() int
    GetStart() int
    GetStop() int
    GetLine() int
    GetColumn() int

    GetText() string
    SetText(s string)

    GetTokenIndex() int
    SetTokenIndex(v int)

    GetTokenSource() TokenSource
    GetInputStream() CharStream

    String() string
}

type BaseToken struct {
    source     *TokenSourceCharStreamPair
    tokenType  int    // token type of the token
    channel    int    // The parser ignores everything not on DEFAULT_CHANNEL
    start      int    // optional; return -1 if not implemented.
    stop       int    // optional; return -1 if not implemented.
    tokenIndex int    // from 0..n-1 of the token object in the input stream
    line       int    // line=1..n of the 1st character
    column     int    // beginning of the line at which it occurs, 0..n-1
    text       string // text of the token.
    readOnly   bool
}

const (
    TokenInvalidType = 0

    // TokenEpsilon - during lookahead operations, this "token" signifies we hit the rule end [ATN] state
    // and did not follow it despite needing to.
    TokenEpsilon = -2

    TokenMinUserTokenType = 1

    TokenEOF = -1

    // TokenDefaultChannel is the default channel upon which tokens are sent to the parser.
    //
    // All tokens go to the parser (unless [Skip] is called in the lexer rule)
    // on a particular "channel". The parser tunes to a particular channel
    // so that whitespace etc... can go to the parser on a "hidden" channel.
    TokenDefaultChannel = 0

    // TokenHiddenChannel defines the normal hidden channel - the parser will not see tokens that are not on [TokenDefaultChannel].
    //
    // Anything on a different channel than TokenDefaultChannel is not parsed by the parser.
    TokenHiddenChannel = 1
)
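
// A hypothetical helper illustrating the channel constants above;
// hidden-channel tokens (e.g. whitespace) and the synthetic EOF token are
// skipped. Not part of the vendored API.
func keepDefaultChannel(tokens []Token) []Token {
    var out []Token
    for _, t := range tokens {
        if t.GetChannel() == TokenDefaultChannel && t.GetTokenType() != TokenEOF {
            out = append(out, t)
        }
    }
    return out
}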

func (b *BaseToken) GetChannel() int {
    return b.channel
}

func (b *BaseToken) GetStart() int {
    return b.start
}

func (b *BaseToken) GetStop() int {
    return b.stop
}

func (b *BaseToken) GetLine() int {
    return b.line
}

func (b *BaseToken) GetColumn() int {
    return b.column
}

func (b *BaseToken) GetTokenType() int {
    return b.tokenType
}

func (b *BaseToken) GetSource() *TokenSourceCharStreamPair {
    return b.source
}

func (b *BaseToken) GetTokenIndex() int {
    return b.tokenIndex
}

func (b *BaseToken) SetTokenIndex(v int) {
    b.tokenIndex = v
}

func (b *BaseToken) GetTokenSource() TokenSource {
    return b.source.tokenSource
}

func (b *BaseToken) GetInputStream() CharStream {
    return b.source.charStream
}

type CommonToken struct {
    BaseToken
}

func NewCommonToken(source *TokenSourceCharStreamPair, tokenType, channel, start, stop int) *CommonToken {

    t := &CommonToken{
        BaseToken: BaseToken{
            source:     source,
            tokenType:  tokenType,
            channel:    channel,
            start:      start,
            stop:       stop,
            tokenIndex: -1,
        },
    }

    if t.source.tokenSource != nil {
        t.line = source.tokenSource.GetLine()
        t.column = source.tokenSource.GetCharPositionInLine()
    } else {
        t.column = -1
    }
    return t
}

// An empty {@link Pair} which is used as the default value of
// {@link //source} for tokens that do not have a source.

//CommonToken.EMPTY_SOURCE = [ nil, nil ]

// clone constructs a new {@link CommonToken} as a copy of this {@link Token}.
//
// <p>
// The newly constructed token shares the {@link Pair} stored in
// {@link //source}, and copies the token index, line, column, and
// {@link //text} of the original.</p>
func (c *CommonToken) clone() *CommonToken {
    t := NewCommonToken(c.source, c.tokenType, c.channel, c.start, c.stop)
    t.tokenIndex = c.GetTokenIndex()
    t.line = c.GetLine()
    t.column = c.GetColumn()
    t.text = c.GetText()
    return t
}

func (c *CommonToken) GetText() string {
    if c.text != "" {
        return c.text
    }
    input := c.GetInputStream()
    if input == nil {
        return ""
    }
    n := input.Size()
    if c.start < n && c.stop < n {
        return input.GetTextFromInterval(NewInterval(c.start, c.stop))
    }
    return "<EOF>"
}

func (c *CommonToken) SetText(text string) {
    c.text = text
}

func (c *CommonToken) String() string {
    txt := c.GetText()
    if txt != "" {
        txt = strings.Replace(txt, "\n", "\\n", -1)
        txt = strings.Replace(txt, "\r", "\\r", -1)
        txt = strings.Replace(txt, "\t", "\\t", -1)
    } else {
        txt = "<no text>"
    }

    var ch string
    if c.channel > 0 {
        ch = ",channel=" + strconv.Itoa(c.channel)
    } else {
        ch = ""
    }

    return "[@" + strconv.Itoa(c.tokenIndex) + "," + strconv.Itoa(c.start) + ":" + strconv.Itoa(c.stop) + "='" +
        txt + "',<" + strconv.Itoa(c.tokenType) + ">" +
        ch + "," + strconv.Itoa(c.line) + ":" + strconv.Itoa(c.column) + "]"
}
17 e2e/vendor/github.com/antlr4-go/antlr/v4/token_source.go generated vendored Normal file
@@ -0,0 +1,17 @@
// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
// Use of this file is governed by the BSD 3-clause license that
// can be found in the LICENSE.txt file in the project root.

package antlr

type TokenSource interface {
    NextToken() Token
    Skip()
    More()
    GetLine() int
    GetCharPositionInLine() int
    GetInputStream() CharStream
    GetSourceName() string
    setTokenFactory(factory TokenFactory)
    GetTokenFactory() TokenFactory
}
21 e2e/vendor/github.com/antlr4-go/antlr/v4/token_stream.go generated vendored Normal file
@@ -0,0 +1,21 @@
// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
// Use of this file is governed by the BSD 3-clause license that
// can be found in the LICENSE.txt file in the project root.

package antlr

type TokenStream interface {
    IntStream

    LT(k int) Token
    Reset()

    Get(index int) Token
    GetTokenSource() TokenSource
    SetTokenSource(TokenSource)

    GetAllText() string
    GetTextFromInterval(Interval) string
    GetTextFromRuleContext(RuleContext) string
    GetTextFromTokens(Token, Token) string
}
662 e2e/vendor/github.com/antlr4-go/antlr/v4/tokenstream_rewriter.go generated vendored Normal file
@@ -0,0 +1,662 @@
// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
// Use of this file is governed by the BSD 3-clause license that
// can be found in the LICENSE.txt file in the project root.

package antlr

import (
    "bytes"
    "fmt"
)

//
// Useful for rewriting out a buffered input token stream after doing some
// augmentation or other manipulations on it.

// <p>
// You can insert stuff, replace, and delete chunks. Note that the operations
// are done lazily--only if you convert the buffer to a {@link String} with
// {@link TokenStream#getText()}. This is very efficient because you are not
// moving data around all the time. As the buffer of tokens is converted to
// strings, the {@link #getText()} method(s) scan the input token stream and
// check to see if there is an operation at the current index. If so, the
// operation is done and then normal {@link String} rendering continues on the
// buffer. This is like having multiple Turing machine instruction streams
// (programs) operating on a single input tape. :)</p>

// <p>
// This rewriter makes no modifications to the token stream. It does not ask the
// stream to fill itself up nor does it advance the input cursor. The token
// stream {@link TokenStream#index()} will return the same value before and
// after any {@link #getText()} call.</p>

// <p>
// The rewriter only works on tokens that you have in the buffer and ignores the
// current input cursor. If you are buffering tokens on-demand, calling
// {@link #getText()} halfway through the input will only do rewrites for those
// tokens in the first half of the file.</p>

// <p>
// Since the operations are done lazily at {@link #getText}-time, operations do
// not screw up the token index values. That is, an insert operation at token
// index {@code i} does not change the index values for tokens
// {@code i}+1..n-1.</p>

// <p>
// Because operations never actually alter the buffer, you may always get the
// original token stream back without undoing anything. Since the instructions
// are queued up, you can easily simulate transactions and roll back any changes
// if there is an error just by removing instructions. For example,</p>

// <pre>
// CharStream input = new ANTLRFileStream("input");
// TLexer lex = new TLexer(input);
// CommonTokenStream tokens = new CommonTokenStream(lex);
// T parser = new T(tokens);
// TokenStreamRewriter rewriter = new TokenStreamRewriter(tokens);
// parser.startRule();
// </pre>

// <p>
// Then in the rules, you can execute (assuming rewriter is visible):</p>

// <pre>
// Token t,u;
// ...
// rewriter.insertAfter(t, "text to put after t");}
// rewriter.insertAfter(u, "text after u");}
// System.out.println(rewriter.getText());
// </pre>

// <p>
// You can also have multiple "instruction streams" and get multiple rewrites
// from a single pass over the input. Just name the instruction streams and use
// that name again when printing the buffer. This could be useful for generating
// a C file and also its header file--all from the same buffer:</p>

// <pre>
// rewriter.insertAfter("pass1", t, "text to put after t");}
// rewriter.insertAfter("pass2", u, "text after u");}
// System.out.println(rewriter.getText("pass1"));
// System.out.println(rewriter.getText("pass2"));
// </pre>

// <p>
// If you don't use named rewrite streams, a "default" stream is used as the
// first example shows.</p>
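
// A rough Go equivalent of the Java snippets above, sketched against the
// API defined in this file; the token index used below is a placeholder.
func rewriteSketch(lex Lexer) string {
    tokens := NewCommonTokenStream(lex, TokenDefaultChannel)
    tokens.Fill() // buffer every token so rewrites can see the whole stream
    rewriter := NewTokenStreamRewriter(tokens)
    rewriter.InsertBeforeDefault(0, "// generated header\n")
    rewriter.InsertAfterDefault(5, " /* after token 5 */") // hypothetical index
    return rewriter.GetTextDefault()
}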

const (
    DefaultProgramName = "default"
    ProgramInitSize    = 100
    MinTokenIndex      = 0
)

// Define the rewrite operation hierarchy

type RewriteOperation interface {

    // Execute the rewrite operation by possibly adding to the buffer.
    // Return the index of the next token to operate on.
    Execute(buffer *bytes.Buffer) int
    String() string
    GetInstructionIndex() int
    GetIndex() int
    GetText() string
    GetOpName() string
    GetTokens() TokenStream
    SetInstructionIndex(val int)
    SetIndex(int)
    SetText(string)
    SetOpName(string)
    SetTokens(TokenStream)
}

type BaseRewriteOperation struct {
    //Current index of rewrites list
    instructionIndex int
    //Token buffer index
    index int
    //Substitution text
    text string
    //Actual operation name
    opName string
    //Pointer to token stream
    tokens TokenStream
}

func (op *BaseRewriteOperation) GetInstructionIndex() int {
    return op.instructionIndex
}

func (op *BaseRewriteOperation) GetIndex() int {
    return op.index
}

func (op *BaseRewriteOperation) GetText() string {
    return op.text
}

func (op *BaseRewriteOperation) GetOpName() string {
    return op.opName
}

func (op *BaseRewriteOperation) GetTokens() TokenStream {
    return op.tokens
}

func (op *BaseRewriteOperation) SetInstructionIndex(val int) {
    op.instructionIndex = val
}

func (op *BaseRewriteOperation) SetIndex(val int) {
    op.index = val
}

func (op *BaseRewriteOperation) SetText(val string) {
    op.text = val
}

func (op *BaseRewriteOperation) SetOpName(val string) {
    op.opName = val
}

func (op *BaseRewriteOperation) SetTokens(val TokenStream) {
    op.tokens = val
}

func (op *BaseRewriteOperation) Execute(_ *bytes.Buffer) int {
    return op.index
}

func (op *BaseRewriteOperation) String() string {
    // The value at the operation index is a Token, not an integer, so %v
    // is the appropriate verb here.
    return fmt.Sprintf("<%s@%v:\"%s\">",
        op.opName,
        op.tokens.Get(op.GetIndex()),
        op.text,
    )
}

type InsertBeforeOp struct {
    BaseRewriteOperation
}

func NewInsertBeforeOp(index int, text string, stream TokenStream) *InsertBeforeOp {
    return &InsertBeforeOp{BaseRewriteOperation: BaseRewriteOperation{
        index:  index,
        text:   text,
        opName: "InsertBeforeOp",
        tokens: stream,
    }}
}

func (op *InsertBeforeOp) Execute(buffer *bytes.Buffer) int {
    buffer.WriteString(op.text)
    if op.tokens.Get(op.index).GetTokenType() != TokenEOF {
        buffer.WriteString(op.tokens.Get(op.index).GetText())
    }
    return op.index + 1
}

func (op *InsertBeforeOp) String() string {
    return op.BaseRewriteOperation.String()
}

// InsertAfterOp distinguishes between insert after/before to do the "insert after" instructions
// first and then the "insert before" instructions at same index. Implementation
// of "insert after" is "insert before index+1".
type InsertAfterOp struct {
    BaseRewriteOperation
}

func NewInsertAfterOp(index int, text string, stream TokenStream) *InsertAfterOp {
    return &InsertAfterOp{
        BaseRewriteOperation: BaseRewriteOperation{
            index:  index + 1,
            text:   text,
            opName: "InsertAfterOp", // set for parity with the other operations
            tokens: stream,
        },
    }
}

func (op *InsertAfterOp) Execute(buffer *bytes.Buffer) int {
    buffer.WriteString(op.text)
    if op.tokens.Get(op.index).GetTokenType() != TokenEOF {
        buffer.WriteString(op.tokens.Get(op.index).GetText())
    }
    return op.index + 1
}

func (op *InsertAfterOp) String() string {
    return op.BaseRewriteOperation.String()
}

// ReplaceOp tries to replace range from x..y with (y-x)+1 ReplaceOp
// instructions.
type ReplaceOp struct {
    BaseRewriteOperation
    LastIndex int
}

func NewReplaceOp(from, to int, text string, stream TokenStream) *ReplaceOp {
    return &ReplaceOp{
        BaseRewriteOperation: BaseRewriteOperation{
            index:  from,
            text:   text,
            opName: "ReplaceOp",
            tokens: stream,
        },
        LastIndex: to,
    }
}

func (op *ReplaceOp) Execute(buffer *bytes.Buffer) int {
    if op.text != "" {
        buffer.WriteString(op.text)
    }
    return op.LastIndex + 1
}

func (op *ReplaceOp) String() string {
    if op.text == "" {
        return fmt.Sprintf("<DeleteOP@%v..%v>",
            op.tokens.Get(op.index), op.tokens.Get(op.LastIndex))
    }
    return fmt.Sprintf("<ReplaceOp@%v..%v:\"%s\">",
        op.tokens.Get(op.index), op.tokens.Get(op.LastIndex), op.text)
}

type TokenStreamRewriter struct {
    //Our source stream
    tokens TokenStream
    // You may have multiple, named streams of rewrite operations.
    // I'm calling these things "programs."
    // Maps String (name) → rewrite (List)
    programs                map[string][]RewriteOperation
    lastRewriteTokenIndexes map[string]int
}

func NewTokenStreamRewriter(tokens TokenStream) *TokenStreamRewriter {
    return &TokenStreamRewriter{
        tokens: tokens,
        programs: map[string][]RewriteOperation{
            DefaultProgramName: make([]RewriteOperation, 0, ProgramInitSize),
        },
        lastRewriteTokenIndexes: map[string]int{},
    }
}

func (tsr *TokenStreamRewriter) GetTokenStream() TokenStream {
    return tsr.tokens
}

// Rollback the instruction stream for a program so that
// the indicated instruction (via instructionIndex) is no
// longer in the stream. UNTESTED!
func (tsr *TokenStreamRewriter) Rollback(programName string, instructionIndex int) {
    is, ok := tsr.programs[programName]
    if ok {
        tsr.programs[programName] = is[MinTokenIndex:instructionIndex]
    }
}

func (tsr *TokenStreamRewriter) RollbackDefault(instructionIndex int) {
    tsr.Rollback(DefaultProgramName, instructionIndex)
}

// DeleteProgram Reset the program so that no instructions exist
func (tsr *TokenStreamRewriter) DeleteProgram(programName string) {
    tsr.Rollback(programName, MinTokenIndex) //TODO: double test on that cause lower bound is not included
}

func (tsr *TokenStreamRewriter) DeleteProgramDefault() {
    tsr.DeleteProgram(DefaultProgramName)
}

func (tsr *TokenStreamRewriter) InsertAfter(programName string, index int, text string) {
    // to insert after, just insert before next index (even if past end)
    var op RewriteOperation = NewInsertAfterOp(index, text, tsr.tokens)
    rewrites := tsr.GetProgram(programName)
    op.SetInstructionIndex(len(rewrites))
    tsr.AddToProgram(programName, op)
}

func (tsr *TokenStreamRewriter) InsertAfterDefault(index int, text string) {
    tsr.InsertAfter(DefaultProgramName, index, text)
}

func (tsr *TokenStreamRewriter) InsertAfterToken(programName string, token Token, text string) {
    tsr.InsertAfter(programName, token.GetTokenIndex(), text)
}

func (tsr *TokenStreamRewriter) InsertBefore(programName string, index int, text string) {
    var op RewriteOperation = NewInsertBeforeOp(index, text, tsr.tokens)
    rewrites := tsr.GetProgram(programName)
    op.SetInstructionIndex(len(rewrites))
    tsr.AddToProgram(programName, op)
}

func (tsr *TokenStreamRewriter) InsertBeforeDefault(index int, text string) {
    tsr.InsertBefore(DefaultProgramName, index, text)
}

func (tsr *TokenStreamRewriter) InsertBeforeToken(programName string, token Token, text string) {
    tsr.InsertBefore(programName, token.GetTokenIndex(), text)
}

func (tsr *TokenStreamRewriter) Replace(programName string, from, to int, text string) {
    if from > to || from < 0 || to < 0 || to >= tsr.tokens.Size() {
        panic(fmt.Sprintf("replace: range invalid: %d..%d(size=%d)",
            from, to, tsr.tokens.Size()))
    }
    var op RewriteOperation = NewReplaceOp(from, to, text, tsr.tokens)
    rewrites := tsr.GetProgram(programName)
    op.SetInstructionIndex(len(rewrites))
    tsr.AddToProgram(programName, op)
}

func (tsr *TokenStreamRewriter) ReplaceDefault(from, to int, text string) {
    tsr.Replace(DefaultProgramName, from, to, text)
}

func (tsr *TokenStreamRewriter) ReplaceDefaultPos(index int, text string) {
    tsr.ReplaceDefault(index, index, text)
}

func (tsr *TokenStreamRewriter) ReplaceToken(programName string, from, to Token, text string) {
    tsr.Replace(programName, from.GetTokenIndex(), to.GetTokenIndex(), text)
}

func (tsr *TokenStreamRewriter) ReplaceTokenDefault(from, to Token, text string) {
    tsr.ReplaceToken(DefaultProgramName, from, to, text)
}

func (tsr *TokenStreamRewriter) ReplaceTokenDefaultPos(index Token, text string) {
    tsr.ReplaceTokenDefault(index, index, text)
}

func (tsr *TokenStreamRewriter) Delete(programName string, from, to int) {
    tsr.Replace(programName, from, to, "")
}

func (tsr *TokenStreamRewriter) DeleteDefault(from, to int) {
    tsr.Delete(DefaultProgramName, from, to)
}

func (tsr *TokenStreamRewriter) DeleteDefaultPos(index int) {
    tsr.DeleteDefault(index, index)
}

func (tsr *TokenStreamRewriter) DeleteToken(programName string, from, to Token) {
    tsr.ReplaceToken(programName, from, to, "")
}

func (tsr *TokenStreamRewriter) DeleteTokenDefault(from, to Token) {
    tsr.DeleteToken(DefaultProgramName, from, to)
}

func (tsr *TokenStreamRewriter) GetLastRewriteTokenIndex(programName string) int {
    i, ok := tsr.lastRewriteTokenIndexes[programName]
    if !ok {
        return -1
    }
    return i
}

func (tsr *TokenStreamRewriter) GetLastRewriteTokenIndexDefault() int {
    return tsr.GetLastRewriteTokenIndex(DefaultProgramName)
}

func (tsr *TokenStreamRewriter) SetLastRewriteTokenIndex(programName string, i int) {
    tsr.lastRewriteTokenIndexes[programName] = i
}

func (tsr *TokenStreamRewriter) InitializeProgram(name string) []RewriteOperation {
    is := make([]RewriteOperation, 0, ProgramInitSize)
    tsr.programs[name] = is
    return is
}

func (tsr *TokenStreamRewriter) AddToProgram(name string, op RewriteOperation) {
    is := tsr.GetProgram(name)
    is = append(is, op)
    tsr.programs[name] = is
}

func (tsr *TokenStreamRewriter) GetProgram(name string) []RewriteOperation {
    is, ok := tsr.programs[name]
    if !ok {
        is = tsr.InitializeProgram(name)
    }
    return is
}

// GetTextDefault returns the text from the original tokens altered per the
// instructions given to this rewriter.
func (tsr *TokenStreamRewriter) GetTextDefault() string {
    return tsr.GetText(
        DefaultProgramName,
        NewInterval(0, tsr.tokens.Size()-1))
}

// GetText returns the text from the original tokens altered per the
// instructions given to this rewriter.
func (tsr *TokenStreamRewriter) GetText(programName string, interval Interval) string {
    rewrites := tsr.programs[programName]
    start := interval.Start
    stop := interval.Stop
    // ensure start/end are in range
    stop = min(stop, tsr.tokens.Size()-1)
    start = max(start, 0)
    if len(rewrites) == 0 {
        return tsr.tokens.GetTextFromInterval(interval) // no instructions to execute
    }
    buf := bytes.Buffer{}
    // First, optimize instruction stream
    indexToOp := reduceToSingleOperationPerIndex(rewrites)
    // Walk buffer, executing instructions and emitting tokens
    for i := start; i <= stop && i < tsr.tokens.Size(); {
        op := indexToOp[i]
        delete(indexToOp, i) // remove so any left have index size-1
        t := tsr.tokens.Get(i)
        if op == nil {
            // no operation at that index, just dump token
            if t.GetTokenType() != TokenEOF {
                buf.WriteString(t.GetText())
            }
            i++ // move to next token
        } else {
            i = op.Execute(&buf) // execute operation and skip
        }
    }
    // include stuff after end if it's last index in buffer
    // So, if they did an insertAfter(lastValidIndex, "foo"), include
    // foo if end==lastValidIndex.
    if stop == tsr.tokens.Size()-1 {
        // Scan any remaining operations after last token
        // should be included (they will be inserts).
        for _, op := range indexToOp {
            if op.GetIndex() >= tsr.tokens.Size()-1 {
                buf.WriteString(op.GetText())
            }
        }
    }
    return buf.String()
}

// reduceToSingleOperationPerIndex combines operations and reports invalid operations (like
// overlapping replaces that are not completely nested). Inserts to the
// same index need to be combined etc...
//
// Here are the cases:
//
//	I.i.u I.j.v                          leave alone, non-overlapping
//	I.i.u I.i.v                          combine: Iivu
//
//	R.i-j.u R.x-y.v | i-j in x-y         delete first R
//	R.i-j.u R.i-j.v                      delete first R
//	R.i-j.u R.x-y.v | x-y in i-j         ERROR
//	R.i-j.u R.x-y.v | boundaries overlap ERROR
//
//	Delete special case of replace (text==null):
//	D.i-j.u D.x-y.v | boundaries overlap combine to min(left)..max(right)
//
//	I.i.u R.x-y.v | i in (x+1)-y         delete I (since insert before
//	                                     we're not deleting i)
//	I.i.u R.x-y.v | i not in (x+1)-y     leave alone, non-overlapping
//	R.x-y.v I.i.u | i in x-y             ERROR
//	R.x-y.v I.x.u                        R.x-y.uv (combine, delete I)
//	R.x-y.v I.i.u | i not in x-y         leave alone, non-overlapping
//
//	I.i.u = insert u before op @ index i
//	R.x-y.u = replace x-y indexed tokens with u
//
// First we need to examine replaces. For any replace op:
//
//	1. wipe out any insertions before op within that range.
//	2. Drop any replace op before that is contained completely within
//	   that range.
//	3. Throw exception upon boundary overlap with any previous replace.
//
// Then we can deal with inserts:
//
//	1. for any inserts to same index, combine even if not adjacent.
//	2. for any prior replace with same left boundary, combine this
//	   insert with replace and delete this 'replace'.
//	3. throw exception if index in same range as previous replace
//
// Don't actually delete; make op null in list. Easier to walk list.
// Later we can throw as we add to index → op map.
//
// Note that I.2 R.2-2 will wipe out I.2 even though, technically, the
// inserted stuff would be before the 'replace' range. But, if you
// add tokens in front of a method body '{' and then delete the method
// body, I think the stuff before the '{' you added should disappear too.
//
// The func returns a map from token index to operation.
func reduceToSingleOperationPerIndex(rewrites []RewriteOperation) map[int]RewriteOperation {
    // WALK REPLACES
    for i := 0; i < len(rewrites); i++ {
        op := rewrites[i]
        if op == nil {
            continue
        }
        rop, ok := op.(*ReplaceOp)
        if !ok {
            continue
        }
        // Wipe prior inserts within range
        for j := 0; j < i && j < len(rewrites); j++ {
            if iop, ok := rewrites[j].(*InsertBeforeOp); ok {
                if iop.index == rop.index {
                    // E.g., insert before 2, delete 2..2; update replace
                    // text to include insert before, kill insert
                    rewrites[iop.instructionIndex] = nil
                    if rop.text != "" {
                        rop.text = iop.text + rop.text
                    } else {
                        rop.text = iop.text
                    }
                } else if iop.index > rop.index && iop.index <= rop.LastIndex {
                    // delete insert as it's a no-op.
                    rewrites[iop.instructionIndex] = nil
                }
            }
        }
        // Drop any prior replaces contained within
        for j := 0; j < i && j < len(rewrites); j++ {
            if prevop, ok := rewrites[j].(*ReplaceOp); ok {
                if prevop.index >= rop.index && prevop.LastIndex <= rop.LastIndex {
                    // delete replace as it's a no-op.
                    rewrites[prevop.instructionIndex] = nil
                    continue
                }
                // throw exception unless disjoint or identical
                disjoint := prevop.LastIndex < rop.index || prevop.index > rop.LastIndex
                // Delete special case of replace (text==null):
                // D.i-j.u D.x-y.v | boundaries overlap combine to min(left)..max(right)
                if prevop.text == "" && rop.text == "" && !disjoint {
                    rewrites[prevop.instructionIndex] = nil
                    rop.index = min(prevop.index, rop.index)
                    rop.LastIndex = max(prevop.LastIndex, rop.LastIndex)
                } else if !disjoint {
                    panic("replace op boundaries of " + rop.String() + " overlap with previous " + prevop.String())
                }
            }
        }
    }
    // WALK INSERTS
    for i := 0; i < len(rewrites); i++ {
        op := rewrites[i]
        if op == nil {
            continue
        }
        //hack to replicate inheritance in composition
        _, iok := rewrites[i].(*InsertBeforeOp)
        _, aok := rewrites[i].(*InsertAfterOp)
        if !iok && !aok {
            continue
        }
        iop := rewrites[i]
        // combine current insert with prior if any at same index
        // deviating a bit from TokenStreamRewriter.java - hard to incorporate inheritance logic
        for j := 0; j < i && j < len(rewrites); j++ {
            if nextIop, ok := rewrites[j].(*InsertAfterOp); ok {
                if nextIop.index == iop.GetIndex() {
                    iop.SetText(nextIop.text + iop.GetText())
                    rewrites[j] = nil
                }
            }
            if prevIop, ok := rewrites[j].(*InsertBeforeOp); ok {
                if prevIop.index == iop.GetIndex() {
                    iop.SetText(iop.GetText() + prevIop.text)
                    rewrites[prevIop.instructionIndex] = nil
                }
            }
        }
        // look for replaces where iop.index is in range; error
        for j := 0; j < i && j < len(rewrites); j++ {
            if rop, ok := rewrites[j].(*ReplaceOp); ok {
                if iop.GetIndex() == rop.index {
                    rop.text = iop.GetText() + rop.text
                    rewrites[i] = nil
                    continue
                }
                if iop.GetIndex() >= rop.index && iop.GetIndex() <= rop.LastIndex {
                    panic("insert op " + iop.String() + " within boundaries of previous " + rop.String())
                }
            }
        }
    }
    m := map[int]RewriteOperation{}
    for i := 0; i < len(rewrites); i++ {
        op := rewrites[i]
        if op == nil {
            continue
        }
        if _, ok := m[op.GetIndex()]; ok {
            panic("should only be one op per index")
        }
        m[op.GetIndex()] = op
    }
    return m
}
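
// A sketch of the insert/replace conflict rule described above; the
// indices are hypothetical token positions.
func overlapSketch(tokens TokenStream) {
    rw := NewTokenStreamRewriter(tokens)
    rw.ReplaceDefault(2, 4, "X")
    rw.InsertBeforeDefault(3, "Y") // lands strictly inside 2..4
    // Rendering reduces the program first, which panics with
    // "insert op ... within boundaries of previous ..." for this input.
    _ = rw.GetTextDefault()
}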

/*
Quick fixes for Go's lack of overloads
*/

func max(a, b int) int {
    if a > b {
        return a
    }
    return b
}
func min(a, b int) int {
    if a < b {
        return a
    }
    return b
}
32 e2e/vendor/github.com/antlr4-go/antlr/v4/trace_listener.go generated vendored Normal file
@@ -0,0 +1,32 @@
// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
// Use of this file is governed by the BSD 3-clause license that
// can be found in the LICENSE.txt file in the project root.

package antlr

import "fmt"

type TraceListener struct {
    parser *BaseParser
}

func NewTraceListener(parser *BaseParser) *TraceListener {
    tl := new(TraceListener)
    tl.parser = parser
    return tl
}

func (t *TraceListener) VisitErrorNode(_ ErrorNode) {
}

func (t *TraceListener) EnterEveryRule(ctx ParserRuleContext) {
    fmt.Println("enter " + t.parser.GetRuleNames()[ctx.GetRuleIndex()] + ", LT(1)=" + t.parser.input.LT(1).GetText())
}

func (t *TraceListener) VisitTerminal(node TerminalNode) {
    fmt.Println("consume " + fmt.Sprint(node.GetSymbol()) + " rule " + t.parser.GetRuleNames()[t.parser.ctx.GetRuleIndex()])
}

func (t *TraceListener) ExitEveryRule(ctx ParserRuleContext) {
    fmt.Println("exit " + t.parser.GetRuleNames()[ctx.GetRuleIndex()] + ", LT(1)=" + t.parser.input.LT(1).GetText())
}
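
// A hedged sketch of wiring the listener into a generated parser; the
// parser type, constructor, and entry rule below are hypothetical
// (generated parsers embed *BaseParser):
//
//	p := NewMyGrammarParser(tokens)
//	p.SetTrace(NewTraceListener(p.BaseParser)) // prints enter/exit/consume events
//	p.StartRule()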
439 e2e/vendor/github.com/antlr4-go/antlr/v4/transition.go generated vendored Normal file
@@ -0,0 +1,439 @@
// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
// Use of this file is governed by the BSD 3-clause license that
// can be found in the LICENSE.txt file in the project root.

package antlr

import (
    "fmt"
    "strconv"
    "strings"
)

// atom, set, epsilon, action, predicate, rule transitions.
//
// <p>This is a one way link. It emanates from a state (usually via a list of
// transitions) and has a target state.</p>
//
// <p>Since we never have to change the ATN transitions once we construct it,
// we can fix these transitions as specific classes. The DFA transitions, on
// the other hand, need to update the labels as they add transitions to the
// states. We'll use the term Edge for the DFA to distinguish them from
// ATN transitions.</p>
type Transition interface {
    getTarget() ATNState
    setTarget(ATNState)
    getIsEpsilon() bool
    getLabel() *IntervalSet
    getSerializationType() int
    Matches(int, int, int) bool
}

type BaseTransition struct {
    target            ATNState
    isEpsilon         bool
    label             int
    intervalSet       *IntervalSet
    serializationType int
}

func NewBaseTransition(target ATNState) *BaseTransition {

    if target == nil {
        panic("target cannot be nil.")
    }

    t := new(BaseTransition)

    t.target = target
    // Are we epsilon, action, sempred?
    t.isEpsilon = false
    t.intervalSet = nil

    return t
}

func (t *BaseTransition) getTarget() ATNState {
    return t.target
}

func (t *BaseTransition) setTarget(s ATNState) {
    t.target = s
}

func (t *BaseTransition) getIsEpsilon() bool {
    return t.isEpsilon
}

func (t *BaseTransition) getLabel() *IntervalSet {
    return t.intervalSet
}

func (t *BaseTransition) getSerializationType() int {
    return t.serializationType
}

func (t *BaseTransition) Matches(_, _, _ int) bool {
    panic("Not implemented")
}

const (
    TransitionEPSILON    = 1
    TransitionRANGE      = 2
    TransitionRULE       = 3
    TransitionPREDICATE  = 4 // e.g., {isType(input.LT(1))}?
    TransitionATOM       = 5
    TransitionACTION     = 6
    TransitionSET        = 7 // ~(A|B) or ~atom, wildcard, which convert to next 2
    TransitionNOTSET     = 8
    TransitionWILDCARD   = 9
    TransitionPRECEDENCE = 10
)

//goland:noinspection GoUnusedGlobalVariable
var TransitionserializationNames = []string{
    "INVALID",
    "EPSILON",
    "RANGE",
    "RULE",
    "PREDICATE",
    "ATOM",
    "ACTION",
    "SET",
    "NOT_SET",
    "WILDCARD",
    "PRECEDENCE",
}
|
||||
|
||||
//var TransitionserializationTypes struct {
|
||||
// EpsilonTransition int
|
||||
// RangeTransition int
|
||||
// RuleTransition int
|
||||
// PredicateTransition int
|
||||
// AtomTransition int
|
||||
// ActionTransition int
|
||||
// SetTransition int
|
||||
// NotSetTransition int
|
||||
// WildcardTransition int
|
||||
// PrecedencePredicateTransition int
|
||||
//}{
|
||||
// TransitionEPSILON,
|
||||
// TransitionRANGE,
|
||||
// TransitionRULE,
|
||||
// TransitionPREDICATE,
|
||||
// TransitionATOM,
|
||||
// TransitionACTION,
|
||||
// TransitionSET,
|
||||
// TransitionNOTSET,
|
||||
// TransitionWILDCARD,
|
||||
// TransitionPRECEDENCE
|
||||
//}
|
||||
|
||||
// AtomTransition
|
||||
// TODO: make all transitions sets? no, should remove set edges
|
||||
type AtomTransition struct {
|
||||
BaseTransition
|
||||
}
|
||||
|
||||
func NewAtomTransition(target ATNState, intervalSet int) *AtomTransition {
|
||||
t := &AtomTransition{
|
||||
BaseTransition: BaseTransition{
|
||||
target: target,
|
||||
serializationType: TransitionATOM,
|
||||
label: intervalSet,
|
||||
isEpsilon: false,
|
||||
},
|
||||
}
|
||||
t.intervalSet = t.makeLabel()
|
||||
|
||||
return t
|
||||
}
|
||||
|
||||
func (t *AtomTransition) makeLabel() *IntervalSet {
|
||||
s := NewIntervalSet()
|
||||
s.addOne(t.label)
|
||||
return s
|
||||
}
|
||||
|
||||
func (t *AtomTransition) Matches(symbol, _, _ int) bool {
|
||||
return t.label == symbol
|
||||
}
|
||||
|
||||
func (t *AtomTransition) String() string {
|
||||
return strconv.Itoa(t.label)
|
||||
}
|
||||
|
||||
type RuleTransition struct {
|
||||
BaseTransition
|
||||
followState ATNState
|
||||
ruleIndex, precedence int
|
||||
}
|
||||
|
||||
func NewRuleTransition(ruleStart ATNState, ruleIndex, precedence int, followState ATNState) *RuleTransition {
|
||||
return &RuleTransition{
|
||||
BaseTransition: BaseTransition{
|
||||
target: ruleStart,
|
||||
isEpsilon: true,
|
||||
serializationType: TransitionRULE,
|
||||
},
|
||||
ruleIndex: ruleIndex,
|
||||
precedence: precedence,
|
||||
followState: followState,
|
||||
}
|
||||
}
|
||||
|
||||
func (t *RuleTransition) Matches(_, _, _ int) bool {
|
||||
return false
|
||||
}
|
||||
|
||||
type EpsilonTransition struct {
|
||||
BaseTransition
|
||||
outermostPrecedenceReturn int
|
||||
}
|
||||
|
||||
func NewEpsilonTransition(target ATNState, outermostPrecedenceReturn int) *EpsilonTransition {
|
||||
return &EpsilonTransition{
|
||||
BaseTransition: BaseTransition{
|
||||
target: target,
|
||||
serializationType: TransitionEPSILON,
|
||||
isEpsilon: true,
|
||||
},
|
||||
outermostPrecedenceReturn: outermostPrecedenceReturn,
|
||||
}
|
||||
}
|
||||
|
||||
func (t *EpsilonTransition) Matches(_, _, _ int) bool {
|
||||
return false
|
||||
}
|
||||
|
||||
func (t *EpsilonTransition) String() string {
|
||||
return "epsilon"
|
||||
}
|
||||
|
||||
type RangeTransition struct {
|
||||
BaseTransition
|
||||
start, stop int
|
||||
}
|
||||
|
||||
func NewRangeTransition(target ATNState, start, stop int) *RangeTransition {
|
||||
t := &RangeTransition{
|
||||
BaseTransition: BaseTransition{
|
||||
target: target,
|
||||
serializationType: TransitionRANGE,
|
||||
isEpsilon: false,
|
||||
},
|
||||
start: start,
|
||||
stop: stop,
|
||||
}
|
||||
t.intervalSet = t.makeLabel()
|
||||
return t
|
||||
}
|
||||
|
||||
func (t *RangeTransition) makeLabel() *IntervalSet {
|
||||
s := NewIntervalSet()
|
||||
s.addRange(t.start, t.stop)
|
||||
return s
|
||||
}
|
||||
|
||||
func (t *RangeTransition) Matches(symbol, _, _ int) bool {
|
||||
return symbol >= t.start && symbol <= t.stop
|
||||
}
|
||||
|
||||
func (t *RangeTransition) String() string {
|
||||
var sb strings.Builder
|
||||
sb.WriteByte('\'')
|
||||
sb.WriteRune(rune(t.start))
|
||||
sb.WriteString("'..'")
|
||||
sb.WriteRune(rune(t.stop))
|
||||
sb.WriteByte('\'')
|
||||
return sb.String()
|
||||
}
|
||||
|
||||
type AbstractPredicateTransition interface {
|
||||
Transition
|
||||
IAbstractPredicateTransitionFoo()
|
||||
}
|
||||
|
||||
type BaseAbstractPredicateTransition struct {
|
||||
BaseTransition
|
||||
}
|
||||
|
||||
func NewBasePredicateTransition(target ATNState) *BaseAbstractPredicateTransition {
|
||||
return &BaseAbstractPredicateTransition{
|
||||
BaseTransition: BaseTransition{
|
||||
target: target,
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
func (a *BaseAbstractPredicateTransition) IAbstractPredicateTransitionFoo() {}
|
||||
|
||||
type PredicateTransition struct {
|
||||
BaseAbstractPredicateTransition
|
||||
isCtxDependent bool
|
||||
ruleIndex, predIndex int
|
||||
}
|
||||
|
||||
func NewPredicateTransition(target ATNState, ruleIndex, predIndex int, isCtxDependent bool) *PredicateTransition {
|
||||
return &PredicateTransition{
|
||||
BaseAbstractPredicateTransition: BaseAbstractPredicateTransition{
|
||||
BaseTransition: BaseTransition{
|
||||
target: target,
|
||||
serializationType: TransitionPREDICATE,
|
||||
isEpsilon: true,
|
||||
},
|
||||
},
|
||||
isCtxDependent: isCtxDependent,
|
||||
ruleIndex: ruleIndex,
|
||||
predIndex: predIndex,
|
||||
}
|
||||
}
|
||||
|
||||
func (t *PredicateTransition) Matches(_, _, _ int) bool {
|
||||
return false
|
||||
}
|
||||
|
||||
func (t *PredicateTransition) getPredicate() *Predicate {
|
||||
return NewPredicate(t.ruleIndex, t.predIndex, t.isCtxDependent)
|
||||
}
|
||||
|
||||
func (t *PredicateTransition) String() string {
|
||||
return "pred_" + strconv.Itoa(t.ruleIndex) + ":" + strconv.Itoa(t.predIndex)
|
||||
}
|
||||
|
||||
type ActionTransition struct {
|
||||
BaseTransition
|
||||
isCtxDependent bool
|
||||
ruleIndex, actionIndex, predIndex int
|
||||
}
|
||||
|
||||
func NewActionTransition(target ATNState, ruleIndex, actionIndex int, isCtxDependent bool) *ActionTransition {
|
||||
return &ActionTransition{
|
||||
BaseTransition: BaseTransition{
|
||||
target: target,
|
||||
serializationType: TransitionACTION,
|
||||
isEpsilon: true,
|
||||
},
|
||||
isCtxDependent: isCtxDependent,
|
||||
ruleIndex: ruleIndex,
|
||||
actionIndex: actionIndex,
|
||||
}
|
||||
}
|
||||
|
||||
func (t *ActionTransition) Matches(_, _, _ int) bool {
|
||||
return false
|
||||
}
|
||||
|
||||
func (t *ActionTransition) String() string {
|
||||
return "action_" + strconv.Itoa(t.ruleIndex) + ":" + strconv.Itoa(t.actionIndex)
|
||||
}
|
||||
|
||||
type SetTransition struct {
|
||||
BaseTransition
|
||||
}
|
||||
|
||||
func NewSetTransition(target ATNState, set *IntervalSet) *SetTransition {
|
||||
t := &SetTransition{
|
||||
BaseTransition: BaseTransition{
|
||||
target: target,
|
||||
serializationType: TransitionSET,
|
||||
},
|
||||
}
|
||||
|
||||
if set != nil {
|
||||
t.intervalSet = set
|
||||
} else {
|
||||
t.intervalSet = NewIntervalSet()
|
||||
t.intervalSet.addOne(TokenInvalidType)
|
||||
}
|
||||
return t
|
||||
}
|
||||
|
||||
func (t *SetTransition) Matches(symbol, _, _ int) bool {
|
||||
return t.intervalSet.contains(symbol)
|
||||
}
|
||||
|
||||
func (t *SetTransition) String() string {
|
||||
return t.intervalSet.String()
|
||||
}
|
||||
|
||||
type NotSetTransition struct {
|
||||
SetTransition
|
||||
}
|
||||
|
||||
func NewNotSetTransition(target ATNState, set *IntervalSet) *NotSetTransition {
|
||||
t := &NotSetTransition{
|
||||
SetTransition: SetTransition{
|
||||
BaseTransition: BaseTransition{
|
||||
target: target,
|
||||
serializationType: TransitionNOTSET,
|
||||
},
|
||||
},
|
||||
}
|
||||
if set != nil {
|
||||
t.intervalSet = set
|
||||
} else {
|
||||
t.intervalSet = NewIntervalSet()
|
||||
t.intervalSet.addOne(TokenInvalidType)
|
||||
}
|
||||
|
||||
return t
|
||||
}
|
||||
|
||||
func (t *NotSetTransition) Matches(symbol, minVocabSymbol, maxVocabSymbol int) bool {
|
||||
return symbol >= minVocabSymbol && symbol <= maxVocabSymbol && !t.intervalSet.contains(symbol)
|
||||
}
|
||||
|
||||
func (t *NotSetTransition) String() string {
|
||||
return "~" + t.intervalSet.String()
|
||||
}
|
||||
|
||||
type WildcardTransition struct {
|
||||
BaseTransition
|
||||
}
|
||||
|
||||
func NewWildcardTransition(target ATNState) *WildcardTransition {
|
||||
return &WildcardTransition{
|
||||
BaseTransition: BaseTransition{
|
||||
target: target,
|
||||
serializationType: TransitionWILDCARD,
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
func (t *WildcardTransition) Matches(symbol, minVocabSymbol, maxVocabSymbol int) bool {
|
||||
return symbol >= minVocabSymbol && symbol <= maxVocabSymbol
|
||||
}
|
||||
|
||||
func (t *WildcardTransition) String() string {
|
||||
return "."
|
||||
}
|
||||
|
||||
type PrecedencePredicateTransition struct {
|
||||
BaseAbstractPredicateTransition
|
||||
precedence int
|
||||
}
|
||||
|
||||
func NewPrecedencePredicateTransition(target ATNState, precedence int) *PrecedencePredicateTransition {
|
||||
return &PrecedencePredicateTransition{
|
||||
BaseAbstractPredicateTransition: BaseAbstractPredicateTransition{
|
||||
BaseTransition: BaseTransition{
|
||||
target: target,
|
||||
serializationType: TransitionPRECEDENCE,
|
||||
isEpsilon: true,
|
||||
},
|
||||
},
|
||||
precedence: precedence,
|
||||
}
|
||||
}
|
||||
|
||||
func (t *PrecedencePredicateTransition) Matches(_, _, _ int) bool {
|
||||
return false
|
||||
}
|
||||
|
||||
func (t *PrecedencePredicateTransition) getPredicate() *PrecedencePredicate {
|
||||
return NewPrecedencePredicate(t.precedence)
|
||||
}
|
||||
|
||||
func (t *PrecedencePredicateTransition) String() string {
|
||||
return fmt.Sprint(t.precedence) + " >= _p"
|
||||
}
|
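A brief usage sketch, not part of the vendored file: the exported behavior of a transition is Matches, which asks whether the transition can consume a given symbol within a vocabulary range. The helper below is hypothetical, written as if it lived inside the antlr package, and the nil targets are tolerable only because Matches never consults the target state.

// In-package sketch of how the different transition kinds match symbols.
func exampleTransitionMatches() {
	// A range transition matches any symbol inside its inclusive bounds.
	r := NewRangeTransition(nil, 'a', 'z')
	fmt.Println(r.Matches('m', 0, 127)) // true: 'm' lies within 'a'..'z'

	// A not-set transition matches symbols in the vocabulary that are
	// absent from its interval set.
	s := NewIntervalSet()
	s.addOne(',')
	not := NewNotSetTransition(nil, s)
	fmt.Println(not.Matches('x', 0, 127)) // true: 'x' is not in the set
	fmt.Println(not.Matches(',', 0, 127)) // false: ',' is in the set
}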
304
e2e/vendor/github.com/antlr4-go/antlr/v4/tree.go
generated
vendored
Normal file
@ -0,0 +1,304 @@
// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
// Use of this file is governed by the BSD 3-clause license that
// can be found in the LICENSE.txt file in the project root.

package antlr

// The basic notion of a tree has a parent, a payload, and a list of children.
// It is the most abstract interface for all the trees used by ANTLR.

var TreeInvalidInterval = NewInterval(-1, -2)

type Tree interface {
	GetParent() Tree
	SetParent(Tree)
	GetPayload() interface{}
	GetChild(i int) Tree
	GetChildCount() int
	GetChildren() []Tree
}

type SyntaxTree interface {
	Tree
	GetSourceInterval() Interval
}

type ParseTree interface {
	SyntaxTree
	Accept(Visitor ParseTreeVisitor) interface{}
	GetText() string
	ToStringTree([]string, Recognizer) string
}

type RuleNode interface {
	ParseTree
	GetRuleContext() RuleContext
}

type TerminalNode interface {
	ParseTree
	GetSymbol() Token
}

type ErrorNode interface {
	TerminalNode

	errorNode()
}

type ParseTreeVisitor interface {
	Visit(tree ParseTree) interface{}
	VisitChildren(node RuleNode) interface{}
	VisitTerminal(node TerminalNode) interface{}
	VisitErrorNode(node ErrorNode) interface{}
}

type BaseParseTreeVisitor struct{}

var _ ParseTreeVisitor = &BaseParseTreeVisitor{}

func (v *BaseParseTreeVisitor) Visit(tree ParseTree) interface{}         { return tree.Accept(v) }
func (v *BaseParseTreeVisitor) VisitChildren(_ RuleNode) interface{}     { return nil }
func (v *BaseParseTreeVisitor) VisitTerminal(_ TerminalNode) interface{} { return nil }
func (v *BaseParseTreeVisitor) VisitErrorNode(_ ErrorNode) interface{}   { return nil }

// TODO: Implement this?
//func (this ParseTreeVisitor) Visit(ctx) {
//	if (Utils.isArray(ctx)) {
//		self := this
//		return ctx.map(function(child) { return VisitAtom(self, child)})
//	} else {
//		return VisitAtom(this, ctx)
//	}
//}
//
//func VisitAtom(Visitor, ctx) {
//	if (ctx.parser == nil) { //is terminal
//		return
//	}
//
//	name := ctx.parser.ruleNames[ctx.ruleIndex]
//	funcName := "Visit" + Utils.titleCase(name)
//
//	return Visitor[funcName](ctx)
//}

type ParseTreeListener interface {
	VisitTerminal(node TerminalNode)
	VisitErrorNode(node ErrorNode)
	EnterEveryRule(ctx ParserRuleContext)
	ExitEveryRule(ctx ParserRuleContext)
}

type BaseParseTreeListener struct{}

var _ ParseTreeListener = &BaseParseTreeListener{}

func (l *BaseParseTreeListener) VisitTerminal(_ TerminalNode)       {}
func (l *BaseParseTreeListener) VisitErrorNode(_ ErrorNode)         {}
func (l *BaseParseTreeListener) EnterEveryRule(_ ParserRuleContext) {}
func (l *BaseParseTreeListener) ExitEveryRule(_ ParserRuleContext)  {}

type TerminalNodeImpl struct {
	parentCtx RuleContext
	symbol    Token
}

var _ TerminalNode = &TerminalNodeImpl{}

func NewTerminalNodeImpl(symbol Token) *TerminalNodeImpl {
	tn := new(TerminalNodeImpl)

	tn.parentCtx = nil
	tn.symbol = symbol

	return tn
}

func (t *TerminalNodeImpl) GetChild(_ int) Tree {
	return nil
}

func (t *TerminalNodeImpl) GetChildren() []Tree {
	return nil
}

func (t *TerminalNodeImpl) SetChildren(_ []Tree) {
	panic("Cannot set children on terminal node")
}

func (t *TerminalNodeImpl) GetSymbol() Token {
	return t.symbol
}

func (t *TerminalNodeImpl) GetParent() Tree {
	return t.parentCtx
}

func (t *TerminalNodeImpl) SetParent(tree Tree) {
	t.parentCtx = tree.(RuleContext)
}

func (t *TerminalNodeImpl) GetPayload() interface{} {
	return t.symbol
}

func (t *TerminalNodeImpl) GetSourceInterval() Interval {
	if t.symbol == nil {
		return TreeInvalidInterval
	}
	tokenIndex := t.symbol.GetTokenIndex()
	return NewInterval(tokenIndex, tokenIndex)
}

func (t *TerminalNodeImpl) GetChildCount() int {
	return 0
}

func (t *TerminalNodeImpl) Accept(v ParseTreeVisitor) interface{} {
	return v.VisitTerminal(t)
}

func (t *TerminalNodeImpl) GetText() string {
	return t.symbol.GetText()
}

func (t *TerminalNodeImpl) String() string {
	if t.symbol.GetTokenType() == TokenEOF {
		return "<EOF>"
	}

	return t.symbol.GetText()
}

func (t *TerminalNodeImpl) ToStringTree(_ []string, _ Recognizer) string {
	return t.String()
}

// Represents a token that was consumed during resynchronization
// rather than during a valid Match operation. For example,
// we will create this kind of a node during single token insertion
// and deletion as well as during "consume until error recovery set"
// upon no viable alternative exceptions.

type ErrorNodeImpl struct {
	*TerminalNodeImpl
}

var _ ErrorNode = &ErrorNodeImpl{}

func NewErrorNodeImpl(token Token) *ErrorNodeImpl {
	en := new(ErrorNodeImpl)
	en.TerminalNodeImpl = NewTerminalNodeImpl(token)
	return en
}

func (e *ErrorNodeImpl) errorNode() {}

func (e *ErrorNodeImpl) Accept(v ParseTreeVisitor) interface{} {
	return v.VisitErrorNode(e)
}

type ParseTreeWalker struct{}

func NewParseTreeWalker() *ParseTreeWalker {
	return new(ParseTreeWalker)
}

// Walk performs a walk on the given parse tree starting at the root and going down recursively
// with depth-first search. On each node, [EnterRule] is called before
// recursively walking down into child nodes, then [ExitRule] is called after the recursive call to wind up.
func (p *ParseTreeWalker) Walk(listener ParseTreeListener, t Tree) {
	switch tt := t.(type) {
	case ErrorNode:
		listener.VisitErrorNode(tt)
	case TerminalNode:
		listener.VisitTerminal(tt)
	default:
		p.EnterRule(listener, t.(RuleNode))
		for i := 0; i < t.GetChildCount(); i++ {
			child := t.GetChild(i)
			p.Walk(listener, child)
		}
		p.ExitRule(listener, t.(RuleNode))
	}
}

// EnterRule enters a grammar rule by first triggering the generic event [ParseTreeListener].[EnterEveryRule],
// then by triggering the event specific to the given parse tree node.
func (p *ParseTreeWalker) EnterRule(listener ParseTreeListener, r RuleNode) {
	ctx := r.GetRuleContext().(ParserRuleContext)
	listener.EnterEveryRule(ctx)
	ctx.EnterRule(listener)
}

// ExitRule exits a grammar rule by first triggering the event specific to the given parse tree node,
// then by triggering the generic event [ParseTreeListener].ExitEveryRule.
func (p *ParseTreeWalker) ExitRule(listener ParseTreeListener, r RuleNode) {
	ctx := r.GetRuleContext().(ParserRuleContext)
	ctx.ExitRule(listener)
	listener.ExitEveryRule(ctx)
}

//goland:noinspection GoUnusedGlobalVariable
var ParseTreeWalkerDefault = NewParseTreeWalker()

type IterativeParseTreeWalker struct {
	*ParseTreeWalker
}

//goland:noinspection GoUnusedExportedFunction
func NewIterativeParseTreeWalker() *IterativeParseTreeWalker {
	return new(IterativeParseTreeWalker)
}

func (i *IterativeParseTreeWalker) Walk(listener ParseTreeListener, t Tree) {
	var stack []Tree
	var indexStack []int
	currentNode := t
	currentIndex := 0

	for currentNode != nil {
		// pre-order visit
		switch tt := currentNode.(type) {
		case ErrorNode:
			listener.VisitErrorNode(tt)
		case TerminalNode:
			listener.VisitTerminal(tt)
		default:
			i.EnterRule(listener, currentNode.(RuleNode))
		}
		// Move down to the first child, if it exists
		if currentNode.GetChildCount() > 0 {
			stack = append(stack, currentNode)
			indexStack = append(indexStack, currentIndex)
			currentIndex = 0
			currentNode = currentNode.GetChild(0)
			continue
		}

		for {
			// post-order visit
			if ruleNode, ok := currentNode.(RuleNode); ok {
				i.ExitRule(listener, ruleNode)
			}
			// No parent, so no siblings
			if len(stack) == 0 {
				currentNode = nil
				currentIndex = 0
				break
			}
			// Move to the next sibling if possible
			currentIndex++
			if stack[len(stack)-1].GetChildCount() > currentIndex {
				currentNode = stack[len(stack)-1].GetChild(currentIndex)
				break
			}
			// No next sibling, so move up
			currentNode, stack = stack[len(stack)-1], stack[:len(stack)-1]
			currentIndex, indexStack = indexStack[len(indexStack)-1], indexStack[:len(indexStack)-1]
		}
	}
}
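A usage sketch, not part of the vendored file: consumers typically embed BaseParseTreeListener, override only the callbacks they need, and hand the listener plus a parse tree to a walker. A minimal in-package example, assuming the tree comes from an actual parse elsewhere:

// ruleCounter counts rule contexts entered during a walk.
type ruleCounter struct {
	BaseParseTreeListener
	rules int
}

func (c *ruleCounter) EnterEveryRule(_ ParserRuleContext) { c.rules++ }

func countRules(tree Tree) int {
	c := &ruleCounter{}
	// Walk fires VisitTerminal/VisitErrorNode on leaves and
	// EnterEveryRule/ExitEveryRule around every rule node.
	ParseTreeWalkerDefault.Walk(c, tree)
	return c.rules
}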
142
e2e/vendor/github.com/antlr4-go/antlr/v4/trees.go
generated
vendored
Normal file
@ -0,0 +1,142 @@
// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
// Use of this file is governed by the BSD 3-clause license that
// can be found in the LICENSE.txt file in the project root.

package antlr

import "fmt"

/** A set of utility routines useful for all kinds of ANTLR trees. */

// TreesStringTree prints out a whole tree in LISP form. [getNodeText] is used on the
// node payloads to get the text for the nodes. Detects parse trees and extracts data appropriately.
func TreesStringTree(tree Tree, ruleNames []string, recog Recognizer) string {

	if recog != nil {
		ruleNames = recog.GetRuleNames()
	}

	s := TreesGetNodeText(tree, ruleNames, nil)

	s = EscapeWhitespace(s, false)
	c := tree.GetChildCount()
	if c == 0 {
		return s
	}
	res := "(" + s + " "
	if c > 0 {
		s = TreesStringTree(tree.GetChild(0), ruleNames, nil)
		res += s
	}
	for i := 1; i < c; i++ {
		s = TreesStringTree(tree.GetChild(i), ruleNames, nil)
		res += " " + s
	}
	res += ")"
	return res
}

func TreesGetNodeText(t Tree, ruleNames []string, recog Parser) string {
	if recog != nil {
		ruleNames = recog.GetRuleNames()
	}

	if ruleNames != nil {
		switch t2 := t.(type) {
		case RuleNode:
			t3 := t2.GetRuleContext()
			altNumber := t3.GetAltNumber()

			if altNumber != ATNInvalidAltNumber {
				return fmt.Sprintf("%s:%d", ruleNames[t3.GetRuleIndex()], altNumber)
			}
			return ruleNames[t3.GetRuleIndex()]
		case ErrorNode:
			return fmt.Sprint(t2)
		case TerminalNode:
			if t2.GetSymbol() != nil {
				return t2.GetSymbol().GetText()
			}
		}
	}

	// no recognition for rule names
	payload := t.GetPayload()
	if p2, ok := payload.(Token); ok {
		return p2.GetText()
	}

	return fmt.Sprint(t.GetPayload())
}

// TreesGetChildren returns an ordered list of all children of this node
//
//goland:noinspection GoUnusedExportedFunction
func TreesGetChildren(t Tree) []Tree {
	list := make([]Tree, 0)
	for i := 0; i < t.GetChildCount(); i++ {
		list = append(list, t.GetChild(i))
	}
	return list
}

// TreesgetAncestors returns a list of all ancestors of this node. The first node of the list is the root
// and the last node is the parent of this node.
//
//goland:noinspection GoUnusedExportedFunction
func TreesgetAncestors(t Tree) []Tree {
	ancestors := make([]Tree, 0)
	t = t.GetParent()
	for t != nil {
		f := []Tree{t}
		ancestors = append(f, ancestors...)
		t = t.GetParent()
	}
	return ancestors
}

//goland:noinspection GoUnusedExportedFunction
func TreesFindAllTokenNodes(t ParseTree, ttype int) []ParseTree {
	return TreesfindAllNodes(t, ttype, true)
}

//goland:noinspection GoUnusedExportedFunction
func TreesfindAllRuleNodes(t ParseTree, ruleIndex int) []ParseTree {
	return TreesfindAllNodes(t, ruleIndex, false)
}

func TreesfindAllNodes(t ParseTree, index int, findTokens bool) []ParseTree {
	nodes := make([]ParseTree, 0)
	treesFindAllNodes(t, index, findTokens, &nodes)
	return nodes
}

func treesFindAllNodes(t ParseTree, index int, findTokens bool, nodes *[]ParseTree) {
	// check this node (the root) first

	t2, ok := t.(TerminalNode)
	t3, ok2 := t.(ParserRuleContext)

	if findTokens && ok {
		if t2.GetSymbol().GetTokenType() == index {
			*nodes = append(*nodes, t2)
		}
	} else if !findTokens && ok2 {
		if t3.GetRuleIndex() == index {
			*nodes = append(*nodes, t3)
		}
	}
	// check children
	for i := 0; i < t.GetChildCount(); i++ {
		treesFindAllNodes(t.GetChild(i).(ParseTree), index, findTokens, nodes)
	}
}

//goland:noinspection GoUnusedExportedFunction
func TreesDescendants(t ParseTree) []ParseTree {
	nodes := []ParseTree{t}
	for i := 0; i < t.GetChildCount(); i++ {
		nodes = append(nodes, TreesDescendants(t.GetChild(i).(ParseTree))...)
	}
	return nodes
}
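A usage sketch, not part of the vendored file: given a parse tree and the rule names of a generated parser, these helpers render and inspect the tree. The function below is hypothetical, and tree/ruleNames are assumed to come from an actual parse elsewhere:

// dumpTree prints the tree in LISP form, then the text of each direct child.
func dumpTree(tree Tree, ruleNames []string) {
	fmt.Println(TreesStringTree(tree, ruleNames, nil))
	for _, child := range TreesGetChildren(tree) {
		fmt.Println(TreesGetNodeText(child, ruleNames, nil))
	}
}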
328
e2e/vendor/github.com/antlr4-go/antlr/v4/utils.go
generated
vendored
Normal file
@ -0,0 +1,328 @@
// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
// Use of this file is governed by the BSD 3-clause license that
// can be found in the LICENSE.txt file in the project root.

package antlr

import (
	"bytes"
	"errors"
	"fmt"
	"math/bits"
	"os"
	"strconv"
	"strings"
	"syscall"
)

func intMin(a, b int) int {
	if a < b {
		return a
	}
	return b
}

func intMax(a, b int) int {
	if a > b {
		return a
	}
	return b
}

// A simple integer stack

type IntStack []int

var ErrEmptyStack = errors.New("stack is empty")

func (s *IntStack) Pop() (int, error) {
	l := len(*s) - 1
	if l < 0 {
		return 0, ErrEmptyStack
	}
	v := (*s)[l]
	*s = (*s)[0:l]
	return v, nil
}

func (s *IntStack) Push(e int) {
	*s = append(*s, e)
}

const bitsPerWord = 64

func indexForBit(bit int) int {
	return bit / bitsPerWord
}

//goland:noinspection GoUnusedExportedFunction,GoUnusedFunction
func wordForBit(data []uint64, bit int) uint64 {
	idx := indexForBit(bit)
	if idx >= len(data) {
		return 0
	}
	return data[idx]
}

func maskForBit(bit int) uint64 {
	return uint64(1) << (bit % bitsPerWord)
}

func wordsNeeded(bit int) int {
	return indexForBit(bit) + 1
}

type BitSet struct {
	data []uint64
}

// NewBitSet creates a new bitwise set
// TODO: See if we can replace with the standard library's BitSet
func NewBitSet() *BitSet {
	return &BitSet{}
}

func (b *BitSet) add(value int) {
	idx := indexForBit(value)
	if idx >= len(b.data) {
		size := wordsNeeded(value)
		data := make([]uint64, size)
		copy(data, b.data)
		b.data = data
	}
	b.data[idx] |= maskForBit(value)
}

func (b *BitSet) clear(index int) {
	idx := indexForBit(index)
	if idx >= len(b.data) {
		return
	}
	b.data[idx] &= ^maskForBit(index)
}

func (b *BitSet) or(set *BitSet) {
	// Get the min size necessary to represent the bits in both sets.
	bLen := b.minLen()
	setLen := set.minLen()
	maxLen := intMax(bLen, setLen)
	if maxLen > len(b.data) {
		// Increase the size of len(b.data) to represent the bits in both sets.
		data := make([]uint64, maxLen)
		copy(data, b.data)
		b.data = data
	}
	// len(b.data) is at least setLen.
	for i := 0; i < setLen; i++ {
		b.data[i] |= set.data[i]
	}
}

func (b *BitSet) remove(value int) {
	b.clear(value)
}

func (b *BitSet) contains(value int) bool {
	idx := indexForBit(value)
	if idx >= len(b.data) {
		return false
	}
	return (b.data[idx] & maskForBit(value)) != 0
}

func (b *BitSet) minValue() int {
	for i, v := range b.data {
		if v == 0 {
			continue
		}
		return i*bitsPerWord + bits.TrailingZeros64(v)
	}
	return 2147483647
}

func (b *BitSet) equals(other interface{}) bool {
	otherBitSet, ok := other.(*BitSet)
	if !ok {
		return false
	}

	if b == otherBitSet {
		return true
	}

	// We only compare set bits, so we cannot rely on the two slices having the same size. It's
	// possible for two BitSets to have different slice lengths but the same set bits. So we only
	// compare the relevant words and ignore the trailing zeros.
	bLen := b.minLen()
	otherLen := otherBitSet.minLen()

	if bLen != otherLen {
		return false
	}

	for i := 0; i < bLen; i++ {
		if b.data[i] != otherBitSet.data[i] {
			return false
		}
	}

	return true
}

func (b *BitSet) minLen() int {
	for i := len(b.data); i > 0; i-- {
		if b.data[i-1] != 0 {
			return i
		}
	}
	return 0
}

func (b *BitSet) length() int {
	cnt := 0
	for _, val := range b.data {
		cnt += bits.OnesCount64(val)
	}
	return cnt
}

func (b *BitSet) String() string {
	vals := make([]string, 0, b.length())

	for i, v := range b.data {
		for v != 0 {
			n := bits.TrailingZeros64(v)
			vals = append(vals, strconv.Itoa(i*bitsPerWord+n))
			v &= ^(uint64(1) << n)
		}
	}

	return "{" + strings.Join(vals, ", ") + "}"
}

type AltDict struct {
	data map[string]interface{}
}

func NewAltDict() *AltDict {
	d := new(AltDict)
	d.data = make(map[string]interface{})
	return d
}

func (a *AltDict) Get(key string) interface{} {
	key = "k-" + key
	return a.data[key]
}

func (a *AltDict) put(key string, value interface{}) {
	key = "k-" + key
	a.data[key] = value
}

func (a *AltDict) values() []interface{} {
	vs := make([]interface{}, len(a.data))
	i := 0
	for _, v := range a.data {
		vs[i] = v
		i++
	}
	return vs
}

func EscapeWhitespace(s string, escapeSpaces bool) string {

	s = strings.Replace(s, "\t", "\\t", -1)
	s = strings.Replace(s, "\n", "\\n", -1)
	s = strings.Replace(s, "\r", "\\r", -1)
	if escapeSpaces {
		s = strings.Replace(s, " ", "\u00B7", -1)
	}
	return s
}

//goland:noinspection GoUnusedExportedFunction
func TerminalNodeToStringArray(sa []TerminalNode) []string {
	st := make([]string, len(sa))

	for i, s := range sa {
		st[i] = fmt.Sprintf("%v", s)
	}

	return st
}

//goland:noinspection GoUnusedExportedFunction
func PrintArrayJavaStyle(sa []string) string {
	var buffer bytes.Buffer

	buffer.WriteString("[")

	for i, s := range sa {
		buffer.WriteString(s)
		if i != len(sa)-1 {
			buffer.WriteString(", ")
		}
	}

	buffer.WriteString("]")

	return buffer.String()
}

// murmur hash
func murmurInit(seed int) int {
	return seed
}

func murmurUpdate(h int, value int) int {
	const c1 uint32 = 0xCC9E2D51
	const c2 uint32 = 0x1B873593
	const r1 uint32 = 15
	const r2 uint32 = 13
	const m uint32 = 5
	const n uint32 = 0xE6546B64

	k := uint32(value)
	k *= c1
	k = (k << r1) | (k >> (32 - r1))
	k *= c2

	hash := uint32(h) ^ k
	hash = (hash << r2) | (hash >> (32 - r2))
	hash = hash*m + n
	return int(hash)
}

func murmurFinish(h int, numberOfWords int) int {
	var hash = uint32(h)
	hash ^= uint32(numberOfWords) << 2
	hash ^= hash >> 16
	hash *= 0x85ebca6b
	hash ^= hash >> 13
	hash *= 0xc2b2ae35
	hash ^= hash >> 16

	return int(hash)
}

func isDirectory(dir string) (bool, error) {
	fileInfo, err := os.Stat(dir)
	if err != nil {
		switch {
		case errors.Is(err, syscall.ENOENT):
			// The given directory does not exist, so we will try to create it
			//
			err = os.MkdirAll(dir, 0755)
			if err != nil {
				return false, err
			}

			return true, nil
		case err != nil:
			return false, err
		default:
		}
	}
	return fileInfo.IsDir(), err
}
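A brief in-package sketch, not part of the vendored file, of the small utilities above: the LIFO integer stack, the growable bit set, and the murmur helpers that fold a sequence of ints into a single hash value.

func exampleUtils() {
	var s IntStack
	s.Push(1)
	s.Push(2)
	v, _ := s.Pop()
	fmt.Println(v) // 2

	b := NewBitSet()
	b.add(3)
	b.add(70) // forces the backing slice to grow past one 64-bit word
	fmt.Println(b.contains(3), b.contains(4), b.String()) // true false {3, 70}

	h := murmurInit(0)
	h = murmurUpdate(h, 42)
	fmt.Println(murmurFinish(h, 1)) // a deterministic 32-bit hash as an int
}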
14
e2e/vendor/github.com/asaskevich/govalidator/.travis.yml
generated
vendored
Normal file
@ -0,0 +1,14 @@
language: go

go:
  - 1.1
  - 1.2
  - 1.3
  - 1.4
  - 1.5
  - 1.6
  - tip

notifications:
  email:
    - bwatas@gmail.com
63
e2e/vendor/github.com/asaskevich/govalidator/CONTRIBUTING.md
generated
vendored
Normal file
@ -0,0 +1,63 @@
#### Support
If you have a contribution to the package, feel free to create a Pull Request or an Issue.

#### What to contribute
If you don't know where to start, here are some features and functions that still need to be done:

- [ ] Refactor code
- [ ] Edit docs and [README](https://github.com/asaskevich/govalidator/README.md): spellcheck, grammar and typo check
- [ ] Create an up-to-date list of contributors and of projects that currently use this package
- [ ] Resolve [issues and bugs](https://github.com/asaskevich/govalidator/issues)
- [ ] Update the current [list of functions](https://github.com/asaskevich/govalidator#list-of-functions)
- [ ] Update the [list of validators](https://github.com/asaskevich/govalidator#validatestruct-2) that are available for `ValidateStruct`, and add new ones
- [ ] Implement new validators: `IsFQDN`, `IsIMEI`, `IsPostalCode`, `IsISIN`, `IsISRC`, etc.
- [ ] Implement [validation by maps](https://github.com/asaskevich/govalidator/issues/224)
- [ ] Implement fuzzing testing
- [ ] Implement some struct/map/array utilities
- [ ] Implement map/array validation
- [ ] Implement benchmarking
- [ ] Implement a batch of examples
- [ ] Look at forks for new features and fixes

#### Advice
Feel free to create what you want, but keep these points in mind when you implement new features:
- Code must be clear and readable, and the names of variables/constants must clearly describe what they do
- Public functions must be documented in the source file and added to the list of available functions in README.md
- There must be unit tests for any new functions and improvements

## Financial contributions

We also welcome financial contributions in full transparency on our [open collective](https://opencollective.com/govalidator).
Anyone can file an expense. If the expense makes sense for the development of the community, it will be "merged" into the ledger of our open collective by the core contributors, and the person who filed the expense will be reimbursed.


## Credits


### Contributors

Thank you to all the people who have already contributed to govalidator!
<a href="graphs/contributors"><img src="https://opencollective.com/govalidator/contributors.svg?width=890" /></a>


### Backers

Thank you to all our backers! [[Become a backer](https://opencollective.com/govalidator#backer)]

<a href="https://opencollective.com/govalidator#backers" target="_blank"><img src="https://opencollective.com/govalidator/backers.svg?width=890"></a>


### Sponsors

Thank you to all our sponsors! (Please also ask your company to support this open source project by [becoming a sponsor](https://opencollective.com/govalidator#sponsor).)

<a href="https://opencollective.com/govalidator/sponsor/0/website" target="_blank"><img src="https://opencollective.com/govalidator/sponsor/0/avatar.svg"></a>
<a href="https://opencollective.com/govalidator/sponsor/1/website" target="_blank"><img src="https://opencollective.com/govalidator/sponsor/1/avatar.svg"></a>
<a href="https://opencollective.com/govalidator/sponsor/2/website" target="_blank"><img src="https://opencollective.com/govalidator/sponsor/2/avatar.svg"></a>
<a href="https://opencollective.com/govalidator/sponsor/3/website" target="_blank"><img src="https://opencollective.com/govalidator/sponsor/3/avatar.svg"></a>
<a href="https://opencollective.com/govalidator/sponsor/4/website" target="_blank"><img src="https://opencollective.com/govalidator/sponsor/4/avatar.svg"></a>
<a href="https://opencollective.com/govalidator/sponsor/5/website" target="_blank"><img src="https://opencollective.com/govalidator/sponsor/5/avatar.svg"></a>
<a href="https://opencollective.com/govalidator/sponsor/6/website" target="_blank"><img src="https://opencollective.com/govalidator/sponsor/6/avatar.svg"></a>
<a href="https://opencollective.com/govalidator/sponsor/7/website" target="_blank"><img src="https://opencollective.com/govalidator/sponsor/7/avatar.svg"></a>
<a href="https://opencollective.com/govalidator/sponsor/8/website" target="_blank"><img src="https://opencollective.com/govalidator/sponsor/8/avatar.svg"></a>
<a href="https://opencollective.com/govalidator/sponsor/9/website" target="_blank"><img src="https://opencollective.com/govalidator/sponsor/9/avatar.svg"></a>