*: put goyacc tool into vendor (#1499)

tiancaiamao
2016-07-27 10:30:10 +08:00
committed by GitHub
parent 04da3d2f8b
commit c2831f6290
59 changed files with 19696 additions and 5 deletions

View File

@ -37,7 +37,7 @@ LDFLAGS += -X "github.com/pingcap/tidb/util/printer.TiDBGitHash=$(shell git rev-
TARGET = ""
-.PHONY: all build install update parser clean todo test gotest interpreter server
+.PHONY: all build install update parser clean todo test gotest interpreter server goyacc golex
all: parser build test check
@ -53,9 +53,15 @@ install:
TEMP_FILE = temp_parser_file
-parser:
-go get github.com/pingcap/goyacc
-go get github.com/qiuyesuifeng/golex
+golex:
+$(GO) get github.com/qiuyesuifeng/golex
+goyacc:
+rm -rf vendor && ln -s _vendor/vendor vendor
+$(GO) install github.com/pingcap/tidb/parser/goyacc
+rm -rf vendor
+parser: goyacc golex
$(GOYACC) -o /dev/null -xegen $(TEMP_FILE) parser/parser.y
$(GOYACC) -o parser/parser.go -xe $(TEMP_FILE) parser/parser.y 2>&1 | egrep "(shift|reduce)/reduce" | awk '{print} END {if (NR > 0) {print "Find conflict in parser.y. Please check y.output for more information."; system("rm -f $(TEMP_FILE)"); exit 1;}}'
rm -f $(TEMP_FILE)

View File

@ -1,7 +1,7 @@
{
"ImportPath": "github.com/pingcap/tidb",
"GoVersion": "go1.6",
"GodepVersion": "v62",
"GodepVersion": "v63",
"Packages": [
"./..."
],
@ -318,6 +318,26 @@
{
"ImportPath": "github.com/ngaut/systimemon",
"Rev": "c2ca1c75c6af5556eb6ce67994120adf12f2ccef"
},
{
"ImportPath": "github.com/cznic/mathutil",
"Rev": "78ad7f262603437f0ecfebc835d80094f89c8f54"
},
{
"ImportPath": "github.com/cznic/parser/yacc",
"Rev": "31edd927e5b19d1c4a260c41a397e7f81d6694d9"
},
{
"ImportPath": "github.com/cznic/sortutil",
"Rev": "4c7342852e65c2088c981288f2c5610d10b9f7f4"
},
{
"ImportPath": "github.com/cznic/strutil",
"Rev": "1eb03e3cc9d345307a45ec82bd3016cde4bd4464"
},
{
"ImportPath": "github.com/cznic/y",
"Rev": "9fdf92d4aac058959f814606bb729ed50f5e4240"
}
]
}

27
_vendor/vendor/github.com/cznic/golex/LICENSE generated vendored Normal file
View File

@ -0,0 +1,27 @@
Copyright (c) 2014 The golex Authors. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
* Neither the names of the authors nor the names of the
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

53
_vendor/vendor/github.com/cznic/golex/lex/Makefile generated vendored Normal file
View File

@ -0,0 +1,53 @@
# Copyright (c) 2015 The golex Authors. All rights reserved.
# Use of this source code is governed by a BSD-style
# license that can be found in the LICENSE file.
.PHONY: all clean cover cpu editor internalError later mem nuke todo
grep=--include=*.go --include=*.l --include=*.y
all: editor
go vet || true
golint || true
make todo
clean:
go clean
rm -f *~ cpu.test mem.test
cover:
t=$(shell tempfile) ; go test -coverprofile $$t && go tool cover -html $$t && unlink $$t
cpu:
go test -c -o cpu.test
./cpu.test -noerr -test.cpuprofile cpu.out
go tool pprof --lines cpu.test cpu.out
editor: example_test.go
gofmt -l -s -w *.go
go test
go install
example_test.go: example.l
golex -o $@ $<
internalError:
egrep -ho '"internal error.*"' *.go | sort | cat -n
later:
@grep -n $(grep) LATER * || true
@grep -n $(grep) MAYBE * || true
mem:
go test -c -o mem.test
./mem.test -test.bench . -test.memprofile mem.out
go tool pprof --lines --web --alloc_space mem.test mem.out
nuke: clean
go clean -i
todo:
@grep -nr $(grep) ^[[:space:]]*_[[:space:]]*=[[:space:]][[:alpha:]][[:alnum:]]* * || true
@grep -nr $(grep) TODO * || true
@grep -nr $(grep) BUG * || true
@grep -nr $(grep) [^[:alpha:]]println * || true

410
_vendor/vendor/github.com/cznic/golex/lex/api.go generated vendored Normal file
View File

@ -0,0 +1,410 @@
// Copyright (c) 2015 The golex Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package lex
import (
"bytes"
"fmt"
"go/token"
"io"
"os"
)
// BOM handling modes which can be set by the BOMMode Option. Default is BOMIgnoreFirst.
const (
BOMError = iota // BOM is an error anywhere.
BOMIgnoreFirst // Skip BOM if at beginning, report as error if anywhere else.
BOMPassAll // No special handling of BOM.
BOMPassFirst // No special handling of BOM if at beginning, report as error if anywhere else.
)
const (
NonASCII = 0x80 // DefaultRuneClass returns NonASCII for non ASCII runes.
RuneEOF = -1 // Distinct from any valid Unicode rune value.
)
// DefaultRuneClass returns the character class of r. If r is an ASCII code
// then its class equals the ASCII code. Any other rune is of class NonASCII.
//
// DefaultRuneClass is the default implementation Lexer will use to convert
// runes (21 bit entities) to scanner classes (8 bit entities).
//
// Non ASCII aware lexical analyzers will typically use their own
// categorization function. To assign such a custom function, use the RuneClass
// option.
func DefaultRuneClass(r rune) int {
if r >= 0 && r < 0x80 {
return int(r)
}
return NonASCII
}
// Char represents a rune and its position.
type Char struct {
Rune rune
pos int32
}
// NewChar returns a new Char value.
func NewChar(pos token.Pos, r rune) Char { return Char{pos: int32(pos), Rune: r} }
// IsValid reports whether c is not a zero Char.
func (c Char) IsValid() bool { return c.Pos().IsValid() }
// Pos returns the token.Pos associated with c.
func (c Char) Pos() token.Pos { return token.Pos(c.pos) }
// CharReader is a RuneReader that additionally provides explicit position
// information by returning a Char instead of a rune as its first result.
type CharReader interface {
ReadChar() (c Char, size int, err error)
}
// Lexer supports golex[0] generated lexical analyzers.
type Lexer struct {
File *token.File // The *token.File passed to New.
First Char // First remembers the lookahead char when Rule0 was invoked.
Last Char // Last remembers the last Char returned by Next.
Prev Char // Prev remembers the Char previous to Last.
bomMode int // See the BOM* constants.
bytesBuf bytes.Buffer // Used by TokenBytes.
charSrc CharReader // Lexer alternative input.
classf func(rune) int //
errorf func(token.Pos, string) //
lookahead Char // Lookahead if non zero.
mark int // Longest match marker.
off int // Used for File.AddLine.
src io.RuneReader // Lexer input.
tokenBuf []Char // Lexeme collector.
ungetBuf []Char // Unget buffer.
}
// New returns a new *Lexer. The result can be amended using opts.
//
// Non Unicode Input
//
// To consume sources in other encodings and still have exact position
// information, pass an io.RuneReader which returns the next input character
// reencoded as a Unicode rune but returns the size (number of bytes used to
// encode it) of the original character, not the size of its UTF-8
// representation after conversion to a Unicode rune. Size is the second
// returned value of io.RuneReader.ReadRune method[4].
//
// When src optionally implements CharReader its ReadChar method is used
// instead of io.ReadRune.
func New(file *token.File, src io.RuneReader, opts ...Option) (*Lexer, error) {
r := &Lexer{
File: file,
bomMode: BOMIgnoreFirst,
classf: DefaultRuneClass,
src: src,
}
if x, ok := src.(CharReader); ok {
r.charSrc = x
}
r.errorf = r.defaultErrorf
for _, o := range opts {
if err := o(r); err != nil {
return nil, err
}
}
return r, nil
}
// Abort handles the situation when the scanner does not successfully recognize
// any token or when an attempt to find the longest match "overruns" from an
// accepting state only to never reach an accepting state again. In the first
// case the scanner was never in an accepting state since the last call to Rule0
// and then (previous lookahead rune, true) is returned, effectively consuming a
// single Char token, avoiding scanner stall. Otherwise there was at least one
// accepting scanner state marked using Mark. In this case Abort rolls back the
// lexer state to the marked state and returns (0, false). The scanner must
// then execute a prescribed goto statement. For example:
//
// %yyc c
// %yyn c = l.Next()
// %yym l.Mark()
//
// %{
// package foo
//
// import (...)
//
// type lexer struct {
// *lex.Lexer
// ...
// }
//
// func newLexer(...) *lexer {
// return &lexer{
// lex.NewLexer(...),
// ...
// }
// }
//
// func (l *lexer) scan() int {
// c := l.Enter()
// %}
//
// ... more lex definitions
//
// %%
//
// c = l.Rule0()
//
// ... lex rules
//
// %%
//
// if c, ok := l.Abort(); ok {
// return c
// }
//
// goto yyAction
// }
func (l *Lexer) Abort() (int, bool) {
if l.mark >= 0 {
if len(l.tokenBuf) > l.mark {
l.Unget(l.lookahead)
for i := len(l.tokenBuf) - 1; i >= l.mark; i-- {
l.Unget(l.tokenBuf[i])
}
}
l.tokenBuf = l.tokenBuf[:l.mark]
return 0, false
}
switch n := len(l.tokenBuf); n {
case 0: // [] z
c := l.lookahead
l.Next()
return int(c.Rune), true
case 1: // [a] z
return int(l.tokenBuf[0].Rune), true
default: // [a, b, ...], z
c := l.tokenBuf[0] // a
l.Unget(l.lookahead) // z
for i := n - 1; i > 1; i-- {
l.Unget(l.tokenBuf[i]) // ...
}
l.lookahead = l.tokenBuf[1] // b
l.tokenBuf = l.tokenBuf[:1]
return int(c.Rune), true
}
}
func (l *Lexer) class() int { return l.classf(l.lookahead.Rune) }
func (l *Lexer) defaultErrorf(pos token.Pos, msg string) {
l.Error(fmt.Sprintf("%v: %v", l.File.Position(pos), msg))
}
// Enter ensures the lexer has a valid lookahead Char and returns its class.
// Typical use in an .l file
//
// func (l *lexer) scan() lex.Char {
// c := l.Enter()
// ...
func (l *Lexer) Enter() int {
if !l.lookahead.IsValid() {
l.Next()
}
return l.class()
}
// Error implements yyLexer[2] by printing the msg to stderr.
func (l *Lexer) Error(msg string) {
fmt.Fprintf(os.Stderr, "%s\n", msg)
}
// Lookahead returns the current lookahead.
func (l *Lexer) Lookahead() Char {
if !l.lookahead.IsValid() {
l.Next()
}
return l.lookahead
}
// Mark records the current state of scanner as accepting. It implements the
// golex macro %yym. Typical usage in an .l file:
//
// %yym l.Mark()
func (l *Lexer) Mark() { l.mark = len(l.tokenBuf) }
func (l *Lexer) next() int {
const bom = '\ufeff'
if c := l.lookahead; c.IsValid() {
l.tokenBuf = append(l.tokenBuf, c)
}
if n := len(l.ungetBuf); n != 0 {
l.lookahead = l.ungetBuf[n-1]
l.ungetBuf = l.ungetBuf[:n-1]
return l.class()
}
if l.src == nil {
return RuneEOF
}
var r rune
var sz int
var err error
var pos token.Pos
var c Char
again:
off0 := l.off
switch cs := l.charSrc; {
case cs != nil:
c, sz, err = cs.ReadChar()
r = c.Rune
pos = c.Pos()
default:
r, sz, err = l.src.ReadRune()
pos = l.File.Pos(l.off)
}
l.off += sz
if err != nil {
l.src = nil
r = RuneEOF
if err != io.EOF {
l.errorf(pos, err.Error())
}
}
if r == bom {
switch l.bomMode {
default:
fallthrough
case BOMIgnoreFirst:
if off0 != 0 {
l.errorf(pos, "unicode (UTF-8) BOM in middle of file")
}
goto again
case BOMPassAll:
// nop
case BOMPassFirst:
if off0 != 0 {
l.errorf(pos, "unicode (UTF-8) BOM in middle of file")
goto again
}
case BOMError:
switch {
case off0 == 0:
l.errorf(pos, "unicode (UTF-8) BOM at beginnig of file")
default:
l.errorf(pos, "unicode (UTF-8) BOM in middle of file")
}
goto again
}
}
l.lookahead = NewChar(pos, r)
if r == '\n' {
l.File.AddLine(l.off)
}
return l.class()
}
// Next advances the scanner for one rune and returns the respective character
// class of the new lookahead. Typical usage in an .l file:
//
// %yyn c = l.Next()
func (l *Lexer) Next() int {
l.Prev = l.Last
r := l.next()
l.Last = l.lookahead
return r
}
// Offset returns the current reading offset of the lexer's source.
func (l *Lexer) Offset() int { return l.off }
// Rule0 initializes the scanner state before the attempt to recognize a token
// starts. The token collecting buffer is cleared. Rule0 records the current
// lookahead in l.First and returns its class. Typical usage in an .l file:
//
// ... lex definitions
//
// %%
//
// c := l.Rule0()
//
// first-pattern-regexp
func (l *Lexer) Rule0() int {
if !l.lookahead.IsValid() {
l.Next()
}
l.First = l.lookahead
l.mark = -1
if len(l.tokenBuf) > 1<<18 { //DONE constant tuned
l.tokenBuf = nil
} else {
l.tokenBuf = l.tokenBuf[:0]
}
return l.class()
}
// Token returns the currently collected token chars. The result is R/O.
func (l *Lexer) Token() []Char { return l.tokenBuf }
// TokenBytes returns the UTF-8 encoding of Token. If builder is not nil then
// it's called instead to build the encoded token byte value into the buffer
// passed to it.
//
// The Result is R/O.
func (l *Lexer) TokenBytes(builder func(*bytes.Buffer)) []byte {
if len(l.bytesBuf.Bytes()) < 1<<18 { //DONE constant tuned
l.bytesBuf.Reset()
} else {
l.bytesBuf = bytes.Buffer{}
}
switch {
case builder != nil:
builder(&l.bytesBuf)
default:
for _, c := range l.Token() {
l.bytesBuf.WriteRune(c.Rune)
}
}
return l.bytesBuf.Bytes()
}
// Unget unreads all chars in c.
func (l *Lexer) Unget(c ...Char) {
l.ungetBuf = append(l.ungetBuf, c...)
l.lookahead = Char{} // Must invalidate lookahead.
}
// Option is a function which can be passed as an optional argument to New.
type Option func(*Lexer) error
// BOMMode option selects how the lexer handles BOMs. See the BOM* constants for details.
func BOMMode(mode int) Option {
return func(l *Lexer) error {
l.bomMode = mode
return nil
}
}
// ErrorFunc option sets a function called when an error, for example an I/O
// error, occurs. The default is to call Error with the position and message
// already formatted as a string.
func ErrorFunc(f func(token.Pos, string)) Option {
return func(l *Lexer) error {
l.errorf = f
return nil
}
}
// RuneClass option sets the function used to convert runes to character
// classes.
func RuneClass(f func(rune) int) Option {
return func(l *Lexer) error {
l.classf = f
return nil
}
}
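As a quick illustration of the api.go surface above (an editor's sketch, not part of the vendored file): the Lexer can be driven by hand, without a golex-generated scanner, using New, Rule0, Lookahead and Next with the import path vendored here.

package main

import (
	"bytes"
	"fmt"
	"go/token"

	"github.com/cznic/golex/lex"
)

func main() {
	const src = "ab"
	fset := token.NewFileSet()
	file := fset.AddFile("x.txt", -1, len(src))

	l, err := lex.New(file, bytes.NewBufferString(src))
	if err != nil {
		panic(err)
	}

	for {
		l.Rule0()          // reset the token buffer and ensure a valid lookahead
		c := l.Lookahead() // current Char (rune plus position)
		if c.Rune == lex.RuneEOF {
			break
		}
		fmt.Printf("%v: %q\n", file.Position(c.Pos()), c.Rune)
		l.Next() // advance by one rune
	}
}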

35
_vendor/vendor/github.com/cznic/golex/lex/dfa generated vendored Normal file
View File

@ -0,0 +1,35 @@
$ golex -DFA example.l
StartConditions:
INITIAL, scId:0, stateId:1
DFA:
[1]
"\t"..."\n", "\r", " ", --> 2
"0"..."9", --> 3
"A"..."Z", "_", "a"..."e", "g"..."z", "\u0080", --> 4
"f"--> 5
[2]
"\t"..."\n", "\r", " ", --> 2
[3]
"0"..."9", --> 3
[4]
"0"..."9", "A"..."Z", "_", "a"..."z", "\u0080"..."\u0081", --> 4
[5]
"0"..."9", "A"..."Z", "_", "a"..."t", "v"..."z", "\u0080"..."\u0081", --> 4
"u"--> 6
[6]
"0"..."9", "A"..."Z", "_", "a"..."m", "o"..."z", "\u0080"..."\u0081", --> 4
"n"--> 7
[7]
"0"..."9", "A"..."Z", "_", "a"..."b", "d"..."z", "\u0080"..."\u0081", --> 4
"c"--> 8
[8]
"0"..."9", "A"..."Z", "_", "a"..."z", "\u0080"..."\u0081", --> 4
state 2 accepts rule 1
state 3 accepts rule 4
state 4 accepts rule 3
state 5 accepts rule 3
state 6 accepts rule 3
state 7 accepts rule 3
state 8 accepts rule 2
$

40
_vendor/vendor/github.com/cznic/golex/lex/doc.go generated vendored Normal file
View File

@ -0,0 +1,40 @@
// Copyright (c) 2015 The golex Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package lex is a Unicode-friendly run time library for golex[0] generated
// lexical analyzers[1].
//
// Changelog
//
// 2015-04-08: Initial release.
//
// Character classes
//
// Golex internally handles only 8 bit "characters". Many Unicode-aware
// tokenizers do not actually need to recognize every Unicode rune, but only
// some particular partitions/subsets, for example a particular Unicode
// category, say upper case letters: Lu.
//
// The idea is to map all runes in a particular set to a single 8 bit
// character allocated outside the ASCII range of codes. The token value, a
// string of runes and their exact positions is collected as usual (see the
// Token and TokenBytes method), but the tokenizer DFA is simpler (and thus
// smaller and perhaps also faster) when this technique is used. In the example
// program (see below), recognizing (and skipping) white space, integer
// literals, one keyword and Go identifiers requires only an 8 state DFA[5].
//
// To provide the conversion from runes to character classes, "install" your
// converting function using the RuneClass option.
//
// References
//
// -
//
// [0]: http://godoc.org/github.com/cznic/golex
// [1]: http://en.wikipedia.org/wiki/Lexical_analysis
// [2]: http://golang.org/cmd/yacc/
// [3]: https://github.com/cznic/golex/blob/master/lex/example.l
// [4]: http://golang.org/pkg/io/#RuneReader
// [5]: https://github.com/cznic/golex/blob/master/lex/dfa
package lex
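A compressed sketch of the character-class technique described above (illustrative only; the complete, generated program is the vendored example.l below): keep ASCII as is, collapse everything else into a few classes allocated in [0x80, 0xFF], and install the mapping with the RuneClass option.

package main

import (
	"unicode"

	"github.com/cznic/golex/lex"
)

// Class values allocated outside the ASCII range.
const (
	classUnicodeLetter = iota + 0x80
	classUnicodeDigit
	classOther
)

func runeClass(r rune) int {
	switch {
	case r >= 0 && r < 0x80:
		return int(r) // ASCII maps to itself
	case unicode.IsLetter(r):
		return classUnicodeLetter
	case unicode.IsDigit(r):
		return classUnicodeDigit
	default:
		return classOther
	}
}

func main() {
	_ = lex.RuneClass(runeClass) // the Option to pass to lex.New
}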

175
_vendor/vendor/github.com/cznic/golex/lex/example.l generated vendored Normal file
View File

@ -0,0 +1,175 @@
%yyc c
%yyn c = l.Next()
%yym l.Mark()
%{
// Copyright (c) 2015 The golex Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// This is an example program using the golex run time library. It is generated by
//
// $ golex -o example_test.go example.l
//
// The complete input file, example.l, is at [3], the scan function excerpt is:
//
// func (l *lexer) scan() lex.Char {
// c := l.Enter()
// %}
//
// digit [0-9]|{unicodeDigit}
// identifier {letter}({letter}|{digit})*
// int [0-9]+
// letter [_a-zA-Z]|{unicodeLetter}
// unicodeDigit \x81
// unicodeLetter \x80
//
// %%
//
// c = l.Rule0()
//
// [ \t\r\n]+
//
// func return l.char(FUNC)
// {identifier} return l.char(IDENT)
// {int} return l.char(INT)
//
//
// %%
// if c, ok := l.Abort(); ok {
// return l.char(c)
// }
//
// goto yyAction
// }
package lex_test
import (
"bytes"
"fmt"
"go/token"
"unicode"
"github.com/cznic/golex/lex"
)
// Allocate Character classes anywhere in [0x80, 0xFF].
const (
classUnicodeLeter = iota + 0x80
classUnicodeDigit
classOther
)
// Parser token values.
const (
FUNC = iota + 0xE002
INT
IDENT
)
// For pretty printing.
func str(r rune) string {
switch r {
case FUNC:
return "FUNC"
case INT:
return "INT"
case IDENT:
return "IDENT"
case lex.RuneEOF:
return "EOF"
}
return fmt.Sprintf("%q", r)
}
type lexer struct {
*lex.Lexer
}
func (l *lexer) char(r int) lex.Char {
return lex.NewChar(l.First.Pos(), rune(r))
}
func rune2Class(r rune) int {
if r >= 0 && r < 0x80 { // Keep ASCII as it is.
return int(r)
}
if unicode.IsLetter(r) {
return classUnicodeLeter
}
if unicode.IsDigit(r) {
return classUnicodeDigit
}
return classOther
}
const src = `
func Xφ42() int { return 314 }
`
func Example_completeGeneratedProgram() { // main
fset := token.NewFileSet()
file := fset.AddFile("example.go", -1, len(src))
src := bytes.NewBufferString(src)
lx, err := lex.New(file, src, lex.RuneClass(rune2Class))
if err != nil {
panic(err)
}
l := &lexer{lx}
for {
c := l.scan()
fmt.Printf("%v: %v %q\n", file.Position(c.Pos()), str(c.Rune), l.TokenBytes(nil))
if c.Rune == lex.RuneEOF {
return
}
}
// Output:
// example.go:3:1: FUNC "func"
// example.go:3:6: IDENT "Xφ42"
// example.go:3:11: '(' "("
// example.go:3:12: ')' ")"
// example.go:3:14: IDENT "int"
// example.go:3:18: '{' "{"
// example.go:3:20: IDENT "return"
// example.go:3:27: INT "314"
// example.go:3:31: '}' "}"
// example.go:4:2: EOF "\xff"
}
func (l *lexer) scan() lex.Char {
c := l.Enter()
%}
digit [0-9]|{unicodeDigit}
identifier {letter}({letter}|{digit})*
int [0-9]+
letter [_a-zA-Z]|{unicodeLetter}
unicodeDigit \x81
unicodeLetter \x80
%%
c = l.Rule0()
[ \t\r\n]+
func return l.char(FUNC)
{identifier} return l.char(IDENT)
{int} return l.char(INT)
%%
if c, ok := l.Abort(); ok {
return l.char(c)
}
goto yyAction
}

12
_vendor/vendor/github.com/cznic/mathutil/AUTHORS generated vendored Normal file
View File

@ -0,0 +1,12 @@
# This file lists authors for copyright purposes. This file is distinct from
# the CONTRIBUTORS files. See the latter for an explanation.
#
# Names should be added to this file as:
# Name or Organization <email address>
#
# The email address is not required for organizations.
#
# Please keep the list sorted.
CZ.NIC z.s.p.o. <kontakt@nic.cz>
Jan Mercl <0xjnml@gmail.com>

10
_vendor/vendor/github.com/cznic/mathutil/CONTRIBUTORS generated vendored Normal file
View File

@ -0,0 +1,10 @@
# This file lists people who contributed code to this repository. The AUTHORS
# file lists the copyright holders; this file lists people.
#
# Names should be added to this file like so:
# Name <email address>
#
# Please keep the list sorted.
Gary Burd <gary@beagledreams.com>
Jan Mercl <0xjnml@gmail.com>

27
_vendor/vendor/github.com/cznic/mathutil/LICENSE generated vendored Normal file
View File

@ -0,0 +1,27 @@
Copyright (c) 2014 The mathutil Authors. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
* Neither the names of the authors nor the names of the
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

31
_vendor/vendor/github.com/cznic/mathutil/Makefile generated vendored Normal file
View File

@ -0,0 +1,31 @@
# Copyright (c) 2014 The mathutil Authors. All rights reserved.
# Use of this source code is governed by a BSD-style
# license that can be found in the LICENSE file.
.PHONY: all todo clean nuke
grep=--include=*.go --include=*.run --include=*.y
all: editor
go build
go vet || true
golint .
go install
make todo
clean:
go clean
editor:
go fmt
go test -i
go test
todo:
@grep -nr $(grep) ^[[:space:]]*_[[:space:]]*=[[:space:]][[:alpha:]][[:alnum:]]* * || true
@grep -nr $(grep) TODO * || true
@grep -nr $(grep) BUG * || true
@grep -nr $(grep) println * || true
nuke: clean
go clean -i

10
_vendor/vendor/github.com/cznic/mathutil/README generated vendored Normal file
View File

@ -0,0 +1,10 @@
This is a goinstall-able mirror of modified code already published at:
http://git.nic.cz/redmine/projects/gornd/repository
Packages in this repository:
Install: $ go get github.com/cznic/mathutil
Godocs: http://godoc.org/github.com/cznic/mathutil
Install: $ go get github.com/cznic/mathutil/mersenne
Godocs: http://godoc.org/github.com/cznic/mathutil/mersenne

207
_vendor/vendor/github.com/cznic/mathutil/bits.go generated vendored Normal file
View File

@ -0,0 +1,207 @@
// Copyright (c) 2014 The mathutil Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package mathutil
import (
"math/big"
)
// BitLenByte returns the bit width of the non zero part of n.
func BitLenByte(n byte) int {
return log2[n] + 1
}
// BitLenUint16 returns the bit width of the non zero part of n.
func BitLenUint16(n uint16) int {
if b := n >> 8; b != 0 {
return log2[b] + 8 + 1
}
return log2[n] + 1
}
// BitLenUint32 returns the bit width of the non zero part of n.
func BitLenUint32(n uint32) int {
if b := n >> 24; b != 0 {
return log2[b] + 24 + 1
}
if b := n >> 16; b != 0 {
return log2[b] + 16 + 1
}
if b := n >> 8; b != 0 {
return log2[b] + 8 + 1
}
return log2[n] + 1
}
// BitLen returns the bit width of the non zero part of n.
func BitLen(n int) int { // Should handle correctly [future] 64 bit Go ints
if IntBits == 64 {
return BitLenUint64(uint64(n))
}
if b := byte(n >> 24); b != 0 {
return log2[b] + 24 + 1
}
if b := byte(n >> 16); b != 0 {
return log2[b] + 16 + 1
}
if b := byte(n >> 8); b != 0 {
return log2[b] + 8 + 1
}
return log2[byte(n)] + 1
}
// BitLenUint returns the bit width of the non zero part of n.
func BitLenUint(n uint) int { // Should handle correctly [future] 64 bit Go uints
if IntBits == 64 {
return BitLenUint64(uint64(n))
}
if b := n >> 24; b != 0 {
return log2[b] + 24 + 1
}
if b := n >> 16; b != 0 {
return log2[b] + 16 + 1
}
if b := n >> 8; b != 0 {
return log2[b] + 8 + 1
}
return log2[n] + 1
}
// BitLenUint64 returns the bit width of the non zero part of n.
func BitLenUint64(n uint64) int {
if b := n >> 56; b != 0 {
return log2[b] + 56 + 1
}
if b := n >> 48; b != 0 {
return log2[b] + 48 + 1
}
if b := n >> 40; b != 0 {
return log2[b] + 40 + 1
}
if b := n >> 32; b != 0 {
return log2[b] + 32 + 1
}
if b := n >> 24; b != 0 {
return log2[b] + 24 + 1
}
if b := n >> 16; b != 0 {
return log2[b] + 16 + 1
}
if b := n >> 8; b != 0 {
return log2[b] + 8 + 1
}
return log2[n] + 1
}
// BitLenUintptr returns the bit width of the non zero part of n.
func BitLenUintptr(n uintptr) int {
if b := n >> 56; b != 0 {
return log2[b] + 56 + 1
}
if b := n >> 48; b != 0 {
return log2[b] + 48 + 1
}
if b := n >> 40; b != 0 {
return log2[b] + 40 + 1
}
if b := n >> 32; b != 0 {
return log2[b] + 32 + 1
}
if b := n >> 24; b != 0 {
return log2[b] + 24 + 1
}
if b := n >> 16; b != 0 {
return log2[b] + 16 + 1
}
if b := n >> 8; b != 0 {
return log2[b] + 8 + 1
}
return log2[n] + 1
}
// PopCountByte returns population count of n (number of bits set in n).
func PopCountByte(n byte) int {
return int(popcnt[byte(n)])
}
// PopCountUint16 returns population count of n (number of bits set in n).
func PopCountUint16(n uint16) int {
return int(popcnt[byte(n>>8)]) + int(popcnt[byte(n)])
}
// PopCountUint32 returns population count of n (number of bits set in n).
func PopCountUint32(n uint32) int {
return int(popcnt[byte(n>>24)]) + int(popcnt[byte(n>>16)]) +
int(popcnt[byte(n>>8)]) + int(popcnt[byte(n)])
}
// PopCount returns population count of n (number of bits set in n).
func PopCount(n int) int { // Should handle correctly [future] 64 bit Go ints
if IntBits == 64 {
return PopCountUint64(uint64(n))
}
return PopCountUint32(uint32(n))
}
// PopCountUint returns population count of n (number of bits set in n).
func PopCountUint(n uint) int { // Should handle correctly [future] 64 bit Go uints
if IntBits == 64 {
return PopCountUint64(uint64(n))
}
return PopCountUint32(uint32(n))
}
// PopCountUintptr returns population count of n (number of bits set in n).
func PopCountUintptr(n uintptr) int {
if UintPtrBits == 64 {
return PopCountUint64(uint64(n))
}
return PopCountUint32(uint32(n))
}
// PopCountUint64 returns population count of n (number of bits set in n).
func PopCountUint64(n uint64) int {
return int(popcnt[byte(n>>56)]) + int(popcnt[byte(n>>48)]) +
int(popcnt[byte(n>>40)]) + int(popcnt[byte(n>>32)]) +
int(popcnt[byte(n>>24)]) + int(popcnt[byte(n>>16)]) +
int(popcnt[byte(n>>8)]) + int(popcnt[byte(n)])
}
// PopCountBigInt returns population count of |n| (number of bits set in |n|).
func PopCountBigInt(n *big.Int) (r int) {
for _, v := range n.Bits() {
r += PopCountUintptr(uintptr(v))
}
return
}
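A tiny usage sketch for the helpers above (editor's illustration, not part of the vendored file); the expected values follow directly from the definitions.

package main

import (
	"fmt"

	"github.com/cznic/mathutil"
)

func main() {
	fmt.Println(mathutil.BitLenUint32(0x80))     // 8: the highest set bit is bit 7
	fmt.Println(mathutil.PopCountUint32(0xF0F0)) // 8: two bytes with four bits set each
}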

46
_vendor/vendor/github.com/cznic/mathutil/envelope.go generated vendored Normal file
View File

@ -0,0 +1,46 @@
// Copyright (c) 2014 The mathutil Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package mathutil
import (
"math"
)
// Approximation type determines approximation methods used by e.g. Envelope.
type Approximation int
// Specific approximation method tags
const (
_ Approximation = iota
Linear // As named
Sinusoidal // Smooth for all derivations
)
// Envelope is a utility for defining simple curves using a small (usually)
// set of data points. Envelope returns a value defined by x, points and
// approximation. The value of x must be in [0,1) otherwise the result is
// undefined or the function may panic. Points are interpreted as dividing the
// [0,1) interval in len(points)-1 sections, so len(points) must be > 1 or the
// function may panic. The resulting value is interpolated between the left and
// right points adjacent to the section, using the chosen approximation
// method. Unsupported values of approximation are silently
// interpreted as 'Linear'.
func Envelope(x float64, points []float64, approximation Approximation) float64 {
step := 1 / float64(len(points)-1)
fslot := math.Floor(x / step)
mod := x - fslot*step
slot := int(fslot)
l, r := points[slot], points[slot+1]
rmod := mod / step
switch approximation {
case Sinusoidal:
k := (math.Sin(math.Pi*(rmod-0.5)) + 1) / 2
return l + (r-l)*k
case Linear:
fallthrough
default:
return l + (r-l)*rmod
}
}
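A worked example of the interpolation described above (editor's sketch, not part of the vendored file): with points {0, 1, 0} the interval [0,1) is split into two sections of width 0.5, so x = 0.25 lies halfway through the first section and the linear result is 0.5.

package main

import (
	"fmt"

	"github.com/cznic/mathutil"
)

func main() {
	points := []float64{0, 1, 0} // section 1 rises from 0 to 1, section 2 falls back to 0
	fmt.Println(mathutil.Envelope(0.25, points, mathutil.Linear)) // 0.5
}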

829
_vendor/vendor/github.com/cznic/mathutil/mathutil.go generated vendored Normal file
View File

@ -0,0 +1,829 @@
// Copyright (c) 2014 The mathutil Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package mathutil provides utilities supplementing the standard 'math' and
// 'math/rand' packages.
//
// Compatibility issues
//
// 2013-12-13: The following functions have been REMOVED
//
// func Uint64ToBigInt(n uint64) *big.Int
// func Uint64FromBigInt(n *big.Int) (uint64, bool)
//
// 2013-05-13: The following functions are now DEPRECATED
//
// func Uint64ToBigInt(n uint64) *big.Int
// func Uint64FromBigInt(n *big.Int) (uint64, bool)
//
// These functions will be REMOVED with Go release 1.1+1.
//
// 2013-01-21: The following functions have been REMOVED
//
// func MaxInt() int
// func MinInt() int
// func MaxUint() uint
// func UintPtrBits() int
//
// They are now replaced by untyped constants
//
// MaxInt
// MinInt
// MaxUint
// UintPtrBits
//
// Additionally one more untyped constant was added
//
// IntBits
//
// This change breaks any existing code depending on the above removed
// functions. They should not have been published in the first place; that was
// unfortunate. Instead, defining such architecture and/or implementation
// specific integer limits and bit widths as untyped constants improves
// performance and allows for static dead code elimination if it depends on
// these values. Thanks to minux for pointing it out in the mail list
// (https://groups.google.com/d/msg/golang-nuts/tlPpLW6aJw8/NT3mpToH-a4J).
//
// 2012-12-12: The following functions will be DEPRECATED with Go release
// 1.0.3+1 and REMOVED with Go release 1.0.3+2, b/c of
// http://code.google.com/p/go/source/detail?r=954a79ee3ea8
//
// func Uint64ToBigInt(n uint64) *big.Int
// func Uint64FromBigInt(n *big.Int) (uint64, bool)
package mathutil
import (
"math"
"math/big"
)
// Architecture and/or implementation specific integer limits and bit widths.
const (
MaxInt = 1<<(IntBits-1) - 1
MinInt = -MaxInt - 1
MaxUint = 1<<IntBits - 1
IntBits = 1 << (^uint(0)>>32&1 + ^uint(0)>>16&1 + ^uint(0)>>8&1 + 3)
UintPtrBits = 1 << (^uintptr(0)>>32&1 + ^uintptr(0)>>16&1 + ^uintptr(0)>>8&1 + 3)
)
var (
_1 = big.NewInt(1)
_2 = big.NewInt(2)
)
// GCDByte returns the greatest common divisor of a and b. Based on:
// http://en.wikipedia.org/wiki/Euclidean_algorithm#Implementations
func GCDByte(a, b byte) byte {
for b != 0 {
a, b = b, a%b
}
return a
}
// GCDUint16 returns the greatest common divisor of a and b.
func GCDUint16(a, b uint16) uint16 {
for b != 0 {
a, b = b, a%b
}
return a
}
// GCDUint32 returns the greatest common divisor of a and b.
func GCDUint32(a, b uint32) uint32 {
for b != 0 {
a, b = b, a%b
}
return a
}
// GCDUint64 returns the greatest common divisor of a and b.
func GCDUint64(a, b uint64) uint64 {
for b != 0 {
a, b = b, a%b
}
return a
}
// ISqrt returns floor(sqrt(n)). Typical run time is a few hundred ns.
func ISqrt(n uint32) (x uint32) {
if n == 0 {
return
}
if n >= math.MaxUint16*math.MaxUint16 {
return math.MaxUint16
}
var px, nx uint32
for x = n; ; px, x = x, nx {
nx = (x + n/x) / 2
if nx == x || nx == px {
break
}
}
return
}
// SqrtUint64 returns floor(sqrt(n)). Typical run time is about 0.5 µs.
func SqrtUint64(n uint64) (x uint64) {
if n == 0 {
return
}
if n >= math.MaxUint32*math.MaxUint32 {
return math.MaxUint32
}
var px, nx uint64
for x = n; ; px, x = x, nx {
nx = (x + n/x) / 2
if nx == x || nx == px {
break
}
}
return
}
// SqrtBig returns floor(sqrt(n)). It panics on n < 0.
func SqrtBig(n *big.Int) (x *big.Int) {
switch n.Sign() {
case -1:
panic(-1)
case 0:
return big.NewInt(0)
}
var px, nx big.Int
x = big.NewInt(0)
x.SetBit(x, n.BitLen()/2+1, 1)
for {
nx.Rsh(nx.Add(x, nx.Div(n, x)), 1)
if nx.Cmp(x) == 0 || nx.Cmp(&px) == 0 {
break
}
px.Set(x)
x.Set(&nx)
}
return
}
// Log2Byte returns log base 2 of n. It's the same as the index of the highest
// bit set in n. For n == 0, -1 is returned.
func Log2Byte(n byte) int {
return log2[n]
}
// Log2Uint16 returns log base 2 of n. It's the same as the index of the highest
// bit set in n. For n == 0, -1 is returned.
func Log2Uint16(n uint16) int {
if b := n >> 8; b != 0 {
return log2[b] + 8
}
return log2[n]
}
// Log2Uint32 returns log base 2 of n. It's the same as the index of the highest
// bit set in n. For n == 0, -1 is returned.
func Log2Uint32(n uint32) int {
if b := n >> 24; b != 0 {
return log2[b] + 24
}
if b := n >> 16; b != 0 {
return log2[b] + 16
}
if b := n >> 8; b != 0 {
return log2[b] + 8
}
return log2[n]
}
// Log2Uint64 returns log base 2 of n. It's the same as the index of the highest
// bit set in n. For n == 0, -1 is returned.
func Log2Uint64(n uint64) int {
if b := n >> 56; b != 0 {
return log2[b] + 56
}
if b := n >> 48; b != 0 {
return log2[b] + 48
}
if b := n >> 40; b != 0 {
return log2[b] + 40
}
if b := n >> 32; b != 0 {
return log2[b] + 32
}
if b := n >> 24; b != 0 {
return log2[b] + 24
}
if b := n >> 16; b != 0 {
return log2[b] + 16
}
if b := n >> 8; b != 0 {
return log2[b] + 8
}
return log2[n]
}
// ModPowByte computes (b^e)%m. It panics for m == 0 || b == e == 0.
//
// See also: http://en.wikipedia.org/wiki/Modular_exponentiation#Right-to-left_binary_method
func ModPowByte(b, e, m byte) byte {
if b == 0 && e == 0 {
panic(0)
}
if m == 1 {
return 0
}
r := uint16(1)
for b, m := uint16(b), uint16(m); e > 0; b, e = b*b%m, e>>1 {
if e&1 == 1 {
r = r * b % m
}
}
return byte(r)
}
// ModPowUint16 computes (b^e)%m. It panics for m == 0 || b == e == 0.
func ModPowUint16(b, e, m uint16) uint16 {
if b == 0 && e == 0 {
panic(0)
}
if m == 1 {
return 0
}
r := uint32(1)
for b, m := uint32(b), uint32(m); e > 0; b, e = b*b%m, e>>1 {
if e&1 == 1 {
r = r * b % m
}
}
return uint16(r)
}
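A worked instance of the right-to-left binary method referenced above (editor's sketch, not part of the vendored file): 4^13 = 67108864 and 67108864 mod 497 = 445.

package main

import (
	"fmt"

	"github.com/cznic/mathutil"
)

func main() {
	fmt.Println(mathutil.ModPowUint16(4, 13, 497)) // 445
}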
// ModPowUint32 computes (b^e)%m. It panics for m == 0 || b == e == 0.
func ModPowUint32(b, e, m uint32) uint32 {
if b == 0 && e == 0 {
panic(0)
}
if m == 1 {
return 0
}
r := uint64(1)
for b, m := uint64(b), uint64(m); e > 0; b, e = b*b%m, e>>1 {
if e&1 == 1 {
r = r * b % m
}
}
return uint32(r)
}
// ModPowUint64 computes (b^e)%m. It panics for m == 0 || b == e == 0.
func ModPowUint64(b, e, m uint64) (r uint64) {
if b == 0 && e == 0 {
panic(0)
}
if m == 1 {
return 0
}
return modPowBigInt(big.NewInt(0).SetUint64(b), big.NewInt(0).SetUint64(e), big.NewInt(0).SetUint64(m)).Uint64()
}
func modPowBigInt(b, e, m *big.Int) (r *big.Int) {
r = big.NewInt(1)
for i, n := 0, e.BitLen(); i < n; i++ {
if e.Bit(i) != 0 {
r.Mod(r.Mul(r, b), m)
}
b.Mod(b.Mul(b, b), m)
}
return
}
// ModPowBigInt computes (b^e)%m. Returns nil for e < 0. It panics for m == 0 || b == e == 0.
func ModPowBigInt(b, e, m *big.Int) (r *big.Int) {
if b.Sign() == 0 && e.Sign() == 0 {
panic(0)
}
if m.Cmp(_1) == 0 {
return big.NewInt(0)
}
if e.Sign() < 0 {
return
}
return modPowBigInt(big.NewInt(0).Set(b), big.NewInt(0).Set(e), m)
}
var uint64ToBigIntDelta big.Int
func init() {
uint64ToBigIntDelta.SetBit(&uint64ToBigIntDelta, 63, 1)
}
var uintptrBits int
func init() {
x := uint64(math.MaxUint64)
uintptrBits = BitLenUintptr(uintptr(x))
}
// UintptrBits returns the bit width of a uintptr on the executing machine.
func UintptrBits() int {
return uintptrBits
}
// AddUint128_64 returns the uint128 sum of uint64 a and b.
func AddUint128_64(a, b uint64) (hi uint64, lo uint64) {
lo = a + b
if lo < a {
hi = 1
}
return
}
// MulUint128_64 returns the uint128 bit product of uint64 a and b.
func MulUint128_64(a, b uint64) (hi, lo uint64) {
/*
2^(2 W) ahi bhi + 2^W alo bhi + 2^W ahi blo + alo blo
FEDCBA98 76543210 FEDCBA98 76543210
---- alo*blo ----
---- alo*bhi ----
---- ahi*blo ----
---- ahi*bhi ----
*/
const w = 32
const m = 1<<w - 1
ahi, bhi, alo, blo := a>>w, b>>w, a&m, b&m
lo = alo * blo
mid1 := alo * bhi
mid2 := ahi * blo
c1, lo := AddUint128_64(lo, mid1<<w)
c2, lo := AddUint128_64(lo, mid2<<w)
_, hi = AddUint128_64(ahi*bhi, mid1>>w+mid2>>w+uint64(c1+c2))
return
}
// PowerizeBigInt returns (e, p) such that e is the smallest number for which p
// == b^e is greater or equal n. For n < 0 or b < 2 (0, nil) is returned.
//
// NOTE: Run time for large values of n (above about 2^1e6 ~= 1e300000) can be
// significant and/or unacceptable. For any smaller values of n the function
// typically performs in sub-second time. For "small" values of n (roughly below
// 2^1e3 ~= 1e300) the same can easily be below 10 µs.
//
// A special (and trivial) case of b == 2 is handled separately and performs
// much faster.
func PowerizeBigInt(b, n *big.Int) (e uint32, p *big.Int) {
switch {
case b.Cmp(_2) < 0 || n.Sign() < 0:
return
case n.Sign() == 0 || n.Cmp(_1) == 0:
return 0, big.NewInt(1)
case b.Cmp(_2) == 0:
p = big.NewInt(0)
e = uint32(n.BitLen() - 1)
p.SetBit(p, int(e), 1)
if p.Cmp(n) < 0 {
p.Mul(p, _2)
e++
}
return
}
bw := b.BitLen()
nw := n.BitLen()
p = big.NewInt(1)
var bb, r big.Int
for {
switch p.Cmp(n) {
case -1:
x := uint32((nw - p.BitLen()) / bw)
if x == 0 {
x = 1
}
e += x
switch x {
case 1:
p.Mul(p, b)
default:
r.Set(_1)
bb.Set(b)
e := x
for {
if e&1 != 0 {
r.Mul(&r, &bb)
}
if e >>= 1; e == 0 {
break
}
bb.Mul(&bb, &bb)
}
p.Mul(p, &r)
}
case 0, 1:
return
}
}
}
// PowerizeUint32BigInt returns (e, p) such that e is the smallest number for
// which p == b^e is greater than or equal to n. For n < 0 or b < 2, (0, nil) is
// returned.
//
// More info: see PowerizeBigInt.
func PowerizeUint32BigInt(b uint32, n *big.Int) (e uint32, p *big.Int) {
switch {
case b < 2 || n.Sign() < 0:
return
case n.Sign() == 0 || n.Cmp(_1) == 0:
return 0, big.NewInt(1)
case b == 2:
p = big.NewInt(0)
e = uint32(n.BitLen() - 1)
p.SetBit(p, int(e), 1)
if p.Cmp(n) < 0 {
p.Mul(p, _2)
e++
}
return
}
var bb big.Int
bb.SetInt64(int64(b))
return PowerizeBigInt(&bb, n)
}
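A small usage sketch for the two Powerize functions above (editor's illustration, not part of the vendored file): the smallest e with 10^e >= 1000 is 3, so the returned power is 1000.

package main

import (
	"fmt"
	"math/big"

	"github.com/cznic/mathutil"
)

func main() {
	e, p := mathutil.PowerizeUint32BigInt(10, big.NewInt(1000))
	fmt.Println(e, p) // 3 1000, since 10^2 = 100 < 1000 <= 10^3
}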
/*
ProbablyPrimeUint32 returns true if n is prime or n is a pseudoprime to base a.
It implements the Miller-Rabin primality test for one specific value of 'a' and
k == 1.
Wrt pseudocode shown at
http://en.wikipedia.org/wiki/Miller-Rabin_primality_test#Algorithm_and_running_time
Input: n > 3, an odd integer to be tested for primality;
Input: k, a parameter that determines the accuracy of the test
Output: composite if n is composite, otherwise probably prime
write n − 1 as 2^s·d with d odd by factoring powers of 2 from n − 1
LOOP: repeat k times:
pick a random integer a in the range [2, n − 2]
x ← a^d mod n
if x = 1 or x = n − 1 then do next LOOP
for r = 1 .. s − 1
x ← x^2 mod n
if x = 1 then return composite
if x = n − 1 then do next LOOP
return composite
return probably prime
... this function behaves like passing 1 for 'k' and additionally a
fixed/non-random 'a'. Otherwise it's the same algorithm.
See also: http://mathworld.wolfram.com/Rabin-MillerStrongPseudoprimeTest.html
*/
func ProbablyPrimeUint32(n, a uint32) bool {
d, s := n-1, 0
for ; d&1 == 0; d, s = d>>1, s+1 {
}
x := uint64(ModPowUint32(a, d, n))
if x == 1 || uint32(x) == n-1 {
return true
}
for ; s > 1; s-- {
if x = x * x % uint64(n); x == 1 {
return false
}
if uint32(x) == n-1 {
return true
}
}
return false
}
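The single-base, k == 1 behaviour described above is worth a concrete example (editor's sketch, not part of the vendored file): a strong pseudoprime such as 2047 = 23 * 89 passes the test for base 2, which is why callers normally combine several bases.

package main

import (
	"fmt"

	"github.com/cznic/mathutil"
)

func main() {
	fmt.Println(mathutil.ProbablyPrimeUint32(2053, 2)) // true: 2053 is prime
	fmt.Println(mathutil.ProbablyPrimeUint32(9, 2))    // false: composite, not a base-2 pseudoprime
	fmt.Println(mathutil.ProbablyPrimeUint32(2047, 2)) // true: 2047 = 23*89 is a strong pseudoprime to base 2
}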
// ProbablyPrimeUint64_32 returns true if n is prime or n is a pseudoprime to
// base a. It implements the Miller-Rabin primality test for one specific value
// of 'a' and k == 1. See also ProbablyPrimeUint32.
func ProbablyPrimeUint64_32(n uint64, a uint32) bool {
d, s := n-1, 0
for ; d&1 == 0; d, s = d>>1, s+1 {
}
x := ModPowUint64(uint64(a), d, n)
if x == 1 || x == n-1 {
return true
}
bx, bn := big.NewInt(0).SetUint64(x), big.NewInt(0).SetUint64(n)
for ; s > 1; s-- {
if x = bx.Mod(bx.Mul(bx, bx), bn).Uint64(); x == 1 {
return false
}
if x == n-1 {
return true
}
}
return false
}
// ProbablyPrimeBigInt_32 returns true if n is prime or n is a pseudoprime to
// base a. It implements the Miller-Rabin primality test for one specific value
// of 'a' and k == 1. See also ProbablyPrimeUint32.
func ProbablyPrimeBigInt_32(n *big.Int, a uint32) bool {
var d big.Int
d.Set(n)
d.Sub(&d, _1) // d <- n-1
s := 0
for ; d.Bit(s) == 0; s++ {
}
nMinus1 := big.NewInt(0).Set(&d)
d.Rsh(&d, uint(s))
x := ModPowBigInt(big.NewInt(int64(a)), &d, n)
if x.Cmp(_1) == 0 || x.Cmp(nMinus1) == 0 {
return true
}
for ; s > 1; s-- {
if x = x.Mod(x.Mul(x, x), n); x.Cmp(_1) == 0 {
return false
}
if x.Cmp(nMinus1) == 0 {
return true
}
}
return false
}
// ProbablyPrimeBigInt returns true if n is prime or n is a pseudoprime to base
// a. It implements the Miller-Rabin primality test for one specific value of
// 'a' and k == 1. See also ProbablyPrimeUint32.
func ProbablyPrimeBigInt(n, a *big.Int) bool {
var d big.Int
d.Set(n)
d.Sub(&d, _1) // d <- n-1
s := 0
for ; d.Bit(s) == 0; s++ {
}
nMinus1 := big.NewInt(0).Set(&d)
d.Rsh(&d, uint(s))
x := ModPowBigInt(a, &d, n)
if x.Cmp(_1) == 0 || x.Cmp(nMinus1) == 0 {
return true
}
for ; s > 1; s-- {
if x = x.Mod(x.Mul(x, x), n); x.Cmp(_1) == 0 {
return false
}
if x.Cmp(nMinus1) == 0 {
return true
}
}
return false
}
// Max returns the larger of a and b.
func Max(a, b int) int {
if a > b {
return a
}
return b
}
// Min returns the smaller of a and b.
func Min(a, b int) int {
if a < b {
return a
}
return b
}
// UMax returns the larger of a and b.
func UMax(a, b uint) uint {
if a > b {
return a
}
return b
}
// UMin returns the smaller of a and b.
func UMin(a, b uint) uint {
if a < b {
return a
}
return b
}
// MaxByte returns the larger of a and b.
func MaxByte(a, b byte) byte {
if a > b {
return a
}
return b
}
// MinByte returns the smaller of a and b.
func MinByte(a, b byte) byte {
if a < b {
return a
}
return b
}
// MaxInt8 returns the larger of a and b.
func MaxInt8(a, b int8) int8 {
if a > b {
return a
}
return b
}
// MinInt8 returns the smaller of a and b.
func MinInt8(a, b int8) int8 {
if a < b {
return a
}
return b
}
// MaxUint16 returns the larger of a and b.
func MaxUint16(a, b uint16) uint16 {
if a > b {
return a
}
return b
}
// MinUint16 returns the smaller of a and b.
func MinUint16(a, b uint16) uint16 {
if a < b {
return a
}
return b
}
// MaxInt16 returns the larger of a and b.
func MaxInt16(a, b int16) int16 {
if a > b {
return a
}
return b
}
// MinInt16 returns the smaller of a and b.
func MinInt16(a, b int16) int16 {
if a < b {
return a
}
return b
}
// MaxUint32 returns the larger of a and b.
func MaxUint32(a, b uint32) uint32 {
if a > b {
return a
}
return b
}
// MinUint32 returns the smaller of a and b.
func MinUint32(a, b uint32) uint32 {
if a < b {
return a
}
return b
}
// MaxInt32 returns the larger of a and b.
func MaxInt32(a, b int32) int32 {
if a > b {
return a
}
return b
}
// MinInt32 returns the smaller of a and b.
func MinInt32(a, b int32) int32 {
if a < b {
return a
}
return b
}
// MaxUint64 returns the larger of a and b.
func MaxUint64(a, b uint64) uint64 {
if a > b {
return a
}
return b
}
// MinUint64 returns the smaller of a and b.
func MinUint64(a, b uint64) uint64 {
if a < b {
return a
}
return b
}
// MaxInt64 returns the larger of a and b.
func MaxInt64(a, b int64) int64 {
if a > b {
return a
}
return b
}
// MinInt64 returns the smaller of a and b.
func MinInt64(a, b int64) int64 {
if a < b {
return a
}
return b
}
// ToBase produces n in base b. For example
//
// ToBase(2047, 22) -> [1, 5, 4]
//
// 1 * 22^0 1
// 5 * 22^1 110
// 4 * 22^2 1936
// ----
// 2047
//
// ToBase panics for bases < 2.
func ToBase(n *big.Int, b int) []int {
var nn big.Int
nn.Set(n)
if b < 2 {
panic("invalid base")
}
k := 1
switch nn.Sign() {
case -1:
nn.Neg(&nn)
k = -1
case 0:
return []int{0}
}
bb := big.NewInt(int64(b))
var r []int
rem := big.NewInt(0)
for nn.Sign() != 0 {
nn.QuoRem(&nn, bb, rem)
r = append(r, k*int(rem.Int64()))
}
return r
}

View File

@ -0,0 +1,267 @@
$ ./example -max 100000000 > rnd.dat
$ ./assess 1000000
G E N E R A T O R S E L E C T I O N
______________________________________
[0] Input File [1] Linear Congruential
[2] Quadratic Congruential I [3] Quadratic Congruential II
[4] Cubic Congruential [5] XOR
[6] Modular Exponentiation [7] Blum-Blum-Shub
[8] Micali-Schnorr [9] G Using SHA-1
Enter Choice: 0
User Prescribed Input File: rnd.dat
S T A T I S T I C A L T E S T S
_________________________________
[01] Frequency [02] Block Frequency
[03] Cumulative Sums [04] Runs
[05] Longest Run of Ones [06] Rank
[07] Discrete Fourier Transform [08] Nonperiodic Template Matchings
[09] Overlapping Template Matchings [10] Universal Statistical
[11] Approximate Entropy [12] Random Excursions
[13] Random Excursions Variant [14] Serial
[15] Linear Complexity
INSTRUCTIONS
Enter 0 if you DO NOT want to apply all of the
statistical tests to each sequence and 1 if you DO.
Enter Choice: 1
P a r a m e t e r A d j u s t m e n t s
-----------------------------------------
[1] Block Frequency Test - block length(M): 128
[2] NonOverlapping Template Test - block length(m): 9
[3] Overlapping Template Test - block length(m): 9
[4] Approximate Entropy Test - block length(m): 10
[5] Serial Test - block length(m): 16
[6] Linear Complexity Test - block length(M): 500
Select Test (0 to continue): 0
How many bitstreams? 200
Input File Format:
[0] ASCII - A sequence of ASCII 0's and 1's
[1] Binary - Each byte in data file contains 8 bits of data
Select input mode: 1
Statistical Testing In Progress.........
Statistical Testing Complete!!!!!!!!!!!!
$ cat experiments/AlgorithmTesting/finalAnalysisReport.txt
------------------------------------------------------------------------------
RESULTS FOR THE UNIFORMITY OF P-VALUES AND THE PROPORTION OF PASSING SEQUENCES
------------------------------------------------------------------------------
generator is <rnd.dat>
------------------------------------------------------------------------------
C1 C2 C3 C4 C5 C6 C7 C8 C9 C10 P-VALUE PROPORTION STATISTICAL TEST
------------------------------------------------------------------------------
28 22 17 19 15 8 24 23 19 25 0.093720 198/200 Frequency
20 18 24 14 18 17 16 28 21 24 0.504219 199/200 BlockFrequency
25 22 17 24 19 21 22 15 16 19 0.825505 197/200 CumulativeSums
27 17 16 22 14 26 14 25 19 20 0.304126 199/200 CumulativeSums
22 19 14 23 22 22 13 28 13 24 0.224821 199/200 Runs
20 24 18 21 15 13 22 23 24 20 0.719747 197/200 LongestRun
22 26 18 22 26 15 17 22 20 12 0.410055 199/200 Rank
25 22 26 22 20 16 20 20 16 13 0.585209 195/200 FFT
22 11 15 26 33 24 21 13 14 21 0.013102 197/200 NonOverlappingTemplate
17 11 16 27 19 24 19 20 28 19 0.219006 200/200 NonOverlappingTemplate
23 27 24 15 21 11 18 27 15 19 0.162606 197/200 NonOverlappingTemplate
21 18 13 20 19 23 20 17 26 23 0.749884 197/200 NonOverlappingTemplate
24 22 24 24 24 21 13 15 17 16 0.494392 196/200 NonOverlappingTemplate
24 16 23 15 23 18 25 16 18 22 0.699313 199/200 NonOverlappingTemplate
19 23 21 16 27 18 17 20 18 21 0.859637 198/200 NonOverlappingTemplate
12 20 16 19 26 14 30 20 24 19 0.141256 198/200 NonOverlappingTemplate
18 21 17 21 20 14 25 19 24 21 0.859637 198/200 NonOverlappingTemplate
24 25 21 18 23 15 23 17 16 18 0.749884 199/200 NonOverlappingTemplate
20 22 22 18 16 22 28 16 14 22 0.574903 198/200 NonOverlappingTemplate
18 23 22 17 24 25 19 16 23 13 0.626709 199/200 NonOverlappingTemplate
17 22 14 19 21 21 18 19 24 25 0.842937 198/200 NonOverlappingTemplate
18 17 26 21 22 15 22 18 21 20 0.883171 197/200 NonOverlappingTemplate
19 25 16 32 15 19 20 18 16 20 0.236810 199/200 NonOverlappingTemplate
19 18 15 21 24 22 18 21 20 22 0.964295 200/200 NonOverlappingTemplate
21 14 17 23 26 19 20 22 20 18 0.834308 196/200 NonOverlappingTemplate
15 21 17 27 26 23 21 17 24 9 0.129620 198/200 NonOverlappingTemplate
25 17 19 19 18 22 21 22 21 16 0.951205 196/200 NonOverlappingTemplate
20 19 24 21 19 24 16 18 17 22 0.946308 197/200 NonOverlappingTemplate
27 16 19 18 23 19 22 17 22 17 0.807412 197/200 NonOverlappingTemplate
14 18 21 23 23 20 14 22 20 25 0.719747 198/200 NonOverlappingTemplate
18 22 19 12 24 25 25 22 18 15 0.474986 198/200 NonOverlappingTemplate
21 18 23 17 19 18 28 19 20 17 0.825505 198/200 NonOverlappingTemplate
20 19 15 16 27 20 26 17 20 20 0.657933 198/200 NonOverlappingTemplate
17 25 21 21 11 19 22 16 27 21 0.401199 198/200 NonOverlappingTemplate
19 16 15 18 24 19 25 25 19 20 0.769527 199/200 NonOverlappingTemplate
18 20 20 26 20 12 24 25 19 16 0.524101 198/200 NonOverlappingTemplate
14 16 18 23 21 21 19 19 28 21 0.668321 197/200 NonOverlappingTemplate
21 20 23 25 21 22 19 17 14 18 0.875539 197/200 NonOverlappingTemplate
14 16 29 22 23 13 20 29 17 17 0.099513 197/200 NonOverlappingTemplate
14 19 27 19 17 23 18 24 20 19 0.709558 199/200 NonOverlappingTemplate
18 15 21 19 27 22 21 23 17 17 0.779188 198/200 NonOverlappingTemplate
13 23 13 22 22 23 22 21 21 20 0.689019 199/200 NonOverlappingTemplate
17 14 26 26 16 21 30 15 21 14 0.096578 199/200 NonOverlappingTemplate
18 21 24 23 21 13 23 23 19 15 0.719747 197/200 NonOverlappingTemplate
19 21 14 32 20 15 16 18 24 21 0.202268 199/200 NonOverlappingTemplate
27 22 20 21 21 14 15 22 14 24 0.474986 196/200 NonOverlappingTemplate
31 12 25 11 21 18 19 16 24 23 0.050305 197/200 NonOverlappingTemplate
17 26 20 22 15 27 22 19 12 20 0.383827 199/200 NonOverlappingTemplate
15 22 14 14 31 15 27 18 23 21 0.078086 194/200 NonOverlappingTemplate
19 19 14 15 24 21 25 21 20 22 0.788728 197/200 NonOverlappingTemplate
20 21 19 22 25 18 13 24 28 10 0.153763 195/200 NonOverlappingTemplate
23 17 21 25 21 20 13 30 14 16 0.196920 196/200 NonOverlappingTemplate
17 31 17 22 16 15 28 23 11 20 0.050305 197/200 NonOverlappingTemplate
15 21 26 27 15 18 19 21 18 20 0.605916 198/200 NonOverlappingTemplate
23 18 15 14 20 21 20 20 20 29 0.554420 200/200 NonOverlappingTemplate
22 19 19 18 19 17 22 21 31 12 0.311542 199/200 NonOverlappingTemplate
16 22 23 21 19 19 18 24 21 17 0.960198 197/200 NonOverlappingTemplate
21 21 17 20 16 23 25 22 18 17 0.917870 200/200 NonOverlappingTemplate
27 17 17 16 21 20 22 18 21 21 0.859637 197/200 NonOverlappingTemplate
18 24 15 27 18 21 18 16 24 19 0.657933 199/200 NonOverlappingTemplate
13 16 21 21 15 25 18 22 29 20 0.326749 198/200 NonOverlappingTemplate
18 17 23 23 15 19 26 30 11 18 0.125927 198/200 NonOverlappingTemplate
30 21 18 22 17 21 15 17 21 18 0.544254 195/200 NonOverlappingTemplate
12 18 19 24 16 24 18 24 28 17 0.311542 199/200 NonOverlappingTemplate
20 15 23 15 18 30 23 18 17 21 0.410055 196/200 NonOverlappingTemplate
15 18 23 16 29 21 22 16 19 21 0.544254 200/200 NonOverlappingTemplate
18 16 27 13 21 22 22 21 16 24 0.534146 199/200 NonOverlappingTemplate
20 25 18 21 16 21 17 28 21 13 0.484646 200/200 NonOverlappingTemplate
23 22 13 22 14 20 26 18 19 23 0.574903 197/200 NonOverlappingTemplate
21 24 25 13 19 22 18 13 24 21 0.504219 199/200 NonOverlappingTemplate
19 13 18 25 22 15 23 28 19 18 0.410055 195/200 NonOverlappingTemplate
20 15 27 22 26 26 14 13 21 16 0.181557 198/200 NonOverlappingTemplate
18 18 19 23 18 20 19 21 24 20 0.991468 200/200 NonOverlappingTemplate
18 23 17 14 20 25 22 22 22 17 0.816537 198/200 NonOverlappingTemplate
26 15 15 11 23 21 21 16 36 16 0.005557 196/200 NonOverlappingTemplate
27 13 21 23 21 16 19 20 16 24 0.544254 198/200 NonOverlappingTemplate
16 15 32 17 20 23 22 19 20 16 0.262249 200/200 NonOverlappingTemplate
26 19 24 13 24 16 18 18 13 29 0.137282 199/200 NonOverlappingTemplate
15 18 14 27 32 21 15 20 19 19 0.112047 198/200 NonOverlappingTemplate
22 23 22 18 20 23 19 22 16 15 0.924076 196/200 NonOverlappingTemplate
18 17 21 22 14 17 22 24 20 25 0.798139 199/200 NonOverlappingTemplate
15 17 19 24 21 23 17 25 23 16 0.739918 196/200 NonOverlappingTemplate
22 11 15 26 32 25 21 13 14 21 0.017305 197/200 NonOverlappingTemplate
22 16 19 23 22 21 21 19 17 20 0.985788 200/200 NonOverlappingTemplate
22 28 18 24 14 20 23 21 20 10 0.230755 198/200 NonOverlappingTemplate
14 13 22 28 14 28 17 22 23 19 0.129620 197/200 NonOverlappingTemplate
22 16 22 20 21 21 16 19 18 25 0.935716 198/200 NonOverlappingTemplate
15 20 23 17 19 22 21 23 18 22 0.951205 200/200 NonOverlappingTemplate
20 24 21 19 17 19 19 24 15 22 0.930026 198/200 NonOverlappingTemplate
18 21 15 21 17 28 24 22 20 14 0.534146 200/200 NonOverlappingTemplate
19 15 19 19 20 20 15 25 23 25 0.779188 198/200 NonOverlappingTemplate
17 24 25 16 15 21 18 19 23 22 0.788728 198/200 NonOverlappingTemplate
15 20 18 25 24 15 21 31 18 13 0.141256 200/200 NonOverlappingTemplate
24 17 19 20 18 21 15 22 24 20 0.924076 196/200 NonOverlappingTemplate
23 18 17 21 17 28 23 21 18 14 0.605916 197/200 NonOverlappingTemplate
21 19 22 23 16 17 20 21 22 19 0.985788 200/200 NonOverlappingTemplate
27 17 21 27 24 15 15 17 15 22 0.304126 199/200 NonOverlappingTemplate
25 28 20 24 13 14 16 22 19 19 0.304126 197/200 NonOverlappingTemplate
27 16 14 24 22 18 24 20 18 17 0.564639 196/200 NonOverlappingTemplate
18 18 24 19 19 19 26 11 27 19 0.375313 195/200 NonOverlappingTemplate
20 15 29 19 26 16 21 11 18 25 0.141256 197/200 NonOverlappingTemplate
19 14 21 25 11 23 22 25 26 14 0.176657 199/200 NonOverlappingTemplate
18 23 20 17 19 18 29 22 26 8 0.102526 199/200 NonOverlappingTemplate
22 17 18 16 18 20 19 19 25 26 0.834308 198/200 NonOverlappingTemplate
25 18 14 16 16 24 18 18 30 21 0.268917 198/200 NonOverlappingTemplate
24 21 23 13 12 22 20 23 20 22 0.554420 196/200 NonOverlappingTemplate
18 21 21 30 22 17 19 14 18 20 0.534146 197/200 NonOverlappingTemplate
25 20 22 21 15 18 17 20 17 25 0.825505 199/200 NonOverlappingTemplate
18 21 22 21 18 20 26 16 20 18 0.941144 197/200 NonOverlappingTemplate
23 18 22 25 12 16 17 19 26 22 0.474986 198/200 NonOverlappingTemplate
22 18 29 23 19 23 17 17 15 17 0.534146 198/200 NonOverlappingTemplate
19 21 17 26 18 15 22 26 15 21 0.626709 197/200 NonOverlappingTemplate
16 20 20 23 18 21 18 18 25 21 0.955835 199/200 NonOverlappingTemplate
23 21 20 21 22 10 15 27 15 26 0.186566 198/200 NonOverlappingTemplate
18 26 20 26 26 18 17 17 20 12 0.358641 198/200 NonOverlappingTemplate
24 20 21 18 24 12 19 27 14 21 0.401199 195/200 NonOverlappingTemplate
16 25 15 21 24 18 18 25 22 16 0.657933 199/200 NonOverlappingTemplate
24 14 17 26 15 17 17 25 21 24 0.428095 200/200 NonOverlappingTemplate
22 24 11 20 22 24 19 18 12 28 0.176657 196/200 NonOverlappingTemplate
27 16 27 18 27 14 13 16 21 21 0.141256 197/200 NonOverlappingTemplate
23 25 20 18 23 17 15 23 19 17 0.834308 196/200 NonOverlappingTemplate
19 21 20 27 16 16 18 25 16 22 0.678686 199/200 NonOverlappingTemplate
25 22 21 19 15 19 22 19 25 13 0.657933 197/200 NonOverlappingTemplate
19 28 21 25 20 12 18 13 29 15 0.073417 198/200 NonOverlappingTemplate
20 24 21 19 21 15 17 24 20 19 0.941144 198/200 NonOverlappingTemplate
18 29 23 17 24 19 17 18 16 19 0.585209 200/200 NonOverlappingTemplate
18 28 18 16 25 21 18 20 14 22 0.544254 198/200 NonOverlappingTemplate
22 19 23 22 22 21 21 26 12 12 0.401199 199/200 NonOverlappingTemplate
22 15 25 16 21 27 14 22 21 17 0.484646 199/200 NonOverlappingTemplate
18 25 20 23 30 17 13 22 18 14 0.213309 200/200 NonOverlappingTemplate
20 23 21 21 23 29 16 13 16 18 0.410055 199/200 NonOverlappingTemplate
21 19 16 22 31 18 20 17 18 18 0.514124 198/200 NonOverlappingTemplate
26 22 12 14 23 17 21 24 21 20 0.455937 197/200 NonOverlappingTemplate
21 17 18 17 14 32 21 26 18 16 0.162606 197/200 NonOverlappingTemplate
22 24 22 23 11 15 17 18 29 19 0.230755 198/200 NonOverlappingTemplate
19 27 20 19 23 15 24 15 21 17 0.657933 198/200 NonOverlappingTemplate
20 25 16 10 24 13 23 21 21 27 0.149495 200/200 NonOverlappingTemplate
19 21 21 27 17 17 19 21 21 17 0.904708 200/200 NonOverlappingTemplate
18 23 15 19 24 21 23 21 13 23 0.719747 198/200 NonOverlappingTemplate
26 16 28 19 19 18 17 17 16 24 0.474986 199/200 NonOverlappingTemplate
24 32 17 18 20 13 18 18 19 21 0.236810 195/200 NonOverlappingTemplate
26 25 18 17 12 19 20 23 21 19 0.585209 196/200 NonOverlappingTemplate
18 26 25 12 18 16 24 19 18 24 0.410055 199/200 NonOverlappingTemplate
27 21 22 27 21 14 18 14 23 13 0.219006 197/200 NonOverlappingTemplate
18 23 24 16 19 21 16 26 20 17 0.798139 199/200 NonOverlappingTemplate
19 30 15 27 14 19 24 11 22 19 0.073417 198/200 NonOverlappingTemplate
20 23 22 20 22 15 22 21 18 17 0.964295 198/200 NonOverlappingTemplate
22 31 16 26 13 19 17 22 24 10 0.037566 197/200 NonOverlappingTemplate
18 24 22 14 23 19 16 18 19 27 0.637119 197/200 NonOverlappingTemplate
19 20 21 22 21 18 19 22 20 18 0.999438 198/200 NonOverlappingTemplate
27 15 21 18 28 18 15 23 18 17 0.375313 195/200 NonOverlappingTemplate
26 23 20 20 23 19 20 23 14 12 0.514124 199/200 NonOverlappingTemplate
18 19 11 15 21 24 20 26 23 23 0.428095 198/200 NonOverlappingTemplate
19 16 21 25 19 21 15 24 24 16 0.749884 197/200 NonOverlappingTemplate
17 26 23 18 20 26 23 14 18 15 0.494392 198/200 NonOverlappingTemplate
15 17 19 24 21 23 17 25 23 16 0.739918 196/200 NonOverlappingTemplate
26 19 20 20 24 22 22 13 14 20 0.605916 198/200 OverlappingTemplate
29 24 17 21 18 13 18 21 17 22 0.446556 196/200 Universal
22 18 22 20 20 21 22 21 18 16 0.992952 198/200 ApproximateEntropy
14 8 13 9 11 13 13 8 7 10 0.719747 106/106 RandomExcursions
13 18 9 7 12 12 9 6 12 8 0.236810 104/106 RandomExcursions
11 15 10 7 11 14 9 6 12 11 0.595549 106/106 RandomExcursions
15 7 12 12 9 11 16 8 10 6 0.350485 106/106 RandomExcursions
10 10 12 16 10 12 10 7 13 6 0.554420 106/106 RandomExcursions
8 7 12 10 11 16 11 13 10 8 0.657933 106/106 RandomExcursions
9 6 12 12 14 9 11 13 10 10 0.816537 104/106 RandomExcursions
10 10 7 12 11 9 10 13 14 10 0.911413 105/106 RandomExcursions
8 8 12 9 10 5 13 12 17 12 0.319084 104/106 RandomExcursionsVariant
5 11 10 11 7 11 10 15 11 15 0.455937 104/106 RandomExcursionsVariant
6 12 11 8 12 12 12 13 13 7 0.699313 104/106 RandomExcursionsVariant
14 10 11 6 12 9 8 12 11 13 0.779188 104/106 RandomExcursionsVariant
12 12 10 7 17 6 6 12 13 11 0.262249 103/106 RandomExcursionsVariant
13 8 14 13 7 6 6 13 15 11 0.249284 102/106 RandomExcursionsVariant
12 12 12 13 7 9 6 13 12 10 0.739918 105/106 RandomExcursionsVariant
13 15 12 8 9 10 6 9 14 10 0.574903 106/106 RandomExcursionsVariant
10 15 9 12 14 10 8 11 7 10 0.739918 105/106 RandomExcursionsVariant
13 12 8 11 12 11 9 10 11 9 0.978072 103/106 RandomExcursionsVariant
10 13 12 12 8 13 8 9 14 7 0.739918 104/106 RandomExcursionsVariant
12 10 10 14 7 8 7 13 14 11 0.657933 106/106 RandomExcursionsVariant
10 13 10 10 13 10 12 6 10 12 0.897763 106/106 RandomExcursionsVariant
9 12 15 8 13 8 12 8 11 10 0.779188 106/106 RandomExcursionsVariant
9 13 15 10 10 10 8 14 6 11 0.616305 106/106 RandomExcursionsVariant
7 17 9 12 9 11 10 16 4 11 0.129620 106/106 RandomExcursionsVariant
10 9 10 15 7 12 7 8 12 16 0.419021 106/106 RandomExcursionsVariant
9 12 11 8 8 9 15 12 9 13 0.798139 106/106 RandomExcursionsVariant
17 34 11 22 22 17 19 20 13 25 0.026057 199/200 Serial
22 20 16 22 20 18 20 18 23 21 0.989786 199/200 Serial
12 33 25 29 21 11 21 15 14 19 0.003996 199/200 LinearComplexity
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
The minimum pass rate for each statistical test with the exception of the
random excursion (variant) test is approximately = 193 for a
sample size = 200 binary sequences.
The minimum pass rate for the random excursion (variant) test
is approximately = 101 for a sample size = 106 binary sequences.
For further guidelines construct a probability table using the MAPLE program
provided in the addendum section of the documentation.
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
$
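A quick check of where the quoted minima come from, assuming the standard NIST STS proportion bound with significance level α = 0.01:

    \hat{p} = 1 - \alpha = 0.99, \qquad m\Bigl(\hat{p} - 3\sqrt{\hat{p}(1-\hat{p})/m}\Bigr) \approx 193.8 \ (m = 200), \qquad \approx 101.9 \ (m = 106)

which, rounded down, reproduces the 193/200 and 101/106 figures in the report above.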

39
_vendor/vendor/github.com/cznic/mathutil/permute.go generated vendored Normal file
View File

@ -0,0 +1,39 @@
// Copyright (c) 2014 The mathutil Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package mathutil
import (
"sort"
)
// Generate the first permutation of data.
func PermutationFirst(data sort.Interface) {
sort.Sort(data)
}
// Generate the next permutation of data if possible and return true.
// Return false if there is no more permutation left.
// Based on the algorithm described here:
// http://en.wikipedia.org/wiki/Permutation#Generation_in_lexicographic_order
func PermutationNext(data sort.Interface) bool {
var k, l int
for k = data.Len() - 2; ; k-- { // 1.
if k < 0 {
return false
}
if data.Less(k, k+1) {
break
}
}
for l = data.Len() - 1; !data.Less(k, l); l-- { // 2.
}
data.Swap(k, l) // 3.
for i, j := k+1, data.Len()-1; i < j; i++ { // 4.
data.Swap(i, j)
j--
}
return true
}
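A minimal usage sketch for the two permutation helpers above, assuming the package is imported under its vendored path; the three-element slice is only an illustration:

package main

import (
	"fmt"
	"sort"

	"github.com/cznic/mathutil"
)

func main() {
	// Enumerate all permutations of {1, 2, 3} in lexicographic order.
	data := sort.IntSlice{3, 1, 2}
	mathutil.PermutationFirst(data) // sorts data into the first permutation: [1 2 3]
	for {
		fmt.Println([]int(data))
		if !mathutil.PermutationNext(data) { // false after the last permutation
			break
		}
	}
}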

335
_vendor/vendor/github.com/cznic/mathutil/primes.go generated vendored Normal file
View File

@ -0,0 +1,335 @@
// Copyright (c) 2014 The mathutil Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package mathutil
import (
"math"
)
// IsPrimeUint16 returns true if n is prime. Typical run time is a few ns.
func IsPrimeUint16(n uint16) bool {
return n > 0 && primes16[n-1] == 1
}
// NextPrimeUint16 returns the first prime > n and true if successful, or an
// undefined value and false if there is no next prime within the uint16 limits.
// Typical run time is a few ns.
func NextPrimeUint16(n uint16) (p uint16, ok bool) {
return n + uint16(primes16[n]), n < 65521
}
// IsPrime returns true if n is prime. Typical run time is about 100 ns.
//
//TODO rename to IsPrimeUint32
func IsPrime(n uint32) bool {
switch {
case n&1 == 0:
return n == 2
case n%3 == 0:
return n == 3
case n%5 == 0:
return n == 5
case n%7 == 0:
return n == 7
case n%11 == 0:
return n == 11
case n%13 == 0:
return n == 13
case n%17 == 0:
return n == 17
case n%19 == 0:
return n == 19
case n%23 == 0:
return n == 23
case n%29 == 0:
return n == 29
case n%31 == 0:
return n == 31
case n%37 == 0:
return n == 37
case n%41 == 0:
return n == 41
case n%43 == 0:
return n == 43
case n%47 == 0:
return n == 47
case n%53 == 0:
return n == 53 // Benchmarked optimum
case n < 65536:
// use table data
return IsPrimeUint16(uint16(n))
default:
mod := ModPowUint32(2, (n+1)/2, n)
if mod != 2 && mod != n-2 {
return false
}
blk := &lohi[n>>24]
lo, hi := blk.lo, blk.hi
for lo <= hi {
index := (lo + hi) >> 1
liar := liars[index]
switch {
case n > liar:
lo = index + 1
case n < liar:
hi = index - 1
default:
return false
}
}
return true
}
}
// IsPrimeUint64 returns true if n is prime. Typical run time is a few tens of µs.
//
// SPRP bases: http://miller-rabin.appspot.com
func IsPrimeUint64(n uint64) bool {
switch {
case n%2 == 0:
return n == 2
case n%3 == 0:
return n == 3
case n%5 == 0:
return n == 5
case n%7 == 0:
return n == 7
case n%11 == 0:
return n == 11
case n%13 == 0:
return n == 13
case n%17 == 0:
return n == 17
case n%19 == 0:
return n == 19
case n%23 == 0:
return n == 23
case n%29 == 0:
return n == 29
case n%31 == 0:
return n == 31
case n%37 == 0:
return n == 37
case n%41 == 0:
return n == 41
case n%43 == 0:
return n == 43
case n%47 == 0:
return n == 47
case n%53 == 0:
return n == 53
case n%59 == 0:
return n == 59
case n%61 == 0:
return n == 61
case n%67 == 0:
return n == 67
case n%71 == 0:
return n == 71
case n%73 == 0:
return n == 73
case n%79 == 0:
return n == 79
case n%83 == 0:
return n == 83
case n%89 == 0:
return n == 89 // Benchmarked optimum
case n <= math.MaxUint16:
return IsPrimeUint16(uint16(n))
case n <= math.MaxUint32:
return ProbablyPrimeUint32(uint32(n), 11000544) &&
ProbablyPrimeUint32(uint32(n), 31481107)
case n < 105936894253:
return ProbablyPrimeUint64_32(n, 2) &&
ProbablyPrimeUint64_32(n, 1005905886) &&
ProbablyPrimeUint64_32(n, 1340600841)
case n < 31858317218647:
return ProbablyPrimeUint64_32(n, 2) &&
ProbablyPrimeUint64_32(n, 642735) &&
ProbablyPrimeUint64_32(n, 553174392) &&
ProbablyPrimeUint64_32(n, 3046413974)
case n < 3071837692357849:
return ProbablyPrimeUint64_32(n, 2) &&
ProbablyPrimeUint64_32(n, 75088) &&
ProbablyPrimeUint64_32(n, 642735) &&
ProbablyPrimeUint64_32(n, 203659041) &&
ProbablyPrimeUint64_32(n, 3613982119)
default:
return ProbablyPrimeUint64_32(n, 2) &&
ProbablyPrimeUint64_32(n, 325) &&
ProbablyPrimeUint64_32(n, 9375) &&
ProbablyPrimeUint64_32(n, 28178) &&
ProbablyPrimeUint64_32(n, 450775) &&
ProbablyPrimeUint64_32(n, 9780504) &&
ProbablyPrimeUint64_32(n, 1795265022)
}
}
// NextPrime returns the first prime > n and true if successful, or an undefined value and false if there
// is no next prime within the uint32 limits. Typical run time is about 2 µs.
//
//TODO rename to NextPrimeUint32
func NextPrime(n uint32) (p uint32, ok bool) {
switch {
case n < 65521:
p16, _ := NextPrimeUint16(uint16(n))
return uint32(p16), true
case n >= math.MaxUint32-4:
return
}
n++
var d0, d uint32
switch mod := n % 6; mod {
case 0:
d0, d = 1, 4
case 1:
d = 4
case 2, 3, 4:
d0, d = 5-mod, 2
case 5:
d = 2
}
p = n + d0
if p < n { // overflow
return
}
for {
if IsPrime(p) {
return p, true
}
p0 := p
p += d
if p < p0 { // overflow
break
}
d ^= 6
}
return
}
// NextPrimeUint64 returns the first prime > n and true if successful, or an undefined value and false if there
// is no next prime within the uint64 limits. Typical run time is in the hundreds of µs.
func NextPrimeUint64(n uint64) (p uint64, ok bool) {
switch {
case n < 65521:
p16, _ := NextPrimeUint16(uint16(n))
return uint64(p16), true
case n >= 18446744073709551557: // last uint64 prime
return
}
n++
var d0, d uint64
switch mod := n % 6; mod {
case 0:
d0, d = 1, 4
case 1:
d = 4
case 2, 3, 4:
d0, d = 5-mod, 2
case 5:
d = 2
}
p = n + d0
if p < n { // overflow
return
}
for {
if ok = IsPrimeUint64(p); ok {
break
}
p0 := p
p += d
if p < p0 { // overflow
break
}
d ^= 6
}
return
}
// FactorTerm is one term of an integer factorization.
type FactorTerm struct {
Prime uint32 // The divisor
Power uint32 // Term == Prime^Power
}
// FactorTerms represent a factorization of an integer
type FactorTerms []FactorTerm
// FactorInt returns the prime factorization of n > 1, or nil otherwise.
// Resulting factors are ordered by Prime. Typical run time is a few µs.
func FactorInt(n uint32) (f FactorTerms) {
switch {
case n < 2:
return
case IsPrime(n):
return []FactorTerm{{n, 1}}
}
f, w := make([]FactorTerm, 9), 0
for p := 2; p < len(primes16); p += int(primes16[p]) {
if uint(p*p) > uint(n) {
break
}
power := uint32(0)
for n%uint32(p) == 0 {
n /= uint32(p)
power++
}
if power != 0 {
f[w] = FactorTerm{uint32(p), power}
w++
}
if n == 1 {
break
}
}
if n != 1 {
f[w] = FactorTerm{n, 1}
w++
}
return f[:w]
}
// PrimorialProductsUint32 returns a slice of numbers in [lo, hi] which are a
// product of max 'max' primorials. The slice is not sorted.
//
// See also: http://en.wikipedia.org/wiki/Primorial
func PrimorialProductsUint32(lo, hi, max uint32) (r []uint32) {
lo64, hi64 := int64(lo), int64(hi)
if max > 31 { // N/A
max = 31
}
var f func(int64, int64, uint32)
f = func(n, p int64, emax uint32) {
e := uint32(1)
for n <= hi64 && e <= emax {
n *= p
if n >= lo64 && n <= hi64 {
r = append(r, uint32(n))
}
if n < hi64 {
p, _ := NextPrime(uint32(p))
f(n, int64(p), e)
}
e++
}
}
f(1, 2, max)
return
}
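A short sketch exercising the primality and factorization helpers above; the sample numbers are arbitrary:

package main

import (
	"fmt"

	"github.com/cznic/mathutil"
)

func main() {
	fmt.Println(mathutil.IsPrime(65537))           // true: 2^16+1 is prime
	fmt.Println(mathutil.IsPrimeUint64(1<<61 - 1)) // true: 2^61-1 is a Mersenne prime

	if p, ok := mathutil.NextPrime(100); ok {
		fmt.Println(p) // 101, the first prime > 100
	}

	for _, t := range mathutil.FactorInt(360) { // 360 = 2^3 * 3^2 * 5
		fmt.Printf("%d^%d ", t.Prime, t.Power)
	}
	fmt.Println()
}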

27
_vendor/vendor/github.com/cznic/mathutil/rat.go generated vendored Normal file
View File

@ -0,0 +1,27 @@
// Copyright (c) 2014 The mathutil Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package mathutil
// QCmpUint32 compares a/b and c/d and returns:
//
// -1 if a/b < c/d
// 0 if a/b == c/d
// +1 if a/b > c/d
//
func QCmpUint32(a, b, c, d uint32) int {
switch x, y := uint64(a)*uint64(d), uint64(b)*uint64(c); {
case x < y:
return -1
case x == y:
return 0
default: // x > y
return 1
}
}
// QScaleUint32 returns a such that a/b >= c/d.
func QScaleUint32(b, c, d uint32) (a uint64) {
return 1 + (uint64(b)*uint64(c))/uint64(d)
}
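For completeness, a tiny sketch of the two rational helpers above; the values are chosen only to show the semantics:

package main

import (
	"fmt"

	"github.com/cznic/mathutil"
)

func main() {
	fmt.Println(mathutil.QCmpUint32(1, 3, 2, 6)) // 0: 1/3 == 2/6
	fmt.Println(mathutil.QCmpUint32(1, 2, 2, 3)) // -1: 1/2 < 2/3

	// QScaleUint32 only guarantees a/b >= c/d; it need not return the minimal a.
	fmt.Println(mathutil.QScaleUint32(6, 2, 3)) // 5, and 5/6 >= 2/3
}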

383
_vendor/vendor/github.com/cznic/mathutil/rnd.go generated vendored Normal file
View File

@ -0,0 +1,383 @@
// Copyright (c) 2014 The mathutil Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package mathutil
import (
"fmt"
"math"
"math/big"
)
// FC32 is a full cycle PRNG covering the 32 bit signed integer range.
// In contrast to full cycle generators shown at e.g. http://en.wikipedia.org/wiki/Full_cycle,
// this code doesn't produce values at constant delta (mod cycle length).
// The 32 bit limit is per this implementation, the algorithm used has no intrinsic limit on the cycle size.
// Properties include:
// - Adjustable limits on creation (hi, lo).
// - Positionable/randomly accessible (Pos, Seek).
// - Repeatable (deterministic).
// - Can run forward or backward (Next, Prev).
// - For a billion-number cycle the Next/Prev PRN can be produced in about 100-150 ns.
// That's roughly 5-10 times slower than PRNs generated using the (non-FC) rand package.
type FC32 struct {
cycle int64 // On average: 3 * delta / 2, (HQ: 2 * delta)
delta int64 // hi - lo
factors [][]int64 // This trades some space for hopefully a bit of speed (multiple adding vs multiplying).
lo int
mods []int // pos % set
pos int64 // Within cycle.
primes []int64 // Ordered. ∏ primes == cycle.
set []int64 // Reordered primes (magnitude order bases) according to seed.
}
// NewFC32 returns a newly created FC32 adjusted for the closed interval [lo, hi] or an Error if any.
// If hq == true then trade some generation time for improved (pseudo)randomness.
func NewFC32(lo, hi int, hq bool) (r *FC32, err error) {
if lo > hi {
return nil, fmt.Errorf("invalid range %d > %d", lo, hi)
}
if uint64(hi)-uint64(lo) > math.MaxUint32 {
return nil, fmt.Errorf("range out of int32 limits %d, %d", lo, hi)
}
delta := int64(hi) - int64(lo)
// Find the primorial covering whole delta
n, set, p := int64(1), []int64{}, uint32(2)
if hq {
p++
}
for {
set = append(set, int64(p))
n *= int64(p)
if n > delta {
break
}
p, _ = NextPrime(p)
}
// Adjust the set so n ∊ [delta, 2 * delta] (HQ: [delta, 3 * delta])
// while keeping the cardinality of the set (correlates with the statistic "randomness quality")
// at max, i.e. discard at most one member.
i := -1 // no candidate prime
if n > 2*(delta+1) {
for j, p := range set {
q := n / p
if q < delta+1 {
break
}
i = j // mark the highest candidate prime set index
}
}
if i >= 0 { // shrink the inner cycle
n = n / set[i]
set = delete(set, i)
}
r = &FC32{
cycle: n,
delta: delta,
factors: make([][]int64, len(set)),
lo: lo,
mods: make([]int, len(set)),
primes: set,
}
r.Seed(1) // the default seed should be always non zero
return
}
// Cycle reports the length of the inner FCPRNG cycle.
// Cycle is at most double (HQ: triple) the generator period (hi - lo + 1).
func (r *FC32) Cycle() int64 {
return r.cycle
}
// Next returns the first PRN after Pos.
func (r *FC32) Next() int {
return r.step(1)
}
// Pos reports the current position within the inner cycle.
func (r *FC32) Pos() int64 {
return r.pos
}
// Prev returns the first PRN before Pos.
func (r *FC32) Prev() int {
return r.step(-1)
}
// Seed uses the provided seed value to initialize the generator to a deterministic state.
// A zero seed produces a "canonical" generator with worse randomness than most non-zero seeds.
// Still, the FC property holds for any seed value.
func (r *FC32) Seed(seed int64) {
u := uint64(seed)
r.set = mix(r.primes, &u)
n := int64(1)
for i, p := range r.set {
k := make([]int64, p)
v := int64(0)
for j := range k {
k[j] = v
v += n
}
n *= p
r.factors[i] = mix(k, &u)
}
}
// Seek sets Pos to |pos| % Cycle.
func (r *FC32) Seek(pos int64) { //vet:ignore
if pos < 0 {
pos = -pos
}
pos %= r.cycle
r.pos = pos
for i, p := range r.set {
r.mods[i] = int(pos % p)
}
}
func (r *FC32) step(dir int) int {
for { // avg loops per step: 3/2 (HQ: 2)
y := int64(0)
pos := r.pos
pos += int64(dir)
switch {
case pos < 0:
pos = r.cycle - 1
case pos >= r.cycle:
pos = 0
}
r.pos = pos
for i, mod := range r.mods {
mod += dir
p := int(r.set[i])
switch {
case mod < 0:
mod = p - 1
case mod >= p:
mod = 0
}
r.mods[i] = mod
y += r.factors[i][mod]
}
if y <= r.delta {
return int(y) + r.lo
}
}
}
func delete(set []int64, i int) (y []int64) {
for j, v := range set {
if j != i {
y = append(y, v)
}
}
return
}
func mix(set []int64, seed *uint64) (y []int64) {
for len(set) != 0 {
*seed = rol(*seed)
i := int(*seed % uint64(len(set)))
y = append(y, set[i])
set = delete(set, i)
}
return
}
func rol(u uint64) (y uint64) {
y = u << 1
if int64(u) < 0 {
y |= 1
}
return
}
// FCBig is a full cycle PRNG covering ranges outside of the int32 limits.
// For more info see the FC32 docs.
// Next/Prev PRN on a 1e15 cycle can be produced in about 2 µsec.
type FCBig struct {
cycle *big.Int // On average: 3 * delta / 2, (HQ: 2 * delta)
delta *big.Int // hi - lo
factors [][]*big.Int // This trades some space for hopefully a bit of speed (multiple adding vs multiplying).
lo *big.Int
mods []int // pos % set
pos *big.Int // Within cycle.
primes []int64 // Ordered. ∏ primes == cycle.
set []int64 // Reordered primes (magnitude order bases) according to seed.
}
// NewFCBig returns a newly created FCBig adjusted for the closed interval [lo, hi] or an Error if any.
// If hq == true then trade some generation time for improved (pseudo)randomness.
func NewFCBig(lo, hi *big.Int, hq bool) (r *FCBig, err error) {
if lo.Cmp(hi) > 0 {
return nil, fmt.Errorf("invalid range %d > %d", lo, hi)
}
delta := big.NewInt(0)
delta.Add(delta, hi).Sub(delta, lo)
// Find the primorial covering whole delta
n, set, pp, p := big.NewInt(1), []int64{}, big.NewInt(0), uint32(2)
if hq {
p++
}
for {
set = append(set, int64(p))
pp.SetInt64(int64(p))
n.Mul(n, pp)
if n.Cmp(delta) > 0 {
break
}
p, _ = NextPrime(p)
}
// Adjust the set so n ∊ [delta, 2 * delta] (HQ: [delta, 3 * delta])
// while keeping the cardinality of the set (correlates with the statistic "randomness quality")
// at max, i.e. discard at most one member.
dd1 := big.NewInt(1)
dd1.Add(dd1, delta)
dd2 := big.NewInt(0)
dd2.Lsh(dd1, 1)
i := -1 // no candidate prime
if n.Cmp(dd2) > 0 {
q := big.NewInt(0)
for j, p := range set {
pp.SetInt64(p)
q.Set(n)
q.Div(q, pp)
if q.Cmp(dd1) < 0 {
break
}
i = j // mark the highest candidate prime set index
}
}
if i >= 0 { // shrink the inner cycle
pp.SetInt64(set[i])
n.Div(n, pp)
set = delete(set, i)
}
r = &FCBig{
cycle: n,
delta: delta,
factors: make([][]*big.Int, len(set)),
lo: lo,
mods: make([]int, len(set)),
pos: big.NewInt(0),
primes: set,
}
r.Seed(1) // the default seed should be always non zero
return
}
// Cycle reports the length of the inner FCPRNG cycle.
// Cycle is at most double (HQ: triple) the generator period (hi - lo + 1).
func (r *FCBig) Cycle() *big.Int {
return r.cycle
}
// Next returns the first PRN after Pos.
func (r *FCBig) Next() *big.Int {
return r.step(1)
}
// Pos reports the current position within the inner cycle.
func (r *FCBig) Pos() *big.Int {
return r.pos
}
// Prev returns the first PRN before Pos.
func (r *FCBig) Prev() *big.Int {
return r.step(-1)
}
// Seed uses the provided seed value to initialize the generator to a deterministic state.
// A zero seed produces a "canonical" generator with worse randomness than most non-zero seeds.
// Still, the FC property holds for any seed value.
func (r *FCBig) Seed(seed int64) {
u := uint64(seed)
r.set = mix(r.primes, &u)
n := big.NewInt(1)
v := big.NewInt(0)
pp := big.NewInt(0)
for i, p := range r.set {
k := make([]*big.Int, p)
v.SetInt64(0)
for j := range k {
k[j] = big.NewInt(0)
k[j].Set(v)
v.Add(v, n)
}
pp.SetInt64(p)
n.Mul(n, pp)
r.factors[i] = mixBig(k, &u)
}
}
// Seek sets Pos to |pos| % Cycle.
func (r *FCBig) Seek(pos *big.Int) {
r.pos.Set(pos)
r.pos.Abs(r.pos)
r.pos.Mod(r.pos, r.cycle)
mod := big.NewInt(0)
pp := big.NewInt(0)
for i, p := range r.set {
pp.SetInt64(p)
r.mods[i] = int(mod.Mod(r.pos, pp).Int64())
}
}
func (r *FCBig) step(dir int) (y *big.Int) {
y = big.NewInt(0)
d := big.NewInt(int64(dir))
for { // avg loops per step: 3/2 (HQ: 2)
r.pos.Add(r.pos, d)
switch {
case r.pos.Sign() < 0:
r.pos.Add(r.pos, r.cycle)
case r.pos.Cmp(r.cycle) >= 0:
r.pos.SetInt64(0)
}
for i, mod := range r.mods {
mod += dir
p := int(r.set[i])
switch {
case mod < 0:
mod = p - 1
case mod >= p:
mod = 0
}
r.mods[i] = mod
y.Add(y, r.factors[i][mod])
}
if y.Cmp(r.delta) <= 0 {
y.Add(y, r.lo)
return
}
y.SetInt64(0)
}
}
func deleteBig(set []*big.Int, i int) (y []*big.Int) {
for j, v := range set {
if j != i {
y = append(y, v)
}
}
return
}
func mixBig(set []*big.Int, seed *uint64) (y []*big.Int) {
for len(set) != 0 {
*seed = rol(*seed)
i := int(*seed % uint64(len(set)))
y = append(y, set[i])
set = deleteBig(set, i)
}
return
}
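A brief sketch of driving the FC32 generator defined above; the range and seed are arbitrary:

package main

import (
	"fmt"

	"github.com/cznic/mathutil"
)

func main() {
	r, err := mathutil.NewFC32(0, 9, false) // full-cycle PRNG over [0, 9]
	if err != nil {
		panic(err)
	}
	r.Seed(42) // deterministic: a given seed always yields the same sequence
	r.Seek(0)

	a := make([]int, 10)
	for i := range a {
		a[i] = r.Next()
	}
	fmt.Println(a)

	// Rewinding to the same position reproduces the identical run.
	r.Seek(0)
	fmt.Println(r.Next() == a[0]) // true
}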

6995
_vendor/vendor/github.com/cznic/mathutil/tables.go generated vendored Normal file

File diff suppressed because it is too large Load Diff

11
_vendor/vendor/github.com/cznic/mathutil/test_deps.go generated vendored Normal file
View File

@ -0,0 +1,11 @@
// Copyright (c) 2014 The mathutil Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package mathutil
// Pull test dependencies too.
// Enables easy 'go test X' after 'go get X'
import (
// nothing yet
)

11
_vendor/vendor/github.com/cznic/parser/yacc/AUTHORS generated vendored Normal file
View File

@ -0,0 +1,11 @@
# This file lists authors for copyright purposes. This file is distinct from
# the CONTRIBUTORS files. See the latter for an explanation.
#
# Names should be added to this file as:
# Name or Organization <email address>
#
# The email address is not required for organizations.
#
# Please keep the list sorted.
Jan Mercl <0xjnml@gmail.com>

9
_vendor/vendor/github.com/cznic/parser/yacc/CONTRIBUTORS generated vendored Normal file
View File

@ -0,0 +1,9 @@
# This file lists people who contributed code to this repository. The AUTHORS
# file lists the copyright holders; this file lists people.
#
# Names should be added to this file like so:
# Name <email address>
#
# Please keep the list sorted.
Jan Mercl <0xjnml@gmail.com>

27
_vendor/vendor/github.com/cznic/parser/yacc/LICENSE generated vendored Normal file
View File

@ -0,0 +1,27 @@
Copyright (c) 2014 The parser Authors. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
* Neither the names of the authors nor the names of the
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

58
_vendor/vendor/github.com/cznic/parser/yacc/Makefile generated vendored Normal file
View File

@ -0,0 +1,58 @@
# Copyright 2015 The parser Authors. All rights reserved.
# Use of this source code is governed by a BSD-style
# license that can be found in the LICENSE file.
.PHONY: all clean cover cpu editor internalError later mem nuke todo edit
grep=--include=*.go --include=*.l --include=*.y --include=*.yy
all: editor
go vet || true
golint || true
make todo
clean:
go clean
rm -f *~ cpu.test mem.test
cover:
t=$(shell tempfile) ; go test -coverprofile $$t && go tool cover -html $$t && unlink $$t
cpu:
go test -c -o cpu.test
./cpu.test -noerr -test.cpuprofile cpu.out
go tool pprof --lines cpu.test cpu.out
edit:
gvim -p Makefile *.l *.yy *.y *.output *.go
editor: scanner.go goscanner.go parser.go
gofmt -l -s -w *.go
go test
go install
internalError:
egrep -ho '"internal error.*"' *.go | sort | cat -n
later:
@grep -n $(grep) LATER * || true
@grep -n $(grep) MAYBE * || true
mem:
go test -c -o mem.test
./mem.test -test.bench . -test.memprofile mem.out
go tool pprof --lines --web --alloc_space mem.test mem.out
nuke: clean
go clean -i
parser.go scanner.go goscanner.go: parser.yy go.l y.l
go test -i
go generate
todo:
@grep -n $(grep) ^[[:space:]]*_[[:space:]]*=[[:space:]][[:alpha:]][[:alnum:]]* * || true
@grep -n $(grep) TODO * || true
@grep -n $(grep) BUG * || true
@grep -n $(grep) [^[:alpha:]]println * || true

92
_vendor/vendor/github.com/cznic/parser/yacc/api.go generated vendored Normal file
View File

@ -0,0 +1,92 @@
// Copyright 2015 The parser Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:generate go run generate.go
//go:generate golex -o goscanner.go go.l
//go:generate golex -o scanner.go y.l
//go:generate go run generate.go -2
// Package parser implements a parser for yacc source files.
//
// Note: Rule.Body element's type
//
// int Eg. 65 represents literal 'A'
//
// string Eg. "Start" represents rule component Start
//
// *Action Mid rule action or rule semantic action
package parser
import (
"bytes"
"fmt"
"go/token"
"github.com/cznic/golex/lex"
)
const (
// ActionValueGo is used for a Go code fragment
ActionValueGo = iota
// ActionValueDlrDlr is used for $$.
ActionValueDlrDlr
// ActionValueDlrTagDlr is used for $<tag>$.
ActionValueDlrTagDlr
// ActionValueDlrNum is used for $num.
ActionValueDlrNum
// ActionValueDlrTagNum is used for $<tag>num.
ActionValueDlrTagNum
)
// ActionValue is an item of Action.Value
type ActionValue struct {
Num int // The number in $num.
Pos token.Pos // Position of the start of the ActionValue.
Src string // Source for this value.
Tag string // The tag in $<tag>$ or $<tag>num.
Type int // One of ActionValue{Go,DlrDlr,DlrTagDlr,DlrNum,DlrTagNum} constants.
}
// Token captures a lexem with position, value and comments, if any.
type Token struct {
Comments []string
Val string
File *token.File
lex.Char
}
// Pos returns the token.Pos for t.
func (t *Token) Pos() token.Pos { return t.Char.Pos() }
// Position returns the token.Position for t.
func (t *Token) Position() token.Position { return t.File.Position(t.Pos()) }
// String implements fmt.Stringer.
func (t *Token) String() string {
return fmt.Sprintf("%v: %v %q, Comments: %q", t.File.Position(t.Char.Pos()), yySymName(int(t.Char.Rune)), t.Val, t.Comments)
}
// Parse parses src as a single yacc source file fname and returns the
// corresponding Specification. If the source couldn't be read, the returned
// Specification is nil and the error indicates all of the specific failures.
func Parse(fset *token.FileSet, fname string, src []byte) (s *Specification, err error) {
r := bytes.NewBuffer(src)
file := fset.AddFile(fname, -1, len(src))
lx, err := newLexer(file, r)
if err != nil {
return nil, err
}
y := yyParse(lx)
n := len(lx.errors)
if y != 0 || n != 0 {
if n == 0 {
panic("internal error")
}
return nil, lx.errors
}
return lx.spec, nil
}
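A hedged sketch of calling Parse on a tiny grammar; the grammar text below is illustrative only and is not taken from the TiDB parser:

package main

import (
	"fmt"
	"go/token"

	parser "github.com/cznic/parser/yacc"
)

func main() {
	src := []byte(`
%token NUMBER

%%

expr:
	NUMBER
`)
	fset := token.NewFileSet()
	spec, err := parser.Parse(fset, "example.y", src)
	if err != nil {
		fmt.Println("parse error:", err)
		return
	}
	// Defs and Rules are the backward-compatible flattened views of the AST.
	fmt.Println(len(spec.Defs), len(spec.Rules))
}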

514
_vendor/vendor/github.com/cznic/parser/yacc/ast.go generated vendored Normal file
View File

@ -0,0 +1,514 @@
// CAUTION: Generated by yy - DO NOT EDIT.
// Copyright 2015 The parser Authors. All rights reserved. Use
// of this source code is governed by a BSD-style license that can be found in
// the LICENSE file.
//
// This is a derived work based on the original at
//
// http://pubs.opengroup.org/onlinepubs/009695399/utilities/yacc.html
//
// The original work is
//
// Copyright © 2001-2004 The IEEE and The Open Group, All Rights reserved.
//
// Grammar for the input to yacc.
package parser
import (
"go/token"
)
// Action represents data reduced by production:
//
// Action:
// '{' '}'
type Action struct {
Values []*ActionValue // For backward compatibility.
Token *Token
Token2 *Token
}
func (n *Action) fragment() interface{} { return n }
// String implements fmt.Stringer.
func (n *Action) String() string {
return prettyString(n)
}
// Pos reports the position of the first component of n or zero if it's empty.
func (n *Action) Pos() token.Pos {
return n.Token.Pos()
}
// Definition represents data reduced by productions:
//
// Definition:
// START IDENTIFIER
// | UNION // Case 1
// | LCURL RCURL // Case 2
// | ReservedWord Tag NameList // Case 3
// | ReservedWord Tag // Case 4
// | ERROR_VERBOSE // Case 5
type Definition struct {
Nlist []*Name // For backward compatibility.
Value string
Case int
NameList *NameList
ReservedWord *ReservedWord
Tag *Tag
Token *Token
Token2 *Token
}
func (n *Definition) fragment() interface{} { return n }
// String implements fmt.Stringer.
func (n *Definition) String() string {
return prettyString(n)
}
// Pos reports the position of the first component of n or zero if it's empty.
func (n *Definition) Pos() token.Pos {
switch n.Case {
case 3, 4:
return n.ReservedWord.Pos()
case 0, 1, 2, 5:
return n.Token.Pos()
default:
panic("internal error")
}
}
// DefinitionList represents data reduced by productions:
//
// DefinitionList:
// /* empty */
// | DefinitionList Definition // Case 1
type DefinitionList struct {
Definition *Definition
DefinitionList *DefinitionList
}
func (n *DefinitionList) reverse() *DefinitionList {
if n == nil {
return nil
}
na := n
nb := na.DefinitionList
for nb != nil {
nc := nb.DefinitionList
nb.DefinitionList = na
na = nb
nb = nc
}
n.DefinitionList = nil
return na
}
func (n *DefinitionList) fragment() interface{} { return n.reverse() }
// String implements fmt.Stringer.
func (n *DefinitionList) String() string {
return prettyString(n)
}
// Pos reports the position of the first component of n or zero if it's empty.
func (n *DefinitionList) Pos() token.Pos {
if n == nil {
return 0
}
if p := n.DefinitionList.Pos(); p != 0 {
return p
}
return n.Definition.Pos()
}
// LiteralStringOpt represents data reduced by productions:
//
// LiteralStringOpt:
// /* empty */
// | STRING_LITERAL // Case 1
type LiteralStringOpt struct {
Token *Token
}
func (n *LiteralStringOpt) fragment() interface{} { return n }
// String implements fmt.Stringer.
func (n *LiteralStringOpt) String() string {
return prettyString(n)
}
// Pos reports the position of the first component of n or zero if it's empty.
func (n *LiteralStringOpt) Pos() token.Pos {
if n == nil {
return 0
}
return n.Token.Pos()
}
// Name represents data reduced by productions:
//
// Name:
// IDENTIFIER LiteralStringOpt
// | IDENTIFIER NUMBER LiteralStringOpt // Case 1
type Name struct {
Identifier interface{} // For backward compatibility.
Number int // For backward compatibility.
Case int
LiteralStringOpt *LiteralStringOpt
Token *Token
Token2 *Token
}
func (n *Name) fragment() interface{} { return n }
// String implements fmt.Stringer.
func (n *Name) String() string {
return prettyString(n)
}
// Pos reports the position of the first component of n or zero if it's empty.
func (n *Name) Pos() token.Pos {
return n.Token.Pos()
}
// NameList represents data reduced by productions:
//
// NameList:
// Name
// | NameList Name // Case 1
// | NameList ',' Name // Case 2
type NameList struct {
Case int
Name *Name
NameList *NameList
Token *Token
}
func (n *NameList) reverse() *NameList {
if n == nil {
return nil
}
na := n
nb := na.NameList
for nb != nil {
nc := nb.NameList
nb.NameList = na
na = nb
nb = nc
}
n.NameList = nil
return na
}
func (n *NameList) fragment() interface{} { return n.reverse() }
// String implements fmt.Stringer.
func (n *NameList) String() string {
return prettyString(n)
}
// Pos reports the position of the first component of n or zero if it's empty.
func (n *NameList) Pos() token.Pos {
switch n.Case {
case 0:
return n.Name.Pos()
case 1, 2:
return n.NameList.Pos()
default:
panic("internal error")
}
}
// Precedence represents data reduced by productions:
//
// Precedence:
// /* empty */
// | PREC IDENTIFIER // Case 1
// | PREC IDENTIFIER Action // Case 2
// | Precedence ';' // Case 3
type Precedence struct {
Identifier interface{} // Name string or literal int.
Action *Action
Case int
Precedence *Precedence
Token *Token
Token2 *Token
}
func (n *Precedence) fragment() interface{} { return n }
// String implements fmt.Stringer.
func (n *Precedence) String() string {
return prettyString(n)
}
// Pos reports the position of the first component of n or zero if it's empty.
func (n *Precedence) Pos() token.Pos {
switch n.Case {
case 0:
return 0
case 3:
if p := n.Precedence.Pos(); p != 0 {
return p
}
return n.Token.Pos()
case 1, 2:
return n.Token.Pos()
default:
panic("internal error")
}
}
// ReservedWord represents data reduced by productions:
//
// ReservedWord:
// TOKEN
// | LEFT // Case 1
// | RIGHT // Case 2
// | NONASSOC // Case 3
// | TYPE // Case 4
// | PRECEDENCE // Case 5
type ReservedWord struct {
Case int
Token *Token
}
func (n *ReservedWord) fragment() interface{} { return n }
// String implements fmt.Stringer.
func (n *ReservedWord) String() string {
return prettyString(n)
}
// Pos reports the position of the first component of n or zero if it's empty.
func (n *ReservedWord) Pos() token.Pos {
return n.Token.Pos()
}
// Rule represents data reduced by productions:
//
// Rule:
// C_IDENTIFIER RuleItemList Precedence
// | '|' RuleItemList Precedence // Case 1
type Rule struct {
Body []interface{} // For backward compatibility.
Name *Token
Case int
Precedence *Precedence
RuleItemList *RuleItemList
Token *Token
}
func (n *Rule) fragment() interface{} { return n }
// String implements fmt.Stringer.
func (n *Rule) String() string {
return prettyString(n)
}
// Pos reports the position of the first component of n or zero if it's empty.
func (n *Rule) Pos() token.Pos {
return n.Token.Pos()
}
// RuleItemList represents data reduced by productions:
//
// RuleItemList:
// /* empty */
// | RuleItemList IDENTIFIER // Case 1
// | RuleItemList Action // Case 2
// | RuleItemList STRING_LITERAL // Case 3
type RuleItemList struct {
Action *Action
Case int
RuleItemList *RuleItemList
Token *Token
}
func (n *RuleItemList) reverse() *RuleItemList {
if n == nil {
return nil
}
na := n
nb := na.RuleItemList
for nb != nil {
nc := nb.RuleItemList
nb.RuleItemList = na
na = nb
nb = nc
}
n.RuleItemList = nil
return na
}
func (n *RuleItemList) fragment() interface{} { return n.reverse() }
// String implements fmt.Stringer.
func (n *RuleItemList) String() string {
return prettyString(n)
}
// Pos reports the position of the first component of n or zero if it's empty.
func (n *RuleItemList) Pos() token.Pos {
switch n.Case {
case 0:
return 0
case 2:
if p := n.RuleItemList.Pos(); p != 0 {
return p
}
return n.Action.Pos()
case 1, 3:
if p := n.RuleItemList.Pos(); p != 0 {
return p
}
return n.Token.Pos()
default:
panic("internal error")
}
}
// RuleList represents data reduced by productions:
//
// RuleList:
// C_IDENTIFIER RuleItemList Precedence
// | RuleList Rule // Case 1
type RuleList struct {
Case int
Precedence *Precedence
Rule *Rule
RuleItemList *RuleItemList
RuleList *RuleList
Token *Token
}
func (n *RuleList) reverse() *RuleList {
if n == nil {
return nil
}
na := n
nb := na.RuleList
for nb != nil {
nc := nb.RuleList
nb.RuleList = na
na = nb
nb = nc
}
n.RuleList = nil
return na
}
func (n *RuleList) fragment() interface{} { return n.reverse() }
// String implements fmt.Stringer.
func (n *RuleList) String() string {
return prettyString(n)
}
// Pos reports the position of the first component of n or zero if it's empty.
func (n *RuleList) Pos() token.Pos {
switch n.Case {
case 1:
return n.RuleList.Pos()
case 0:
return n.Token.Pos()
default:
panic("internal error")
}
}
// Specification represents data reduced by production:
//
// Specification:
// DefinitionList "%%" RuleList Tail
type Specification struct {
Defs []*Definition // For backward compatibility.
Rules []*Rule // For backward compatibility.
DefinitionList *DefinitionList
RuleList *RuleList
Tail *Tail
Token *Token
}
func (n *Specification) fragment() interface{} { return n }
// String implements fmt.Stringer.
func (n *Specification) String() string {
return prettyString(n)
}
// Pos reports the position of the first component of n or zero if it's empty.
func (n *Specification) Pos() token.Pos {
if p := n.DefinitionList.Pos(); p != 0 {
return p
}
return n.Token.Pos()
}
// Tag represents data reduced by productions:
//
// Tag:
// /* empty */
// | '<' IDENTIFIER '>' // Case 1
type Tag struct {
Token *Token
Token2 *Token
Token3 *Token
}
func (n *Tag) fragment() interface{} { return n }
// String implements fmt.Stringer.
func (n *Tag) String() string {
return prettyString(n)
}
// Pos reports the position of the first component of n or zero if it's empty.
func (n *Tag) Pos() token.Pos {
if n == nil {
return 0
}
return n.Token.Pos()
}
// Tail represents data reduced by productions:
//
// Tail:
// "%%"
// | /* empty */ // Case 1
type Tail struct {
Value string
Token *Token
}
func (n *Tail) fragment() interface{} { return n }
// String implements fmt.Stringer.
func (n *Tail) String() string {
return prettyString(n)
}
// Pos reports the position of the first component of n or zero if it's empty.
func (n *Tail) Pos() token.Pos {
if n == nil {
return 0
}
return n.Token.Pos()
}
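Reusing the fset, spec and imports from the Parse sketch earlier, rule bodies can be inspected through the element types documented in api.go (int for a literal, string for a grammar component, *Action for an action); a small sketch:

// dumpRule prints the components of one parsed grammar rule.
func dumpRule(fset *token.FileSet, r *parser.Rule) {
	fmt.Println("rule at", fset.Position(r.Pos()))
	for _, part := range r.Body {
		switch x := part.(type) {
		case int:
			fmt.Printf("\tliteral %q\n", rune(x))
		case string:
			fmt.Printf("\tcomponent %s\n", x)
		case *parser.Action:
			fmt.Println("\taction at", fset.Position(x.Pos()))
		}
	}
}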

128
_vendor/vendor/github.com/cznic/parser/yacc/generate.go generated vendored Normal file
View File

@ -0,0 +1,128 @@
// Copyright 2015 The parser Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build ignore
package main
import (
"bytes"
"flag"
"io/ioutil"
"log"
"os"
"os/exec"
"path/filepath"
)
func yy() (nm string, err error) {
y, err := os.Create("parser.y")
if err != nil {
return "", err
}
nm = y.Name()
cmd := exec.Command(
"yy",
"-astImport", "\"go/token\"",
"-kind", "Case",
"-o", nm,
"parser.yy",
)
if out, err := cmd.CombinedOutput(); err != nil {
os.Remove(nm)
log.Printf("%s", out)
return "", err
}
return nm, nil
}
func goyacc(y string) (err error) {
t, err := ioutil.TempFile("", "go-generate-xegen-")
if err != nil {
log.Fatal(err)
}
defer func() {
if e := os.Remove(t.Name()); e != nil && err == nil {
err = e
}
}()
cmd := exec.Command("goyacc", "-o", os.DevNull, "-xegen", t.Name(), y)
if out, err := cmd.CombinedOutput(); err != nil {
log.Printf("%s\n", out)
return err
}
xerrors, err := ioutil.ReadFile("xerrors")
if err != nil {
return err
}
if _, err := t.Seek(0, 2); err != nil {
return err
}
if _, err := t.Write(xerrors); err != nil {
return err
}
cmd = exec.Command("goyacc", "-cr", "-xe", t.Name(), "-o", "parser.go", "-dlvalf", "%v", "-dlval", "prettyString(lval.Token)", y)
if out, err := cmd.CombinedOutput(); err != nil {
log.Printf("%s", out)
return err
} else {
log.Printf("%s", out)
}
return nil
}
func main() {
if err := main0(); err != nil {
log.Fatal(err)
}
}
func main0() (err error) {
log.SetFlags(log.Lshortfile)
p2 := flag.Bool("2", false, "")
flag.Parse()
if *p2 {
return main2()
}
os.Remove("ast.go")
os.Remove("ast_test.go")
y, err := yy()
if err != nil {
return err
}
return goyacc(y)
}
func main2() (err error) {
goCmd := exec.Command("go", "test", "-run", "^Example")
out, err := goCmd.CombinedOutput() // Errors are expected and wanted here.
feCmd := exec.Command("fe")
feCmd.Stdin = bytes.NewBuffer(out)
if out, err = feCmd.CombinedOutput(); err != nil {
log.Printf("%s", out)
return err
}
matches, err := filepath.Glob("*_test.go")
if err != nil {
return err
}
cmd := exec.Command("pcregrep", append([]string{"-nM", `failed|panic|\/\/ <nil>|// false|// -1|Output:\n}`}, matches...)...)
if out, _ = cmd.CombinedOutput(); len(out) != 0 { // Error != nil when no matches
log.Printf("%s", out)
}
return nil
}

65
_vendor/vendor/github.com/cznic/parser/yacc/go.l generated vendored Normal file
View File

@ -0,0 +1,65 @@
%{
// Copyright 2015 The parser Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// CAUTION: Generated file (unless this is go.l) - DO NOT EDIT!
package parser
import (
"fmt"
"github.com/cznic/golex/lex"
)
func (l *lexer) scanGo() lex.Char {
c := l.Enter()
%}
%yyc c
%yyn c = l.Next()
%yym l.Mark()
%yyt l.state
eof \x80
other \x81
intlit [0-9]+
punct [-!#%&()*+.:;<=>?@[-_|~]|"/"[^/\*]
runelit '(\\[^\n\r\x80]|[^\\'\x80\n\r])*'
strlit \x22(\\[^\n\r\x80]|[^\\\x22\x80\n\r])*\x22
white [ \t\n\r]+
comment "//"[^\x80\n\r]*|"/*"([^*\x80]|\*+[^*/\x80])*\*+\/
n -?{intlit}
tag [_a-zA-Z][_a-zA-Z0-9]*
%x DLR
%%
<*>"{" return l.char('{')
<*>"}" return l.char('}')
"$" |
<*>{intlit} |
<*>{punct} |
<*>{runelit} |
<*>{strlit} |
<*>{white} |
<*>{comment} |
<*>{other}+ return l.char(' ')
<DLR>"$$" return l.char(' ')
<DLR>"$"<{tag}>"$" return l.char(' ')
<DLR>"$"{n} return l.char(' ')
<DLR>"$"<{tag}>{n} return l.char(' ')
%%
if c, ok := l.Abort(); ok {
return l.char(c)
}
goto yyAction
}

436
_vendor/vendor/github.com/cznic/parser/yacc/goscanner.go generated vendored Normal file
View File

@ -0,0 +1,436 @@
// CAUTION: Generated file - DO NOT EDIT.
// Copyright 2015 The parser Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// CAUTION: Generated file (unless this is go.l) - DO NOT EDIT!
package parser
import (
"fmt"
"github.com/cznic/golex/lex"
)
func (l *lexer) scanGo() lex.Char {
c := l.Enter()
yystate0:
yyrule := -1
_ = yyrule
switch yyt := l.state; yyt {
default:
panic(fmt.Errorf(`invalid start condition %d`, yyt))
case 0: // start condition: INITIAL
goto yystart1
case 1: // start condition: DLR
goto yystart20
}
goto yystate0 // silence unused label error
goto yyAction // silence unused label error
yyAction:
switch yyrule {
case 1:
goto yyrule1
case 2:
goto yyrule2
case 3:
goto yyrule3
case 4:
goto yyrule4
case 5:
goto yyrule5
case 6:
goto yyrule6
case 7:
goto yyrule7
case 8:
goto yyrule8
case 9:
goto yyrule9
case 10:
goto yyrule10
case 11:
goto yyrule11
case 12:
goto yyrule12
case 13:
goto yyrule13
case 14:
goto yyrule14
}
goto yystate1 // silence unused label error
yystate1:
c = l.Next()
yystart1:
switch {
default:
goto yyabort
case c == '!' || c == '#' || c == '%' || c == '&' || c >= '(' && c <= '+' || c == '-' || c == '.' || c >= ':' && c <= '@' || c == '[' || c == '_' || c == '|' || c == '~':
goto yystate3
case c == '"':
goto yystate4
case c == '$':
goto yystate7
case c == '/':
goto yystate11
case c == '\'':
goto yystate8
case c == '\t' || c == '\n' || c == '\r' || c == ' ':
goto yystate2
case c == '\u0081':
goto yystate19
case c == '{':
goto yystate17
case c == '}':
goto yystate18
case c >= '0' && c <= '9':
goto yystate16
}
yystate2:
c = l.Next()
yyrule = 8
l.Mark()
switch {
default:
goto yyrule8
case c == '\t' || c == '\n' || c == '\r' || c == ' ':
goto yystate2
}
yystate3:
c = l.Next()
yyrule = 5
l.Mark()
goto yyrule5
yystate4:
c = l.Next()
switch {
default:
goto yyabort
case c == '"':
goto yystate5
case c == '\\':
goto yystate6
case c >= '\x01' && c <= '\t' || c == '\v' || c == '\f' || c >= '\x0e' && c <= '!' || c >= '#' && c <= '[' || c >= ']' && c <= '\u007f' || c >= '\u0081' && c <= 'ÿ':
goto yystate4
}
yystate5:
c = l.Next()
yyrule = 7
l.Mark()
goto yyrule7
yystate6:
c = l.Next()
switch {
default:
goto yyabort
case c >= '\x01' && c <= '\t' || c == '\v' || c == '\f' || c >= '\x0e' && c <= '\u007f' || c >= '\u0081' && c <= 'ÿ':
goto yystate4
}
yystate7:
c = l.Next()
yyrule = 3
l.Mark()
goto yyrule3
yystate8:
c = l.Next()
switch {
default:
goto yyabort
case c == '\'':
goto yystate9
case c == '\\':
goto yystate10
case c >= '\x01' && c <= '\t' || c == '\v' || c == '\f' || c >= '\x0e' && c <= '&' || c >= '(' && c <= '[' || c >= ']' && c <= '\u007f' || c >= '\u0081' && c <= 'ÿ':
goto yystate8
}
yystate9:
c = l.Next()
yyrule = 6
l.Mark()
goto yyrule6
yystate10:
c = l.Next()
switch {
default:
goto yyabort
case c >= '\x01' && c <= '\t' || c == '\v' || c == '\f' || c >= '\x0e' && c <= '\u007f' || c >= '\u0081' && c <= 'ÿ':
goto yystate8
}
yystate11:
c = l.Next()
switch {
default:
goto yyabort
case c == '*':
goto yystate12
case c == '/':
goto yystate15
case c >= '\x01' && c <= ')' || c >= '+' && c <= '.' || c >= '0' && c <= 'ÿ':
goto yystate3
}
yystate12:
c = l.Next()
switch {
default:
goto yyabort
case c == '*':
goto yystate13
case c >= '\x01' && c <= ')' || c >= '+' && c <= '\u007f' || c >= '\u0081' && c <= 'ÿ':
goto yystate12
}
yystate13:
c = l.Next()
switch {
default:
goto yyabort
case c == '*':
goto yystate13
case c == '/':
goto yystate14
case c >= '\x01' && c <= ')' || c >= '+' && c <= '.' || c >= '0' && c <= '\u007f' || c >= '\u0081' && c <= 'ÿ':
goto yystate12
}
yystate14:
c = l.Next()
yyrule = 9
l.Mark()
goto yyrule9
yystate15:
c = l.Next()
yyrule = 9
l.Mark()
switch {
default:
goto yyrule9
case c >= '\x01' && c <= '\t' || c == '\v' || c == '\f' || c >= '\x0e' && c <= '\u007f' || c >= '\u0081' && c <= 'ÿ':
goto yystate15
}
yystate16:
c = l.Next()
yyrule = 4
l.Mark()
switch {
default:
goto yyrule4
case c >= '0' && c <= '9':
goto yystate16
}
yystate17:
c = l.Next()
yyrule = 1
l.Mark()
goto yyrule1
yystate18:
c = l.Next()
yyrule = 2
l.Mark()
goto yyrule2
yystate19:
c = l.Next()
yyrule = 10
l.Mark()
switch {
default:
goto yyrule10
case c == '\u0081':
goto yystate19
}
goto yystate20 // silence unused label error
yystate20:
c = l.Next()
yystart20:
switch {
default:
goto yyabort
case c == '!' || c == '#' || c == '%' || c == '&' || c >= '(' && c <= '+' || c == '-' || c == '.' || c >= ':' && c <= '@' || c == '[' || c == '_' || c == '|' || c == '~':
goto yystate3
case c == '"':
goto yystate4
case c == '$':
goto yystate21
case c == '/':
goto yystate11
case c == '\'':
goto yystate8
case c == '\t' || c == '\n' || c == '\r' || c == ' ':
goto yystate2
case c == '\u0081':
goto yystate19
case c == '{':
goto yystate17
case c == '}':
goto yystate18
case c >= '0' && c <= '9':
goto yystate16
}
yystate21:
c = l.Next()
switch {
default:
goto yyabort
case c == '$':
goto yystate22
case c == '-':
goto yystate23
case c == '<':
goto yystate25
case c >= '0' && c <= '9':
goto yystate24
}
yystate22:
c = l.Next()
yyrule = 11
l.Mark()
goto yyrule11
yystate23:
c = l.Next()
switch {
default:
goto yyabort
case c >= '0' && c <= '9':
goto yystate24
}
yystate24:
c = l.Next()
yyrule = 13
l.Mark()
switch {
default:
goto yyrule13
case c >= '0' && c <= '9':
goto yystate24
}
yystate25:
c = l.Next()
switch {
default:
goto yyabort
case c >= 'A' && c <= 'Z' || c == '_' || c >= 'a' && c <= 'z':
goto yystate26
}
yystate26:
c = l.Next()
switch {
default:
goto yyabort
case c == '>':
goto yystate27
case c >= '0' && c <= '9' || c >= 'A' && c <= 'Z' || c == '_' || c >= 'a' && c <= 'z':
goto yystate26
}
yystate27:
c = l.Next()
switch {
default:
goto yyabort
case c == '$':
goto yystate28
case c == '-':
goto yystate29
case c >= '0' && c <= '9':
goto yystate30
}
yystate28:
c = l.Next()
yyrule = 12
l.Mark()
goto yyrule12
yystate29:
c = l.Next()
switch {
default:
goto yyabort
case c >= '0' && c <= '9':
goto yystate30
}
yystate30:
c = l.Next()
yyrule = 14
l.Mark()
switch {
default:
goto yyrule14
case c >= '0' && c <= '9':
goto yystate30
}
yyrule1: // "{"
{
return l.char('{')
}
yyrule2: // "}"
{
return l.char('}')
}
yyrule3: // "$"
yyrule4: // {intlit}
yyrule5: // {punct}
yyrule6: // {runelit}
yyrule7: // {strlit}
yyrule8: // {white}
yyrule9: // {comment}
yyrule10: // {other}+
{
return l.char(' ')
}
yyrule11: // "$$"
{
return l.char(' ')
}
yyrule12: // "$"<{tag}>"$"
{
return l.char(' ')
}
yyrule13: // "$"{n}
{
return l.char(' ')
}
yyrule14: // "$"<{tag}>{n}
{
return l.char(' ')
}
panic("unreachable")
goto yyabort // silence unused label error
yyabort: // no lexem recognized
if c, ok := l.Abort(); ok {
return l.char(c)
}
goto yyAction
}

656
_vendor/vendor/github.com/cznic/parser/yacc/lexer.go generated vendored Normal file
View File

@ -0,0 +1,656 @@
// Copyright 2015 The parser Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package parser
import (
"bytes"
"fmt"
"go/scanner"
"go/token"
"io"
"reflect"
"sort"
"strconv"
"strings"
"github.com/cznic/golex/lex"
"github.com/cznic/strutil"
)
// Node represents an AST node.
type Node interface {
Pos() token.Pos
}
const (
ccEOF = iota + 0x80
ccOther
)
type lexer struct {
*lex.Lexer
comments []string
defs []*Definition
errors scanner.ErrorList
example interface{}
exampleRule int
hold *Token
lastCommentLast lex.Char
lastTok *Token
marks int
pos token.Pos
pos2 token.Pos
positions []token.Pos
positions2 []token.Pos
ruleName *Token
rules []*Rule
spec *Specification
state int
value string
value2 string
values []string
values2 []string
}
func newLexer(file *token.File, src io.RuneReader) (_ *lexer, err error) {
l := &lexer{}
if l.Lexer, err = lex.New(
file,
src,
lex.ErrorFunc(func(pos token.Pos, msg string) {
l.errors.Add(l.File.Position(pos), msg)
}),
lex.RuneClass(runeClass),
); err != nil {
return nil, err
}
return l, nil
}
func (l *lexer) scan0() lex.Char {
again:
switch c := l.scan(); c.Rune {
case COMMENT:
val := string(l.TokenBytes(nil))
lastCommentLastLine := -1
if p := l.lastCommentLast.Pos(); p.IsValid() {
lastCommentLastLine = l.File.Position(p).Line
}
firstLine := l.File.Position(l.First.Pos()).Line
switch {
case firstLine-lastCommentLastLine <= 1: // Adjacent
l.comments = append(l.comments, val)
default: // New comment block
l.comments = []string{val}
}
l.lastCommentLast = l.Prev
goto again
default:
lastCommentLastLine := -1
if p := l.lastCommentLast.Pos(); p.IsValid() {
lastCommentLastLine = l.File.Position(p).Line
}
firstLine := l.File.Position(l.First.Pos()).Line
if firstLine-lastCommentLastLine > 1 { // Non adjacent comment(s)
l.comments = nil
}
return c
}
}
func (l *lexer) lex() *Token {
var c lex.Char
var t *Token
if t = l.hold; t != nil {
l.hold = nil
c = t.Char
} else {
c = l.scan0()
t = &Token{File: l.File, Char: c, Val: string(l.TokenBytes(l.tokenBuilder)), Comments: l.comments}
}
l.comments = nil
if r := c.Rune; r != IDENTIFIER {
return t
}
c2 := l.scan0()
if c2.Rune == ':' {
t.Char.Rune = C_IDENTIFIER
return t
}
l.hold = &Token{File: l.File, Char: c2, Val: string(l.TokenBytes(l.tokenBuilder)), Comments: l.comments}
l.comments = nil
return t
}
// yyLexer
func (l *lexer) Lex(lval *yySymType) int {
t := l.lex()
switch t.Char.Rune {
case MARK:
l.marks++
if l.marks == 2 {
l.Rule0()
for l.Next() != lex.RuneEOF {
}
v := l.TokenBytes(nil)
l.value = string(v[:len(v)-1])
}
case UNION:
l.state = 0
l.pos = l.Lookahead().Pos()
var val []byte
loop:
for balance := -1; balance != 0; {
l.Rule0()
c := l.scanGo()
r := c.Rune
if r != lex.RuneEOF {
val = append(val, l.TokenBytes(nil)...)
}
switch r := c.Rune; r {
case lex.RuneEOF:
break loop
case '{':
if balance < 0 {
balance = 1
break
}
balance++
case '}':
balance--
}
}
l.value = string(val)
case LCURL:
l.state = 0
l.pos = l.Lookahead().Pos()
var val []byte
var prev lex.Char
loop2:
for {
l.Rule0()
c := l.scanGo()
r := c.Rune
if r != lex.RuneEOF {
val = append(val, l.TokenBytes(nil)...)
}
switch r := c.Rune; r {
case lex.RuneEOF:
break loop2
case '}':
if prev.Rune == '%' {
l.Unget(c, prev)
break loop2
}
}
prev = l.Prev
}
l.value = string(val[:len(val)-2])
case '{':
l.state = 1
l.pos = l.Prev.Pos()
l.values = []string{string(l.TokenBytes(nil))}
l.positions = []token.Pos{l.First.Pos()}
balance := 1
loop3:
for {
l.Rule0()
c := l.scanGo()
r := c.Rune
part := string(l.TokenBytes(nil))
if r != lex.RuneEOF {
switch {
case strings.HasPrefix(part, "$"):
l.values = append(l.values, part)
l.positions = append(l.positions, l.First.Pos())
default:
n := len(l.values) - 1
s := l.values[n]
if strings.HasPrefix(s, "$") {
l.values = append(l.values, part)
l.positions = append(l.positions, l.First.Pos())
break
}
l.values[n] = s + part
}
}
switch r {
case lex.RuneEOF:
break loop3
case '{':
balance++
case '}':
balance--
if balance == 0 {
l.Unget(l.Lookahead(), c)
break loop3
}
}
}
}
lval.Token = t
l.lastTok = t
return int(t.Rune)
}
// yyLexer
func (l *lexer) Error(msg string) {
l.err(l.lastTok.Char.Pos(), "%v", msg)
}
// yyLexerEx
func (l *lexer) Reduced(rule, state int, lval *yySymType) (stop bool) {
if n := l.exampleRule; n >= 0 && rule != n {
return false
}
switch x := lval.node.(type) {
case interface {
fragment() interface{}
}:
l.example = x.fragment()
default:
l.example = x
}
return true
}
func (l *lexer) err(pos token.Pos, msg string, arg ...interface{}) {
l.errors.Add(l.File.Position(pos), fmt.Sprintf(msg, arg...))
}
func (l *lexer) char(r int) lex.Char {
return lex.NewChar(l.First.Pos(), rune(r))
}
func (l *lexer) errChar(c lex.Char, msg string, arg ...interface{}) {
l.err(c.Pos(), msg, arg...)
}
func (l *lexer) byteValue(dst *bytes.Buffer, in []lex.Char, report rune) int {
switch r := in[1].Rune; {
case r == '\'':
if r == report {
l.errChar(in[1], "unknown escape sequence: '")
}
dst.WriteString(strconv.QuoteRune('\''))
return 2
case r == '"':
if r == report {
l.errChar(in[1], "unknown escape sequence: \"")
}
dst.WriteString(strconv.QuoteRune('"'))
return 2
case r == '\\':
dst.WriteString(strconv.QuoteRune('\\'))
return 2
case r == 'a':
dst.WriteString(strconv.QuoteRune('\a'))
return 2
case r == 'b':
dst.WriteString(strconv.QuoteRune('\b'))
return 2
case r == 'f':
dst.WriteString(strconv.QuoteRune('\f'))
return 2
case r == 'n':
dst.WriteString(strconv.QuoteRune('\n'))
return 2
case r == 'r':
dst.WriteString(strconv.QuoteRune('\r'))
return 2
case r == 't':
dst.WriteString(strconv.QuoteRune('\t'))
return 2
case r == 'v':
dst.WriteString(strconv.QuoteRune('\v'))
return 2
case r >= '0' && r <= '7':
val := r - '0'
n := 2
for _, v := range in[2:] {
r = v.Rune
if r < '0' || r > '7' {
l.errChar(v, "non-octal character in escape sequence: %c", r)
return n
}
val = val<<3 + r - '0'
n++
if n == 4 {
dst.WriteString(strconv.QuoteRune(rune(byte(val))))
return n
}
}
case r == 'x':
val := 0
n := 2
for _, v := range in[2:] {
r = v.Rune
if !isHex(r) {
l.errChar(v, "non-hex character in escape sequence: %c", r)
return n
}
val = val<<4 + hexNibble(r)
n++
if n == 4 {
dst.WriteString(strconv.QuoteRune(rune(byte(val))))
return n
}
}
case r == 'u':
r := rune(hexNibble(in[2].Rune)<<12 |
hexNibble(in[3].Rune)<<8 |
hexNibble(in[4].Rune)<<4 |
hexNibble(in[5].Rune))
if !isValidRune(r) {
l.errChar(l.First, "escape sequence is invalid Unicode code point")
}
dst.WriteString(strconv.QuoteRune(r))
return 6
case r == 'U':
r := rune(hexNibble(in[2].Rune)<<28 |
hexNibble(in[3].Rune)<<24 |
hexNibble(in[4].Rune)<<20 |
hexNibble(in[5].Rune)<<16 |
hexNibble(in[6].Rune)<<12 |
hexNibble(in[7].Rune)<<8 |
hexNibble(in[8].Rune)<<4 |
hexNibble(in[9].Rune))
if !isValidRune(r) {
l.errChar(l.First, "escape sequence is invalid Unicode code point")
}
dst.WriteString(strconv.QuoteRune(r))
return 10
}
panic("internal error")
}
func (l *lexer) parseActionValue(pos token.Pos, src string) *ActionValue {
src0 := src
if !strings.HasPrefix(src, "$") {
return &ActionValue{Type: ActionValueGo, Src: src0, Pos: pos}
}
if src == "$$" {
return &ActionValue{Type: ActionValueDlrDlr, Src: src0, Pos: pos}
}
var tag string
src = src[1:] // Remove leading $
if strings.HasPrefix(src, "<") {
i := strings.Index(src, ">")
if i < 0 {
panic("internal error")
}
tag = src[len("<"):i]
src = src[i+1:]
}
if src == "$" {
return &ActionValue{Type: ActionValueDlrTagDlr, Tag: tag, Src: src0, Pos: pos}
}
n, err := strconv.ParseInt(src, 10, 31)
if err != nil {
l.err(pos, "%v: %s", err, src)
return nil
}
if tag != "" {
return &ActionValue{Type: ActionValueDlrTagNum, Tag: tag, Num: int(n), Src: src0, Pos: pos}
}
return &ActionValue{Type: ActionValueDlrNum, Num: int(n), Src: src0, Pos: pos}
}
func (l *lexer) tokenBuilder(buf *bytes.Buffer) {
in := l.Token()
switch r := in[0].Rune; {
case r == '\'':
r := in[1].Rune
if r == '\\' {
l.byteValue(buf, in[1:], '"')
return
}
if r == '\'' {
l.errChar(in[1], "empty character literal or unescaped ' in character literal")
}
buf.WriteString(strconv.QuoteRune(r))
default:
for _, c := range in {
buf.WriteRune(c.Rune)
}
}
}
func (l *lexer) ident(t *Token) interface{} {
s := t.Val
if s[0] == '\'' {
s, err := strconv.Unquote(s)
if err != nil && l != nil {
l.err(t.Pos(), "%v", err)
return 0
}
return int([]rune(s)[0])
}
return s
}
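// number parses t as a non-negative decimal integer, reporting any parse
// error at the token's position.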
func (l *lexer) number(t *Token) int {
n, err := strconv.ParseUint(t.Val, 10, 31)
if err != nil {
l.err(t.Pos(), "%v", err)
}
return int(n)
}
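// isValidRune reports whether r is a valid Unicode code point, i.e. not a
// surrogate and not greater than U+10FFFF.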
func isValidRune(r rune) bool {
return !(r >= 0xd800 && r <= 0xdfff || r > 0x10ffff)
}
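// isHex reports whether r is an ASCII hexadecimal digit.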
func isHex(r rune) bool {
return r >= '0' && r <= '9' || r >= 'a' && r <= 'f' || r >= 'A' && r <= 'F'
}
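// hexNibble returns the numeric value of the hexadecimal digit r; the caller
// must ensure isHex(r) holds.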
func hexNibble(r rune) int {
if r <= '9' {
return int(r) - '0'
}
if r >= 'a' {
return int(r) - 'a' + 10
}
return int(r) - 'A' + 10
}
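// runeClass maps r to the character class used by the generated scanner:
// ASCII runes map to themselves, EOF maps to ccEOF and anything else to ccOther.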
func runeClass(r rune) int {
if r >= 0 && r < 0x80 {
return int(r)
}
if r == lex.RuneEOF {
return ccEOF
}
return ccOther
}
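// exampleAST parses src and returns the node produced by reduction of the
// given grammar rule, or an error if the lexer cannot be constructed.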
func exampleAST(rule int, src string) interface{} {
r := bytes.NewBufferString(src)
file := token.NewFileSet().AddFile("example.y", -1, len(src))
lx, err := newLexer(file, r)
if err != nil {
return fmt.Errorf("failed: %v", err)
}
lx.exampleRule = rule
yyParse(lx)
return lx.example
}
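// prettyString returns the prettyPrint rendering of v as a string.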
func prettyString(v interface{}) string {
var b bytes.Buffer
prettyPrint(&b, v)
return b.String()
}
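// prettyPrint writes an indented, human-readable dump of v to w, recovering
// from any panic raised while formatting.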
func prettyPrint(w io.Writer, v interface{}) {
if v == nil {
return
}
f := strutil.IndentFormatter(w, "· ")
defer func() {
if e := recover(); e != nil {
f.Format("\npanic: %v", e)
}
}()
prettyPrint0(nil, f, "", "", v)
}
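// prettyPrint0 recursively formats v via sf; protect records pointers already
// printed so that cyclic or repeated pointees are not expanded again.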
func prettyPrint0(protect map[interface{}]struct{}, sf strutil.Formatter, prefix, suffix string, v interface{}) {
if v == nil {
return
}
switch x := v.(type) {
case *Token:
if x == nil {
return
}
sf.Format("%s%v"+suffix, prefix, x.String())
return
}
rt := reflect.TypeOf(v)
rv := reflect.ValueOf(v)
switch rt.Kind() {
case reflect.Slice:
if rv.Len() == 0 {
return
}
sf.Format("%s[]%T{ // len %d%i\n", prefix, rv.Index(0).Interface(), rv.Len())
for i := 0; i < rv.Len(); i++ {
prettyPrint0(protect, sf, fmt.Sprintf("%d: ", i), ",\n", rv.Index(i).Interface())
}
sf.Format("%u}" + suffix)
case reflect.Struct:
sf.Format("%s%T{%i\n", prefix, v)
for i := 0; i < rt.NumField(); i++ {
f := rv.Field(i)
if !f.CanInterface() {
continue
}
prettyPrint0(protect, sf, fmt.Sprintf("%s: ", rt.Field(i).Name), ",\n", f.Interface())
}
sf.Format("%u}" + suffix)
case reflect.Ptr:
if rv.IsNil() {
return
}
rvi := rv.Interface()
if _, ok := protect[rvi]; ok {
sf.Format("%s&%T{ /* recursive/repetitive pointee not shown */ }"+suffix, prefix, rv.Elem().Interface())
return
}
if protect == nil {
protect = map[interface{}]struct{}{}
}
protect[rvi] = struct{}{}
prettyPrint0(protect, sf, prefix+"&", suffix, rv.Elem().Interface())
case reflect.Int, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int8:
if v := rv.Int(); v != 0 {
sf.Format("%s%v"+suffix, prefix, v)
}
case reflect.Uint, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint8:
if v := rv.Uint(); v != 0 {
sf.Format("%s%v"+suffix, prefix, rv.Uint())
}
case reflect.Bool:
if v := rv.Bool(); v {
sf.Format("%s%v"+suffix, prefix, rv.Bool())
}
case reflect.String:
s := rv.Interface().(string)
if s == "" {
return
}
sf.Format("%s%q"+suffix, prefix, s)
case reflect.Map:
keys := rv.MapKeys()
if len(keys) == 0 {
return
}
var buf bytes.Buffer
nf := strutil.IndentFormatter(&buf, "· ")
var skeys []string
for i, k := range keys {
prettyPrint0(protect, nf, "", "", k.Interface())
skeys = append(skeys, fmt.Sprintf("%s%10d", buf.Bytes(), i))
}
sort.Strings(skeys)
sf.Format("%s%T{%i\n", prefix, v)
for _, k := range skeys {
si := strings.TrimSpace(k[len(k)-10:])
k = k[:len(k)-10]
n, _ := strconv.ParseUint(si, 10, 64)
mv := rv.MapIndex(keys[n])
prettyPrint0(protect, sf, fmt.Sprintf("%s: ", k), ",\n", mv.Interface())
}
sf.Format("%u}" + suffix)
default:
panic(fmt.Sprintf("prettyPrint: missing support for reflect.Kind == %v", rt.Kind()))
}
}
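// collect flattens the rule's item list (identifiers, actions and string
// literals) and any trailing %prec action into r.Body.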
func (r *Rule) collect() {
for n := r.RuleItemList; n != nil; n = n.RuleItemList {
switch n.Case {
case 0: // /* empty */
return
case 1: // RuleItemList IDENTIFIER
r.Body = append(r.Body, (*lexer)(nil).ident(n.Token))
case 2: // RuleItemList Action
r.Body = append(r.Body, n.Action)
case 3: // RuleItemList STRING_LITERAL
r.Body = append(r.Body, n.Token.Val)
}
}
p := r.Precedence
if p == nil {
return
}
for p != nil && p.Case == 3 { // Precedence ';'
p = p.Precedence
}
if p != nil && p.Action != nil {
r.Body = append(r.Body, p.Action)
}
}

938
_vendor/vendor/github.com/cznic/parser/yacc/parser.go generated vendored Normal file
View File

@ -0,0 +1,938 @@
// CAUTION: Generated file - DO NOT EDIT.
// Copyright 2015 The parser Authors. All rights reserved. Use
// of this source code is governed by a BSD-style license that can be found in
// the LICENSE file.
//
// This is a derived work based on the original at
//
// http://pubs.opengroup.org/onlinepubs/009695399/utilities/yacc.html
//
// The original work is
//
// Copyright © 2001-2004 The IEEE and The Open Group, All Rights reserved.
//
// Grammar for the input to yacc.
package parser
import __yyfmt__ "fmt"
import (
"go/token"
)
type yySymType struct {
yys int
node Node
Token *Token
}
type yyXError struct {
state, xsym int
}
const (
yyDefault = 57366
yyEofCode = 57344
COMMENT = 57346
C_IDENTIFIER = 57347
ERROR_VERBOSE = 57348
IDENTIFIER = 57349
LCURL = 57350
LEFT = 57351
MARK = 57352
NONASSOC = 57353
NUMBER = 57354
PREC = 57355
PRECEDENCE = 57356
RCURL = 57357
RIGHT = 57358
START = 57359
STRING_LITERAL = 57360
TOKEN = 57361
TYPE = 57362
UNION = 57363
yyErrCode = 57345
yyMaxDepth = 200
yyTabOfs = -42
)
var (
yyXLAT = map[int]int{
57352: 0, // MARK (43x)
57349: 1, // IDENTIFIER (32x)
57348: 2, // ERROR_VERBOSE (25x)
57350: 3, // LCURL (25x)
57351: 4, // LEFT (25x)
57353: 5, // NONASSOC (25x)
57356: 6, // PRECEDENCE (25x)
57358: 7, // RIGHT (25x)
57359: 8, // START (25x)
57361: 9, // TOKEN (25x)
57362: 10, // TYPE (25x)
57363: 11, // UNION (25x)
57344: 12, // $end (21x)
57347: 13, // C_IDENTIFIER (19x)
124: 14, // '|' (18x)
59: 15, // ';' (16x)
57360: 16, // STRING_LITERAL (12x)
123: 17, // '{' (11x)
57355: 18, // PREC (10x)
44: 19, // ',' (9x)
60: 20, // '<' (7x)
57367: 21, // Action (4x)
57371: 22, // Name (3x)
57373: 23, // Precedence (3x)
57376: 24, // RuleItemList (3x)
125: 25, // '}' (2x)
57370: 26, // LiteralStringOpt (2x)
57357: 27, // RCURL (2x)
57364: 28, // $@1 (1x)
57365: 29, // $@2 (1x)
62: 30, // '>' (1x)
57368: 31, // Definition (1x)
57369: 32, // DefinitionList (1x)
57372: 33, // NameList (1x)
57354: 34, // NUMBER (1x)
57374: 35, // ReservedWord (1x)
57375: 36, // Rule (1x)
57377: 37, // RuleList (1x)
57378: 38, // Specification (1x)
57379: 39, // Tag (1x)
57380: 40, // Tail (1x)
57366: 41, // $default (0x)
57346: 42, // COMMENT (0x)
57345: 43, // error (0x)
}
yySymNames = []string{
"MARK",
"IDENTIFIER",
"ERROR_VERBOSE",
"LCURL",
"LEFT",
"NONASSOC",
"PRECEDENCE",
"RIGHT",
"START",
"TOKEN",
"TYPE",
"UNION",
"$end",
"C_IDENTIFIER",
"'|'",
"';'",
"STRING_LITERAL",
"'{'",
"PREC",
"','",
"'<'",
"Action",
"Name",
"Precedence",
"RuleItemList",
"'}'",
"LiteralStringOpt",
"RCURL",
"$@1",
"$@2",
"'>'",
"Definition",
"DefinitionList",
"NameList",
"NUMBER",
"ReservedWord",
"Rule",
"RuleList",
"Specification",
"Tag",
"Tail",
"$default",
"COMMENT",
"error",
}
yyTokenLiteralStrings = map[int]string{
57352: "%%",
57349: "identifier",
57348: "%error-verbose",
57350: "%{",
57351: "%left",
57353: "%nonassoc",
57356: "%precedence",
57358: "%right",
57359: "%start",
57361: "%token",
57362: "%type",
57363: "%union",
57347: "rule name",
57360: "string literal",
57355: "%prec",
57357: "%}",
57354: "number",
}
yyReductions = map[int]struct{ xsym, components int }{
0: {0, 1},
1: {28, 0},
2: {21, 3},
3: {31, 2},
4: {31, 1},
5: {29, 0},
6: {31, 3},
7: {31, 3},
8: {31, 2},
9: {31, 1},
10: {32, 0},
11: {32, 2},
12: {26, 0},
13: {26, 1},
14: {22, 2},
15: {22, 3},
16: {33, 1},
17: {33, 2},
18: {33, 3},
19: {23, 0},
20: {23, 2},
21: {23, 3},
22: {23, 2},
23: {35, 1},
24: {35, 1},
25: {35, 1},
26: {35, 1},
27: {35, 1},
28: {35, 1},
29: {36, 3},
30: {36, 3},
31: {24, 0},
32: {24, 2},
33: {24, 2},
34: {24, 2},
35: {37, 3},
36: {37, 2},
37: {38, 4},
38: {39, 0},
39: {39, 3},
40: {40, 1},
41: {40, 0},
}
yyXErrors = map[yyXError]string{
yyXError{0, 12}: "invalid empty input",
yyXError{1, -1}: "expected $end",
yyXError{21, -1}: "expected $end",
yyXError{22, -1}: "expected $end",
yyXError{5, -1}: "expected %}",
yyXError{53, -1}: "expected %}",
yyXError{41, -1}: "expected '>'",
yyXError{24, -1}: "expected '}'",
yyXError{33, -1}: "expected '}'",
yyXError{23, -1}: "expected Action or Precedence or one of [$end, %%, %prec, ';', '{', '|', identifier, rule name, string literal]",
yyXError{35, -1}: "expected Action or Precedence or one of [$end, %%, %prec, ';', '{', '|', identifier, rule name, string literal]",
yyXError{37, -1}: "expected Action or Precedence or one of [$end, %%, %prec, ';', '{', '|', identifier, rule name, string literal]",
yyXError{31, -1}: "expected Action or one of [$end, %%, ';', '{', '|', rule name]",
yyXError{2, -1}: "expected Definition or one of [%%, %error-verbose, %left, %nonassoc, %precedence, %right, %start, %token, %type, %union, %{]",
yyXError{44, -1}: "expected LiteralStringOpt or one of [%%, %error-verbose, %left, %nonassoc, %precedence, %right, %start, %token, %type, %union, %{, ',', identifier, number, string literal]",
yyXError{48, -1}: "expected LiteralStringOpt or one of [%%, %error-verbose, %left, %nonassoc, %precedence, %right, %start, %token, %type, %union, %{, ',', identifier, string literal]",
yyXError{51, -1}: "expected Name or identifier",
yyXError{43, -1}: "expected Name or one of [%%, %error-verbose, %left, %nonassoc, %precedence, %right, %start, %token, %type, %union, %{, ',', identifier]",
yyXError{6, -1}: "expected NameList or Tag or one of [%%, %error-verbose, %left, %nonassoc, %precedence, %right, %start, %token, %type, %union, %{, '<', identifier]",
yyXError{39, -1}: "expected NameList or one of [%%, %error-verbose, %left, %nonassoc, %precedence, %right, %start, %token, %type, %union, %{, identifier]",
yyXError{16, -1}: "expected Precedence or RuleItemList or one of [$end, %%, %prec, ';', '{', '|', identifier, rule name, string literal]",
yyXError{18, -1}: "expected Precedence or RuleItemList or one of [$end, %%, %prec, ';', '{', '|', identifier, rule name, string literal]",
yyXError{19, -1}: "expected Precedence or RuleItemList or one of [$end, %%, %prec, ';', '{', '|', identifier, rule name, string literal]",
yyXError{17, -1}: "expected Rule or Tail or one of [$end, %%, '|', rule name]",
yyXError{15, -1}: "expected RuleList or rule name",
yyXError{0, -1}: "expected Specification or one of [%%, %error-verbose, %left, %nonassoc, %precedence, %right, %start, %token, %type, %union, %{]",
yyXError{3, -1}: "expected identifier",
yyXError{25, -1}: "expected identifier",
yyXError{40, -1}: "expected identifier",
yyXError{27, -1}: "expected one of [$end, %%, %prec, ';', '{', '|', identifier, rule name, string literal]",
yyXError{28, -1}: "expected one of [$end, %%, %prec, ';', '{', '|', identifier, rule name, string literal]",
yyXError{29, -1}: "expected one of [$end, %%, %prec, ';', '{', '|', identifier, rule name, string literal]",
yyXError{34, -1}: "expected one of [$end, %%, %prec, ';', '{', '|', identifier, rule name, string literal]",
yyXError{26, -1}: "expected one of [$end, %%, ';', '|', rule name]",
yyXError{30, -1}: "expected one of [$end, %%, ';', '|', rule name]",
yyXError{32, -1}: "expected one of [$end, %%, ';', '|', rule name]",
yyXError{36, -1}: "expected one of [$end, %%, ';', '|', rule name]",
yyXError{38, -1}: "expected one of [$end, %%, ';', '|', rule name]",
yyXError{20, -1}: "expected one of [$end, %%, '|', rule name]",
yyXError{45, -1}: "expected one of [%%, %error-verbose, %left, %nonassoc, %precedence, %right, %start, %token, %type, %union, %{, ',', identifier]",
yyXError{46, -1}: "expected one of [%%, %error-verbose, %left, %nonassoc, %precedence, %right, %start, %token, %type, %union, %{, ',', identifier]",
yyXError{47, -1}: "expected one of [%%, %error-verbose, %left, %nonassoc, %precedence, %right, %start, %token, %type, %union, %{, ',', identifier]",
yyXError{49, -1}: "expected one of [%%, %error-verbose, %left, %nonassoc, %precedence, %right, %start, %token, %type, %union, %{, ',', identifier]",
yyXError{50, -1}: "expected one of [%%, %error-verbose, %left, %nonassoc, %precedence, %right, %start, %token, %type, %union, %{, ',', identifier]",
yyXError{52, -1}: "expected one of [%%, %error-verbose, %left, %nonassoc, %precedence, %right, %start, %token, %type, %union, %{, ',', identifier]",
yyXError{9, -1}: "expected one of [%%, %error-verbose, %left, %nonassoc, %precedence, %right, %start, %token, %type, %union, %{, '<', identifier]",
yyXError{10, -1}: "expected one of [%%, %error-verbose, %left, %nonassoc, %precedence, %right, %start, %token, %type, %union, %{, '<', identifier]",
yyXError{11, -1}: "expected one of [%%, %error-verbose, %left, %nonassoc, %precedence, %right, %start, %token, %type, %union, %{, '<', identifier]",
yyXError{12, -1}: "expected one of [%%, %error-verbose, %left, %nonassoc, %precedence, %right, %start, %token, %type, %union, %{, '<', identifier]",
yyXError{13, -1}: "expected one of [%%, %error-verbose, %left, %nonassoc, %precedence, %right, %start, %token, %type, %union, %{, '<', identifier]",
yyXError{14, -1}: "expected one of [%%, %error-verbose, %left, %nonassoc, %precedence, %right, %start, %token, %type, %union, %{, '<', identifier]",
yyXError{42, -1}: "expected one of [%%, %error-verbose, %left, %nonassoc, %precedence, %right, %start, %token, %type, %union, %{, identifier]",
yyXError{4, -1}: "expected one of [%%, %error-verbose, %left, %nonassoc, %precedence, %right, %start, %token, %type, %union, %{]",
yyXError{7, -1}: "expected one of [%%, %error-verbose, %left, %nonassoc, %precedence, %right, %start, %token, %type, %union, %{]",
yyXError{8, -1}: "expected one of [%%, %error-verbose, %left, %nonassoc, %precedence, %right, %start, %token, %type, %union, %{]",
yyXError{54, -1}: "expected one of [%%, %error-verbose, %left, %nonassoc, %precedence, %right, %start, %token, %type, %union, %{]",
yyXError{55, -1}: "expected one of [%%, %error-verbose, %left, %nonassoc, %precedence, %right, %start, %token, %type, %union, %{]",
}
yyParseTab = [56][]uint8{
// 0
{32, 2: 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32: 44, 38: 43},
{12: 42},
{57, 2: 49, 47, 52, 54, 56, 53, 45, 51, 55, 46, 31: 50, 35: 48},
{1: 97},
{38, 2: 38, 38, 38, 38, 38, 38, 38, 38, 38, 38},
// 5
{27: 37, 29: 95},
{4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 20: 82, 39: 81},
{33, 2: 33, 33, 33, 33, 33, 33, 33, 33, 33, 33},
{31, 2: 31, 31, 31, 31, 31, 31, 31, 31, 31, 31},
{19, 19, 19, 19, 19, 19, 19, 19, 19, 19, 19, 19, 20: 19},
// 10
{18, 18, 18, 18, 18, 18, 18, 18, 18, 18, 18, 18, 20: 18},
{17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 20: 17},
{16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 20: 16},
{15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 20: 15},
{14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 20: 14},
// 15
{13: 58, 37: 59},
{11, 11, 12: 11, 11, 11, 11, 11, 11, 11, 24: 79},
{64, 12: 1, 60, 61, 36: 62, 40: 63},
{11, 11, 12: 11, 11, 11, 11, 11, 11, 11, 24: 77},
{11, 11, 12: 11, 11, 11, 11, 11, 11, 11, 24: 65},
// 20
{6, 12: 6, 6, 6},
{12: 5},
{12: 2},
{23, 69, 12: 23, 23, 23, 23, 71, 66, 67, 21: 70, 23: 68},
{25: 41, 28: 75},
// 25
{1: 73},
{12, 12: 12, 12, 12, 72},
{10, 10, 12: 10, 10, 10, 10, 10, 10, 10},
{9, 9, 12: 9, 9, 9, 9, 9, 9, 9},
{8, 8, 12: 8, 8, 8, 8, 8, 8, 8},
// 30
{20, 12: 20, 20, 20, 20},
{22, 12: 22, 22, 22, 22, 17: 66, 21: 74},
{21, 12: 21, 21, 21, 21},
{25: 76},
{40, 40, 12: 40, 40, 40, 40, 40, 40, 40},
// 35
{23, 69, 12: 23, 23, 23, 23, 71, 66, 67, 21: 70, 23: 78},
{13, 12: 13, 13, 13, 72},
{23, 69, 12: 23, 23, 23, 23, 71, 66, 67, 21: 70, 23: 80},
{7, 12: 7, 7, 7, 72},
{34, 86, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 22: 87, 33: 85},
// 40
{1: 83},
{30: 84},
{3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3},
{35, 86, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 19: 93, 22: 92},
{30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 16: 88, 19: 30, 26: 89, 34: 90},
// 45
{26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 19: 26},
{29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 19: 29},
{28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 19: 28},
{30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 16: 88, 19: 30, 26: 91},
{27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 19: 27},
// 50
{25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 19: 25},
{1: 86, 22: 94},
{24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 19: 24},
{27: 96},
{36, 2: 36, 36, 36, 36, 36, 36, 36, 36, 36, 36},
// 55
{39, 2: 39, 39, 39, 39, 39, 39, 39, 39, 39, 39},
}
)
var yyDebug = 0
type yyLexer interface {
Lex(lval *yySymType) int
Error(s string)
}
type yyLexerEx interface {
yyLexer
Reduced(rule, state int, lval *yySymType) bool
}
func yySymName(c int) (s string) {
x, ok := yyXLAT[c]
if ok {
return yySymNames[x]
}
if c < 0x7f {
return __yyfmt__.Sprintf("'%c'", c)
}
return __yyfmt__.Sprintf("%d", c)
}
func yylex1(yylex yyLexer, lval *yySymType) (n int) {
n = yylex.Lex(lval)
if n <= 0 {
n = yyEofCode
}
if yyDebug >= 3 {
__yyfmt__.Printf("\nlex %s(%#x %d), prettyString(lval.Token): %v\n", yySymName(n), n, n, prettyString(lval.Token))
}
return n
}
func yyParse(yylex yyLexer) int {
const yyError = 43
yyEx, _ := yylex.(yyLexerEx)
var yyn int
var yylval yySymType
var yyVAL yySymType
yyS := make([]yySymType, 200)
Nerrs := 0 /* number of errors */
Errflag := 0 /* error recovery flag */
yyerrok := func() {
if yyDebug >= 2 {
__yyfmt__.Printf("yyerrok()\n")
}
Errflag = 0
}
_ = yyerrok
yystate := 0
yychar := -1
var yyxchar int
var yyshift int
yyp := -1
goto yystack
ret0:
return 0
ret1:
return 1
yystack:
/* put a state and value onto the stack */
yyp++
if yyp >= len(yyS) {
nyys := make([]yySymType, len(yyS)*2)
copy(nyys, yyS)
yyS = nyys
}
yyS[yyp] = yyVAL
yyS[yyp].yys = yystate
yynewstate:
if yychar < 0 {
yychar = yylex1(yylex, &yylval)
var ok bool
if yyxchar, ok = yyXLAT[yychar]; !ok {
yyxchar = len(yySymNames) // > tab width
}
}
if yyDebug >= 4 {
var a []int
for _, v := range yyS[:yyp+1] {
a = append(a, v.yys)
}
__yyfmt__.Printf("state stack %v\n", a)
}
row := yyParseTab[yystate]
yyn = 0
if yyxchar < len(row) {
if yyn = int(row[yyxchar]); yyn != 0 {
yyn += yyTabOfs
}
}
switch {
case yyn > 0: // shift
yychar = -1
yyVAL = yylval
yystate = yyn
yyshift = yyn
if yyDebug >= 2 {
__yyfmt__.Printf("shift, and goto state %d\n", yystate)
}
if Errflag > 0 {
Errflag--
}
goto yystack
case yyn < 0: // reduce
case yystate == 1: // accept
if yyDebug >= 2 {
__yyfmt__.Println("accept")
}
goto ret0
}
if yyn == 0 {
/* error ... attempt to resume parsing */
switch Errflag {
case 0: /* brand new error */
if yyDebug >= 1 {
__yyfmt__.Printf("no action for %s in state %d\n", yySymName(yychar), yystate)
}
msg, ok := yyXErrors[yyXError{yystate, yyxchar}]
if !ok {
msg, ok = yyXErrors[yyXError{yystate, -1}]
}
if !ok && yyshift != 0 {
msg, ok = yyXErrors[yyXError{yyshift, yyxchar}]
}
if !ok {
msg, ok = yyXErrors[yyXError{yyshift, -1}]
}
if yychar > 0 {
ls := yyTokenLiteralStrings[yychar]
if ls == "" {
ls = yySymName(yychar)
}
if ls != "" {
switch {
case msg == "":
msg = __yyfmt__.Sprintf("unexpected %s", ls)
default:
msg = __yyfmt__.Sprintf("unexpected %s, %s", ls, msg)
}
}
}
if msg == "" {
msg = "syntax error"
}
yylex.Error(msg)
Nerrs++
fallthrough
case 1, 2: /* incompletely recovered error ... try again */
Errflag = 3
/* find a state where "error" is a legal shift action */
for yyp >= 0 {
row := yyParseTab[yyS[yyp].yys]
if yyError < len(row) {
yyn = int(row[yyError]) + yyTabOfs
if yyn > 0 { // hit
if yyDebug >= 2 {
__yyfmt__.Printf("error recovery found error shift in state %d\n", yyS[yyp].yys)
}
yystate = yyn /* simulate a shift of "error" */
goto yystack
}
}
/* the current p has no shift on "error", pop stack */
if yyDebug >= 2 {
__yyfmt__.Printf("error recovery pops state %d\n", yyS[yyp].yys)
}
yyp--
}
/* there is no state on the stack with an error shift ... abort */
if yyDebug >= 2 {
__yyfmt__.Printf("error recovery failed\n")
}
goto ret1
case 3: /* no shift yet; clobber input char */
if yyDebug >= 2 {
__yyfmt__.Printf("error recovery discards %s\n", yySymName(yychar))
}
if yychar == yyEofCode {
goto ret1
}
yychar = -1
goto yynewstate /* try again in the same state */
}
}
r := -yyn
x0 := yyReductions[r]
x, n := x0.xsym, x0.components
yypt := yyp
_ = yypt // guard against "declared and not used"
yyp -= n
if yyp+1 >= len(yyS) {
nyys := make([]yySymType, len(yyS)*2)
copy(nyys, yyS)
yyS = nyys
}
yyVAL = yyS[yyp+1]
/* consult goto table to find next state */
exState := yystate
yystate = int(yyParseTab[yyS[yyp].yys][x]) + yyTabOfs
/* reduction by production r */
if yyDebug >= 2 {
__yyfmt__.Printf("reduce using rule %v (%s), and goto state %d\n", r, yySymNames[x], yystate)
}
switch r {
case 1:
{
lx := yylex.(*lexer)
lx.values2 = append([]string(nil), lx.values...)
lx.positions2 = append([]token.Pos(nil), lx.positions...)
}
case 2:
{
lx := yylex.(*lexer)
lhs := &Action{
Token: yyS[yypt-2].Token,
Token2: yyS[yypt-0].Token,
}
yyVAL.node = lhs
for i, v := range lx.values2 {
a := lx.parseActionValue(lx.positions2[i], v)
if a != nil {
lhs.Values = append(lhs.Values, a)
}
}
}
case 3:
{
yyVAL.node = &Definition{
Token: yyS[yypt-1].Token,
Token2: yyS[yypt-0].Token,
}
}
case 4:
{
lx := yylex.(*lexer)
lhs := &Definition{
Case: 1,
Token: yyS[yypt-0].Token,
}
yyVAL.node = lhs
lhs.Value = lx.value
}
case 5:
{
lx := yylex.(*lexer)
lx.pos2 = lx.pos
lx.value2 = lx.value
}
case 6:
{
lx := yylex.(*lexer)
lhs := &Definition{
Case: 2,
Token: yyS[yypt-2].Token,
Token2: yyS[yypt-0].Token,
}
yyVAL.node = lhs
lhs.Value = lx.value2
}
case 7:
{
lhs := &Definition{
Case: 3,
ReservedWord: yyS[yypt-2].node.(*ReservedWord),
Tag: yyS[yypt-1].node.(*Tag),
NameList: yyS[yypt-0].node.(*NameList).reverse(),
}
yyVAL.node = lhs
for n := lhs.NameList; n != nil; n = n.NameList {
lhs.Nlist = append(lhs.Nlist, n.Name)
}
}
case 8:
{
yyVAL.node = &Definition{
Case: 4,
ReservedWord: yyS[yypt-1].node.(*ReservedWord),
Tag: yyS[yypt-0].node.(*Tag),
}
}
case 9:
{
yyVAL.node = &Definition{
Case: 5,
Token: yyS[yypt-0].Token,
}
}
case 10:
{
yyVAL.node = (*DefinitionList)(nil)
}
case 11:
{
lx := yylex.(*lexer)
lhs := &DefinitionList{
DefinitionList: yyS[yypt-1].node.(*DefinitionList),
Definition: yyS[yypt-0].node.(*Definition),
}
yyVAL.node = lhs
lx.defs = append(lx.defs, lhs.Definition)
}
case 12:
{
yyVAL.node = (*LiteralStringOpt)(nil)
}
case 13:
{
yyVAL.node = &LiteralStringOpt{
Token: yyS[yypt-0].Token,
}
}
case 14:
{
lx := yylex.(*lexer)
lhs := &Name{
Token: yyS[yypt-1].Token,
LiteralStringOpt: yyS[yypt-0].node.(*LiteralStringOpt),
}
yyVAL.node = lhs
lhs.Identifier = lx.ident(lhs.Token)
lhs.Number = -1
}
case 15:
{
lx := yylex.(*lexer)
lhs := &Name{
Case: 1,
Token: yyS[yypt-2].Token,
Token2: yyS[yypt-1].Token,
LiteralStringOpt: yyS[yypt-0].node.(*LiteralStringOpt),
}
yyVAL.node = lhs
lhs.Identifier = lx.ident(lhs.Token)
lhs.Number = lx.number(lhs.Token2)
}
case 16:
{
yyVAL.node = &NameList{
Name: yyS[yypt-0].node.(*Name),
}
}
case 17:
{
yyVAL.node = &NameList{
Case: 1,
NameList: yyS[yypt-1].node.(*NameList),
Name: yyS[yypt-0].node.(*Name),
}
}
case 18:
{
yyVAL.node = &NameList{
Case: 2,
NameList: yyS[yypt-2].node.(*NameList),
Token: yyS[yypt-1].Token,
Name: yyS[yypt-0].node.(*Name),
}
}
case 19:
{
yyVAL.node = (*Precedence)(nil)
}
case 20:
{
lx := yylex.(*lexer)
lhs := &Precedence{
Case: 1,
Token: yyS[yypt-1].Token,
Token2: yyS[yypt-0].Token,
}
yyVAL.node = lhs
lhs.Identifier = lx.ident(lhs.Token2)
}
case 21:
{
lx := yylex.(*lexer)
lhs := &Precedence{
Case: 2,
Token: yyS[yypt-2].Token,
Token2: yyS[yypt-1].Token,
Action: yyS[yypt-0].node.(*Action),
}
yyVAL.node = lhs
lhs.Identifier = lx.ident(lhs.Token2)
}
case 22:
{
yyVAL.node = &Precedence{
Case: 3,
Precedence: yyS[yypt-1].node.(*Precedence),
Token: yyS[yypt-0].Token,
}
}
case 23:
{
yyVAL.node = &ReservedWord{
Token: yyS[yypt-0].Token,
}
}
case 24:
{
yyVAL.node = &ReservedWord{
Case: 1,
Token: yyS[yypt-0].Token,
}
}
case 25:
{
yyVAL.node = &ReservedWord{
Case: 2,
Token: yyS[yypt-0].Token,
}
}
case 26:
{
yyVAL.node = &ReservedWord{
Case: 3,
Token: yyS[yypt-0].Token,
}
}
case 27:
{
yyVAL.node = &ReservedWord{
Case: 4,
Token: yyS[yypt-0].Token,
}
}
case 28:
{
yyVAL.node = &ReservedWord{
Case: 5,
Token: yyS[yypt-0].Token,
}
}
case 29:
{
lx := yylex.(*lexer)
lhs := &Rule{
Token: yyS[yypt-2].Token,
RuleItemList: yyS[yypt-1].node.(*RuleItemList).reverse(),
Precedence: yyS[yypt-0].node.(*Precedence),
}
yyVAL.node = lhs
lx.ruleName = lhs.Token
lhs.Name = lhs.Token
}
case 30:
{
lx := yylex.(*lexer)
lhs := &Rule{
Case: 1,
Token: yyS[yypt-2].Token,
RuleItemList: yyS[yypt-1].node.(*RuleItemList).reverse(),
Precedence: yyS[yypt-0].node.(*Precedence),
}
yyVAL.node = lhs
lhs.Name = lx.ruleName
}
case 31:
{
yyVAL.node = (*RuleItemList)(nil)
}
case 32:
{
yyVAL.node = &RuleItemList{
Case: 1,
RuleItemList: yyS[yypt-1].node.(*RuleItemList),
Token: yyS[yypt-0].Token,
}
}
case 33:
{
yyVAL.node = &RuleItemList{
Case: 2,
RuleItemList: yyS[yypt-1].node.(*RuleItemList),
Action: yyS[yypt-0].node.(*Action),
}
}
case 34:
{
yyVAL.node = &RuleItemList{
Case: 3,
RuleItemList: yyS[yypt-1].node.(*RuleItemList),
Token: yyS[yypt-0].Token,
}
}
case 35:
{
lx := yylex.(*lexer)
lhs := &RuleList{
Token: yyS[yypt-2].Token,
RuleItemList: yyS[yypt-1].node.(*RuleItemList).reverse(),
Precedence: yyS[yypt-0].node.(*Precedence),
}
yyVAL.node = lhs
lx.ruleName = lhs.Token
rule := &Rule{
Token: lhs.Token,
Name: lhs.Token,
RuleItemList: lhs.RuleItemList,
Precedence: lhs.Precedence,
}
rule.collect()
lx.rules = append(lx.rules, rule)
}
case 36:
{
lx := yylex.(*lexer)
lhs := &RuleList{
Case: 1,
RuleList: yyS[yypt-1].node.(*RuleList),
Rule: yyS[yypt-0].node.(*Rule),
}
yyVAL.node = lhs
rule := lhs.Rule
rule.collect()
lx.rules = append(lx.rules, rule)
}
case 37:
{
lx := yylex.(*lexer)
lhs := &Specification{
DefinitionList: yyS[yypt-3].node.(*DefinitionList).reverse(),
Token: yyS[yypt-2].Token,
RuleList: yyS[yypt-1].node.(*RuleList).reverse(),
Tail: yyS[yypt-0].node.(*Tail),
}
yyVAL.node = lhs
lhs.Defs = lx.defs
lhs.Rules = lx.rules
lx.spec = lhs
}
case 38:
{
yyVAL.node = (*Tag)(nil)
}
case 39:
{
yyVAL.node = &Tag{
Token: yyS[yypt-2].Token,
Token2: yyS[yypt-1].Token,
Token3: yyS[yypt-0].Token,
}
}
case 40:
{
lx := yylex.(*lexer)
lhs := &Tail{
Token: yyS[yypt-0].Token,
}
yyVAL.node = lhs
lhs.Value = lx.value
}
case 41:
{
yyVAL.node = (*Tail)(nil)
}
}
if yyEx != nil && yyEx.Reduced(r, exState, &yyVAL) {
return -1
}
goto yystack /* stack new state and value */
}

456
_vendor/vendor/github.com/cznic/parser/yacc/parser.y generated vendored Normal file
View File

@ -0,0 +1,456 @@
// CAUTION: Generated by yy - DO NOT EDIT.
%{
// Copyright 2015 The parser Authors. All rights reserved. Use
// of this source code is governed by a BSD-style license that can be found in
// the LICENSE file.
//
// This is a derived work based on the original at
//
// http://pubs.opengroup.org/onlinepubs/009695399/utilities/yacc.html
//
// The original work is
//
// Copyright © 2001-2004 The IEEE and The Open Group, All Rights reserved.
//
// Grammar for the input to yacc.
package parser
import (
"go/token"
)
%}
%union {
node Node
Token *Token
}
%token <Token>
','
';'
'<'
'>'
'{'
'|'
'}'
COMMENT
C_IDENTIFIER "rule name"
ERROR_VERBOSE "%error-verbose"
IDENTIFIER "identifier"
LCURL "%{"
LEFT "%left"
MARK "%%"
NONASSOC "%nonassoc"
NUMBER "number"
PREC "%prec"
PRECEDENCE "%precedence"
RCURL "%}"
RIGHT "%right"
START "%start"
STRING_LITERAL "string literal"
TOKEN "%token"
TYPE "%type"
UNION "%union"
%type <node>
Action
Definition
DefinitionList
LiteralStringOpt
Name
NameList
Precedence
ReservedWord
Rule
RuleItemList
RuleList
Specification
Tag
Tail
%start Specification
%%
Action:
'{'
{
lx := yylex.(*lexer)
lx.values2 = append([]string(nil), lx.values...)
lx.positions2 = append([]token.Pos(nil), lx.positions...)
}
'}'
{
lx := yylex.(*lexer)
lhs := &Action{
Token: $1,
Token2: $3,
}
$$ = lhs
for i, v := range lx.values2 {
a := lx.parseActionValue(lx.positions2[i], v)
if a != nil {
lhs.Values = append(lhs.Values, a)
}
}
}
Definition:
START IDENTIFIER
{
$$ = &Definition{
Token: $1,
Token2: $2,
}
}
| UNION
{
lx := yylex.(*lexer)
lhs := &Definition{
Case: 1,
Token: $1,
}
$$ = lhs
lhs.Value = lx.value
}
| LCURL
{
lx := yylex.(*lexer)
lx.pos2 = lx.pos
lx.value2 = lx.value
}
RCURL
{
lx := yylex.(*lexer)
lhs := &Definition{
Case: 2,
Token: $1,
Token2: $3,
}
$$ = lhs
lhs.Value = lx.value2
}
| ReservedWord Tag NameList
{
lhs := &Definition{
Case: 3,
ReservedWord: $1.(*ReservedWord),
Tag: $2.(*Tag),
NameList: $3.(*NameList).reverse(),
}
$$ = lhs
for n := lhs.NameList; n != nil; n = n.NameList {
lhs.Nlist = append(lhs.Nlist, n.Name)
}
}
| ReservedWord Tag
{
$$ = &Definition{
Case: 4,
ReservedWord: $1.(*ReservedWord),
Tag: $2.(*Tag),
}
}
| ERROR_VERBOSE
{
$$ = &Definition{
Case: 5,
Token: $1,
}
}
DefinitionList:
/* empty */
{
$$ = (*DefinitionList)(nil)
}
| DefinitionList Definition
{
lx := yylex.(*lexer)
lhs := &DefinitionList{
DefinitionList: $1.(*DefinitionList),
Definition: $2.(*Definition),
}
$$ = lhs
lx.defs = append(lx.defs, lhs.Definition)
}
LiteralStringOpt:
/* empty */
{
$$ = (*LiteralStringOpt)(nil)
}
| STRING_LITERAL
{
$$ = &LiteralStringOpt{
Token: $1,
}
}
Name:
IDENTIFIER LiteralStringOpt
{
lx := yylex.(*lexer)
lhs := &Name{
Token: $1,
LiteralStringOpt: $2.(*LiteralStringOpt),
}
$$ = lhs
lhs.Identifier = lx.ident(lhs.Token)
lhs.Number = -1
}
| IDENTIFIER NUMBER LiteralStringOpt
{
lx := yylex.(*lexer)
lhs := &Name{
Case: 1,
Token: $1,
Token2: $2,
LiteralStringOpt: $3.(*LiteralStringOpt),
}
$$ = lhs
lhs.Identifier = lx.ident(lhs.Token)
lhs.Number = lx.number(lhs.Token2)
}
NameList:
Name
{
$$ = &NameList{
Name: $1.(*Name),
}
}
| NameList Name
{
$$ = &NameList{
Case: 1,
NameList: $1.(*NameList),
Name: $2.(*Name),
}
}
| NameList ',' Name
{
$$ = &NameList{
Case: 2,
NameList: $1.(*NameList),
Token: $2,
Name: $3.(*Name),
}
}
Precedence:
/* empty */
{
$$ = (*Precedence)(nil)
}
| PREC IDENTIFIER
{
lx := yylex.(*lexer)
lhs := &Precedence{
Case: 1,
Token: $1,
Token2: $2,
}
$$ = lhs
lhs.Identifier = lx.ident(lhs.Token2)
}
| PREC IDENTIFIER Action
{
lx := yylex.(*lexer)
lhs := &Precedence{
Case: 2,
Token: $1,
Token2: $2,
Action: $3.(*Action),
}
$$ = lhs
lhs.Identifier = lx.ident(lhs.Token2)
}
| Precedence ';'
{
$$ = &Precedence{
Case: 3,
Precedence: $1.(*Precedence),
Token: $2,
}
}
ReservedWord:
TOKEN
{
$$ = &ReservedWord{
Token: $1,
}
}
| LEFT
{
$$ = &ReservedWord{
Case: 1,
Token: $1,
}
}
| RIGHT
{
$$ = &ReservedWord{
Case: 2,
Token: $1,
}
}
| NONASSOC
{
$$ = &ReservedWord{
Case: 3,
Token: $1,
}
}
| TYPE
{
$$ = &ReservedWord{
Case: 4,
Token: $1,
}
}
| PRECEDENCE
{
$$ = &ReservedWord{
Case: 5,
Token: $1,
}
}
Rule:
C_IDENTIFIER RuleItemList Precedence
{
lx := yylex.(*lexer)
lhs := &Rule{
Token: $1,
RuleItemList: $2.(*RuleItemList).reverse(),
Precedence: $3.(*Precedence),
}
$$ = lhs
lx.ruleName = lhs.Token
lhs.Name = lhs.Token
}
| '|' RuleItemList Precedence
{
lx := yylex.(*lexer)
lhs := &Rule{
Case: 1,
Token: $1,
RuleItemList: $2.(*RuleItemList).reverse(),
Precedence: $3.(*Precedence),
}
$$ = lhs
lhs.Name = lx.ruleName
}
RuleItemList:
/* empty */
{
$$ = (*RuleItemList)(nil)
}
| RuleItemList IDENTIFIER
{
$$ = &RuleItemList{
Case: 1,
RuleItemList: $1.(*RuleItemList),
Token: $2,
}
}
| RuleItemList Action
{
$$ = &RuleItemList{
Case: 2,
RuleItemList: $1.(*RuleItemList),
Action: $2.(*Action),
}
}
| RuleItemList STRING_LITERAL
{
$$ = &RuleItemList{
Case: 3,
RuleItemList: $1.(*RuleItemList),
Token: $2,
}
}
RuleList:
C_IDENTIFIER RuleItemList Precedence
{
lx := yylex.(*lexer)
lhs := &RuleList{
Token: $1,
RuleItemList: $2.(*RuleItemList).reverse(),
Precedence: $3.(*Precedence),
}
$$ = lhs
lx.ruleName = lhs.Token
rule := &Rule{
Token: lhs.Token,
Name: lhs.Token,
RuleItemList: lhs.RuleItemList,
Precedence: lhs.Precedence,
}
rule.collect()
lx.rules = append(lx.rules, rule)
}
| RuleList Rule
{
lx := yylex.(*lexer)
lhs := &RuleList{
Case: 1,
RuleList: $1.(*RuleList),
Rule: $2.(*Rule),
}
$$ = lhs
rule := lhs.Rule
rule.collect()
lx.rules = append(lx.rules, rule)
}
Specification:
DefinitionList "%%" RuleList Tail
{
lx := yylex.(*lexer)
lhs := &Specification{
DefinitionList: $1.(*DefinitionList).reverse(),
Token: $2,
RuleList: $3.(*RuleList).reverse(),
Tail: $4.(*Tail),
}
$$ = lhs
lhs.Defs = lx.defs
lhs.Rules = lx.rules
lx.spec = lhs
}
Tag:
/* empty */
{
$$ = (*Tag)(nil)
}
| '<' IDENTIFIER '>'
{
$$ = &Tag{
Token: $1,
Token2: $2,
Token3: $3,
}
}
Tail:
"%%"
{
lx := yylex.(*lexer)
lhs := &Tail{
Token: $1,
}
$$ = lhs
lhs.Value = lx.value
}
| /* empty */
{
$$ = (*Tail)(nil)
}

203
_vendor/vendor/github.com/cznic/parser/yacc/parser.yy generated vendored Normal file
View File

@ -0,0 +1,203 @@
%{
// Copyright 2015 The parser Authors. All rights reserved. Use
// of this source code is governed by a BSD-style license that can be found in
// the LICENSE file.
//
// This is a derived work based on the original at
//
// http://pubs.opengroup.org/onlinepubs/009695399/utilities/yacc.html
//
// The original work is
//
// Copyright © 2001-2004 The IEEE and The Open Group, All Rights reserved.
//
// Grammar for the input to yacc.
package parser
import (
"go/token"
)
%}
%union {
node Node
Token *Token
}
%token <Token>
/*yy:token "%c:" */ C_IDENTIFIER "rule name"
/*yy:token "%c" */ IDENTIFIER "identifier"
/*yy:token "\"%c\"" */ STRING_LITERAL "string literal"
/*yy:token "%d" */ NUMBER "number"
COMMENT
ERROR_VERBOSE "%error-verbose"
LCURL "%{"
LEFT "%left"
MARK "%%"
NONASSOC "%nonassoc"
PREC "%prec"
PRECEDENCE "%precedence"
RCURL "%}"
RIGHT "%right"
START "%start"
TOKEN "%token"
TYPE "%type"
UNION "%union"
%start Specification
%%
//yy:field Values []*ActionValue // For backward compatibility.
Action:
'{'
{
lx.values2 = append([]string(nil), lx.values...)
lx.positions2 = append([]token.Pos(nil), lx.positions...)
}
'}'
{
for i, v := range lx.values2 {
a := lx.parseActionValue(lx.positions2[i], v)
if a != nil {
lhs.Values = append(lhs.Values, a)
}
}
}
//yy:field Nlist []*Name // For backward compatibility.
//yy:field Value string
Definition:
START IDENTIFIER
//yy:example "%union{int i} %%"
| UNION
{
lhs.Value = lx.value
}
| LCURL
{
lx.pos2 = lx.pos
lx.value2 = lx.value
}
RCURL
{
lhs.Value = lx.value2
}
| ReservedWord Tag NameList
{
for n := lhs.NameList; n != nil; n = n.NameList {
lhs.Nlist = append(lhs.Nlist, n.Name)
}
}
| ReservedWord Tag
| ERROR_VERBOSE
DefinitionList:
| DefinitionList Definition
{
lx.defs = append(lx.defs, lhs.Definition)
}
LiteralStringOpt:
| STRING_LITERAL
//yy:field Identifier interface{} // For backward compatibility.
//yy:field Number int // For backward compatibility.
Name:
IDENTIFIER LiteralStringOpt
{
lhs.Identifier = lx.ident(lhs.Token)
lhs.Number = -1
}
| IDENTIFIER NUMBER LiteralStringOpt
{
lhs.Identifier = lx.ident(lhs.Token)
lhs.Number = lx.number(lhs.Token2)
}
NameList:
Name
| NameList Name
| NameList ',' Name
//yy:field Identifier interface{} // Name string or literal int.
Precedence:
/* empty */ {}
| PREC IDENTIFIER
{
lhs.Identifier = lx.ident(lhs.Token2)
}
| PREC IDENTIFIER Action
{
lhs.Identifier = lx.ident(lhs.Token2)
}
| Precedence ';'
ReservedWord:
TOKEN
| LEFT
| RIGHT
| NONASSOC
| TYPE
| PRECEDENCE
//yy:field Body []interface{} // For backward compatibility.
//yy:field Name *Token
Rule:
C_IDENTIFIER RuleItemList Precedence
{
lx.ruleName = lhs.Token
lhs.Name = lhs.Token
}
| '|' RuleItemList Precedence
{
lhs.Name = lx.ruleName
}
RuleItemList:
| RuleItemList IDENTIFIER
| RuleItemList Action
| RuleItemList STRING_LITERAL
RuleList:
C_IDENTIFIER RuleItemList Precedence
{
lx.ruleName = lhs.Token
rule := &Rule{
Token: lhs.Token,
Name: lhs.Token,
RuleItemList: lhs.RuleItemList,
Precedence: lhs.Precedence,
}
rule.collect()
lx.rules = append(lx.rules, rule)
}
| RuleList Rule
{
rule := lhs.Rule
rule.collect()
lx.rules = append(lx.rules, rule)
}
//yy:field Defs []*Definition // For backward compatibility.
//yy:field Rules []*Rule // For backward compatibility.
Specification:
DefinitionList "%%" RuleList Tail
{
lhs.Defs = lx.defs
lhs.Rules = lx.rules
lx.spec = lhs
}
Tag:
| '<' IDENTIFIER '>'
//yy:field Value string
Tail:
"%%"
{
lhs.Value = lx.value
}
| /* empty */

856
_vendor/vendor/github.com/cznic/parser/yacc/scanner.go generated vendored Normal file
View File

@ -0,0 +1,856 @@
// CAUTION: Generated file - DO NOT EDIT.
// Copyright 2015 The parser Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// CAUTION: Generated file (unless this is y.l) - DO NOT EDIT!
package parser
import (
"github.com/cznic/golex/lex"
)
func (l *lexer) scan() lex.Char {
c := l.Enter()
yystate0:
yyrule := -1
_ = yyrule
c = l.Rule0()
goto yystart1
goto yystate0 // silence unused label error
goto yyAction // silence unused label error
yyAction:
switch yyrule {
case 1:
goto yyrule1
case 2:
goto yyrule2
case 3:
goto yyrule3
case 4:
goto yyrule4
case 5:
goto yyrule5
case 6:
goto yyrule6
case 7:
goto yyrule7
case 8:
goto yyrule8
case 9:
goto yyrule9
case 10:
goto yyrule10
case 11:
goto yyrule11
case 12:
goto yyrule12
case 13:
goto yyrule13
case 14:
goto yyrule14
case 15:
goto yyrule15
case 16:
goto yyrule16
case 17:
goto yyrule17
case 18:
goto yyrule18
case 19:
goto yyrule19
}
goto yystate1 // silence unused label error
yystate1:
c = l.Next()
yystart1:
switch {
default:
goto yyabort
case c == '"':
goto yystate3
case c == '%':
goto yystate6
case c == '/':
goto yystate71
case c == '\'':
goto yystate68
case c == '\t' || c == '\n' || c == '\f' || c == '\r' || c == ' ':
goto yystate2
case c >= '0' && c <= '9':
goto yystate76
case c >= 'A' && c <= 'Z' || c == '_' || c >= 'a' && c <= 'z':
goto yystate77
}
yystate2:
c = l.Next()
yyrule = 1
l.Mark()
goto yyrule1
yystate3:
c = l.Next()
switch {
default:
goto yyabort
case c == '"':
goto yystate4
case c == '\\':
goto yystate5
case c >= '\x01' && c <= '\t' || c == '\v' || c == '\f' || c >= '\x0e' && c <= '!' || c >= '#' && c <= '[' || c >= ']' && c <= '\u007f' || c >= '\u0081' && c <= 'ÿ':
goto yystate3
}
yystate4:
c = l.Next()
yyrule = 19
l.Mark()
goto yyrule19
yystate5:
c = l.Next()
switch {
default:
goto yyabort
case c >= '\x01' && c <= '\t' || c == '\v' || c == '\f' || c >= '\x0e' && c <= '\u007f' || c >= '\u0081' && c <= 'ÿ':
goto yystate3
}
yystate6:
c = l.Next()
switch {
default:
goto yyabort
case c == '%':
goto yystate7
case c == 'e':
goto yystate8
case c == 'l':
goto yystate21
case c == 'n':
goto yystate25
case c == 'p':
goto yystate33
case c == 'r':
goto yystate43
case c == 's':
goto yystate48
case c == 't':
goto yystate53
case c == 'u':
goto yystate61
case c == '{':
goto yystate66
case c == '}':
goto yystate67
}
yystate7:
c = l.Next()
yyrule = 6
l.Mark()
goto yyrule6
yystate8:
c = l.Next()
switch {
default:
goto yyabort
case c == 'r':
goto yystate9
}
yystate9:
c = l.Next()
switch {
default:
goto yyabort
case c == 'r':
goto yystate10
}
yystate10:
c = l.Next()
switch {
default:
goto yyabort
case c == 'o':
goto yystate11
}
yystate11:
c = l.Next()
switch {
default:
goto yyabort
case c == 'r':
goto yystate12
}
yystate12:
c = l.Next()
switch {
default:
goto yyabort
case c == '-':
goto yystate13
}
yystate13:
c = l.Next()
switch {
default:
goto yyabort
case c == 'v':
goto yystate14
}
yystate14:
c = l.Next()
switch {
default:
goto yyabort
case c == 'e':
goto yystate15
}
yystate15:
c = l.Next()
switch {
default:
goto yyabort
case c == 'r':
goto yystate16
}
yystate16:
c = l.Next()
switch {
default:
goto yyabort
case c == 'b':
goto yystate17
}
yystate17:
c = l.Next()
switch {
default:
goto yyabort
case c == 'o':
goto yystate18
}
yystate18:
c = l.Next()
switch {
default:
goto yyabort
case c == 's':
goto yystate19
}
yystate19:
c = l.Next()
switch {
default:
goto yyabort
case c == 'e':
goto yystate20
}
yystate20:
c = l.Next()
yyrule = 16
l.Mark()
goto yyrule16
yystate21:
c = l.Next()
switch {
default:
goto yyabort
case c == 'e':
goto yystate22
}
yystate22:
c = l.Next()
switch {
default:
goto yyabort
case c == 'f':
goto yystate23
}
yystate23:
c = l.Next()
switch {
default:
goto yyabort
case c == 't':
goto yystate24
}
yystate24:
c = l.Next()
yyrule = 7
l.Mark()
goto yyrule7
yystate25:
c = l.Next()
switch {
default:
goto yyabort
case c == 'o':
goto yystate26
}
yystate26:
c = l.Next()
switch {
default:
goto yyabort
case c == 'n':
goto yystate27
}
yystate27:
c = l.Next()
switch {
default:
goto yyabort
case c == 'a':
goto yystate28
}
yystate28:
c = l.Next()
switch {
default:
goto yyabort
case c == 's':
goto yystate29
}
yystate29:
c = l.Next()
switch {
default:
goto yyabort
case c == 's':
goto yystate30
}
yystate30:
c = l.Next()
switch {
default:
goto yyabort
case c == 'o':
goto yystate31
}
yystate31:
c = l.Next()
switch {
default:
goto yyabort
case c == 'c':
goto yystate32
}
yystate32:
c = l.Next()
yyrule = 8
l.Mark()
goto yyrule8
yystate33:
c = l.Next()
switch {
default:
goto yyabort
case c == 'r':
goto yystate34
}
yystate34:
c = l.Next()
switch {
default:
goto yyabort
case c == 'e':
goto yystate35
}
yystate35:
c = l.Next()
switch {
default:
goto yyabort
case c == 'c':
goto yystate36
}
yystate36:
c = l.Next()
yyrule = 9
l.Mark()
switch {
default:
goto yyrule9
case c == 'e':
goto yystate37
}
yystate37:
c = l.Next()
switch {
default:
goto yyabort
case c == 'd':
goto yystate38
}
yystate38:
c = l.Next()
switch {
default:
goto yyabort
case c == 'e':
goto yystate39
}
yystate39:
c = l.Next()
switch {
default:
goto yyabort
case c == 'n':
goto yystate40
}
yystate40:
c = l.Next()
switch {
default:
goto yyabort
case c == 'c':
goto yystate41
}
yystate41:
c = l.Next()
switch {
default:
goto yyabort
case c == 'e':
goto yystate42
}
yystate42:
c = l.Next()
yyrule = 10
l.Mark()
goto yyrule10
yystate43:
c = l.Next()
switch {
default:
goto yyabort
case c == 'i':
goto yystate44
}
yystate44:
c = l.Next()
switch {
default:
goto yyabort
case c == 'g':
goto yystate45
}
yystate45:
c = l.Next()
switch {
default:
goto yyabort
case c == 'h':
goto yystate46
}
yystate46:
c = l.Next()
switch {
default:
goto yyabort
case c == 't':
goto yystate47
}
yystate47:
c = l.Next()
yyrule = 11
l.Mark()
goto yyrule11
yystate48:
c = l.Next()
switch {
default:
goto yyabort
case c == 't':
goto yystate49
}
yystate49:
c = l.Next()
switch {
default:
goto yyabort
case c == 'a':
goto yystate50
}
yystate50:
c = l.Next()
switch {
default:
goto yyabort
case c == 'r':
goto yystate51
}
yystate51:
c = l.Next()
switch {
default:
goto yyabort
case c == 't':
goto yystate52
}
yystate52:
c = l.Next()
yyrule = 12
l.Mark()
goto yyrule12
yystate53:
c = l.Next()
switch {
default:
goto yyabort
case c == 'o':
goto yystate54
case c == 'y':
goto yystate58
}
yystate54:
c = l.Next()
switch {
default:
goto yyabort
case c == 'k':
goto yystate55
}
yystate55:
c = l.Next()
switch {
default:
goto yyabort
case c == 'e':
goto yystate56
}
yystate56:
c = l.Next()
switch {
default:
goto yyabort
case c == 'n':
goto yystate57
}
yystate57:
c = l.Next()
yyrule = 13
l.Mark()
goto yyrule13
yystate58:
c = l.Next()
switch {
default:
goto yyabort
case c == 'p':
goto yystate59
}
yystate59:
c = l.Next()
switch {
default:
goto yyabort
case c == 'e':
goto yystate60
}
yystate60:
c = l.Next()
yyrule = 14
l.Mark()
goto yyrule14
yystate61:
c = l.Next()
switch {
default:
goto yyabort
case c == 'n':
goto yystate62
}
yystate62:
c = l.Next()
switch {
default:
goto yyabort
case c == 'i':
goto yystate63
}
yystate63:
c = l.Next()
switch {
default:
goto yyabort
case c == 'o':
goto yystate64
}
yystate64:
c = l.Next()
switch {
default:
goto yyabort
case c == 'n':
goto yystate65
}
yystate65:
c = l.Next()
yyrule = 15
l.Mark()
goto yyrule15
yystate66:
c = l.Next()
yyrule = 4
l.Mark()
goto yyrule4
yystate67:
c = l.Next()
yyrule = 5
l.Mark()
goto yyrule5
yystate68:
c = l.Next()
switch {
default:
goto yyabort
case c == '\'':
goto yystate69
case c == '\\':
goto yystate70
case c >= '\x01' && c <= '\t' || c == '\v' || c == '\f' || c >= '\x0e' && c <= '&' || c >= '(' && c <= '[' || c >= ']' && c <= '\u007f' || c >= '\u0081' && c <= 'ÿ':
goto yystate68
}
yystate69:
c = l.Next()
yyrule = 18
l.Mark()
goto yyrule18
yystate70:
c = l.Next()
switch {
default:
goto yyabort
case c >= '\x01' && c <= '\t' || c == '\v' || c == '\f' || c >= '\x0e' && c <= '\u007f' || c >= '\u0081' && c <= 'ÿ':
goto yystate68
}
yystate71:
c = l.Next()
switch {
default:
goto yyabort
case c == '*':
goto yystate72
case c == '/':
goto yystate75
}
yystate72:
c = l.Next()
switch {
default:
goto yyabort
case c == '*':
goto yystate73
case c >= '\x01' && c <= ')' || c >= '+' && c <= '\u007f' || c >= '\u0081' && c <= 'ÿ':
goto yystate72
}
yystate73:
c = l.Next()
switch {
default:
goto yyabort
case c == '*':
goto yystate73
case c == '/':
goto yystate74
case c >= '\x01' && c <= ')' || c >= '+' && c <= '.' || c >= '0' && c <= '\u007f' || c >= '\u0081' && c <= 'ÿ':
goto yystate72
}
yystate74:
c = l.Next()
yyrule = 3
l.Mark()
goto yyrule3
yystate75:
c = l.Next()
yyrule = 2
l.Mark()
switch {
default:
goto yyrule2
case c >= '\x01' && c <= '\t' || c == '\v' || c == '\f' || c >= '\x0e' && c <= '\u007f' || c >= '\u0081' && c <= 'ÿ':
goto yystate75
}
yystate76:
c = l.Next()
yyrule = 17
l.Mark()
switch {
default:
goto yyrule17
case c >= '0' && c <= '9':
goto yystate76
}
yystate77:
c = l.Next()
yyrule = 18
l.Mark()
switch {
default:
goto yyrule18
case c == '.' || c >= '0' && c <= '9' || c >= 'A' && c <= 'Z' || c == '_' || c >= 'a' && c <= 'z':
goto yystate77
}
yyrule1: // [ \n\r\t\f]
goto yystate0
yyrule2: // "//"[^\x80\n\r]*
yyrule3: // "/*"([^*\x80]|\*+[^*/\x80])*\*+\/
{
return l.char(COMMENT)
}
yyrule4: // %"{"
{
return l.char(LCURL)
}
yyrule5: // %"}"
{
return l.char(RCURL)
}
yyrule6: // %%
{
return l.char(MARK)
}
yyrule7: // %left
{
return l.char(LEFT)
}
yyrule8: // %nonassoc
{
return l.char(NONASSOC)
}
yyrule9: // %prec
{
return l.char(PREC)
}
yyrule10: // %precedence
{
return l.char(PRECEDENCE)
}
yyrule11: // %right
{
return l.char(RIGHT)
}
yyrule12: // %start
{
return l.char(START)
}
yyrule13: // %token
{
return l.char(TOKEN)
}
yyrule14: // %type
{
return l.char(TYPE)
}
yyrule15: // %union
{
return l.char(UNION)
}
yyrule16: // %error-verbose
{
return l.char(ERROR_VERBOSE)
}
yyrule17: // [0-9]+
{
return l.char(NUMBER)
}
yyrule18: // {identifier}
{
return l.char(IDENTIFIER)
}
yyrule19: // {str-literal}
{
return l.char(STRING_LITERAL)
}
panic("unreachable")
goto yyabort // silence unused label error
yyabort: // no lexem recognized
if c, ok := l.Abort(); ok {
return l.char(c)
}
goto yyAction
}

0
_vendor/vendor/github.com/cznic/parser/yacc/xerrors generated vendored Normal file
View File

65
_vendor/vendor/github.com/cznic/parser/yacc/y.l generated vendored Normal file
View File

@ -0,0 +1,65 @@
%{
// Copyright 2015 The parser Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// CAUTION: Generated file (unless this is y.l) - DO NOT EDIT!
package parser
import (
"github.com/cznic/golex/lex"
)
func (l *lexer) scan() lex.Char {
c := l.Enter()
%}
%yyc c
%yyn c = l.Next()
%yym l.Mark()
eof \x80
identifier {name}|{literal}
literal '(\\[^\n\r\x80]|[^\\'\n\r\x80])*'
name {name-first}{name-next}*
name-first [_a-zA-Z]
name-next {name-first}|[0-9.]
str-literal \x22(\\[^\n\r\x80]|[^\\\x22\x80\n\r])*\x22
%%
c = l.Rule0()
[ \n\r\t\f]
"//"[^\x80\n\r]* |
"/*"([^*\x80]|\*+[^*/\x80])*\*+\/
return l.char(COMMENT)
%"{" return l.char(LCURL)
%"}" return l.char(RCURL)
%% return l.char(MARK)
%left return l.char(LEFT)
%nonassoc return l.char(NONASSOC)
%prec return l.char(PREC)
%precedence return l.char(PRECEDENCE)
%right return l.char(RIGHT)
%start return l.char(START)
%token return l.char(TOKEN)
%type return l.char(TYPE)
%union return l.char(UNION)
%error-verbose return l.char(ERROR_VERBOSE)
[0-9]+ return l.char(NUMBER)
{identifier} return l.char(IDENTIFIER)
{str-literal} return l.char(STRING_LITERAL)
%%
if c, ok := l.Abort(); ok {
return l.char(c)
}
goto yyAction
}

11
_vendor/vendor/github.com/cznic/sortutil/AUTHORS generated vendored Normal file
View File

@ -0,0 +1,11 @@
# This file lists authors for copyright purposes. This file is distinct from
# the CONTRIBUTORS files. See the latter for an explanation.
#
# Names should be added to this file as:
# Name or Organization <email address>
#
# The email address is not required for organizations.
#
# Please keep the list sorted.
Jan Mercl <0xjnml@gmail.com>

10
_vendor/vendor/github.com/cznic/sortutil/CONTRIBUTORS generated vendored Normal file
View File

@ -0,0 +1,10 @@
# This file lists people who contributed code to this repository. The AUTHORS
# file lists the copyright holders; this file lists people.
#
# Names should be added to this file like so:
# Name <email address>
#
# Please keep the list sorted.
Gary Burd <gary@beagledreams.com>
Jan Mercl <0xjnml@gmail.com>

27
_vendor/vendor/github.com/cznic/sortutil/LICENSE generated vendored Normal file
View File

@ -0,0 +1,27 @@
Copyright (c) 2014 The sortutil Authors. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
* Neither the names of the authors nor the names of the
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

35
_vendor/vendor/github.com/cznic/sortutil/Makefile generated vendored Normal file
View File

@ -0,0 +1,35 @@
# Copyright 2014 The sortutil Authors. All rights reserved.
# Use of this source code is governed by a BSD-style
# license that can be found in the LICENSE file.
.PHONY: all clean editor later nuke todo
grep=--include=*.go
all: editor
go vet
golint .
make todo
clean:
go clean
rm -f *~
editor:
gofmt -s -l -w *.go
go test -i
go test
go build
later:
@grep -n $(grep) LATER * || true
@grep -n $(grep) MAYBE * || true
nuke: clean
go clean -i
todo:
@grep -nr $(grep) ^[[:space:]]*_[[:space:]]*=[[:space:]][[:alpha:]][[:alnum:]]* * || true
@grep -nr $(grep) TODO * || true
@grep -nr $(grep) BUG * || true
@grep -nr $(grep) println * || true

4
_vendor/vendor/github.com/cznic/sortutil/README generated vendored Normal file
View File

@ -0,0 +1,4 @@
Packages in this repository:
Install: $ go get github.com/cznic/sortutil
Godocs: http://godoc.org/github.com/cznic/sortutil

271
_vendor/vendor/github.com/cznic/sortutil/sortutil.go generated vendored Normal file
View File

@ -0,0 +1,271 @@
// Copyright 2014 The sortutil Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package sortutil provides utilities supplementing the standard 'sort' package.
//
// Changelog
//
// 2015-06-17: Added utils for math/big.{Int,Rat}.
package sortutil
import (
"math/big"
)
import "sort"
// BigIntSlice attaches the methods of sort.Interface to []*big.Int, sorting in increasing order.
type BigIntSlice []*big.Int
func (s BigIntSlice) Len() int { return len(s) }
func (s BigIntSlice) Less(i, j int) bool { return s[i].Cmp(s[j]) < 0 }
func (s BigIntSlice) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
// Sort is a convenience method.
func (s BigIntSlice) Sort() {
sort.Sort(s)
}
// SearchBigInts searches for x in a sorted slice of *big.Int and returns the index
// as specified by sort.Search. The slice must be sorted in ascending order.
func SearchBigInts(a []*big.Int, x *big.Int) int {
return sort.Search(len(a), func(i int) bool { return a[i].Cmp(x) >= 0 })
}
// BigRatSlice attaches the methods of sort.Interface to []*big.Rat, sorting in increasing order.
type BigRatSlice []*big.Rat
func (s BigRatSlice) Len() int { return len(s) }
func (s BigRatSlice) Less(i, j int) bool { return s[i].Cmp(s[j]) < 0 }
func (s BigRatSlice) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
// Sort is a convenience method.
func (s BigRatSlice) Sort() {
sort.Sort(s)
}
// SearchBigRats searches for x in a sorted slice of *big.Rat and returns the index
// as specified by sort.Search. The slice must be sorted in ascending order.
func SearchBigRats(a []*big.Rat, x *big.Rat) int {
return sort.Search(len(a), func(i int) bool { return a[i].Cmp(x) >= 0 })
}
// ByteSlice attaches the methods of sort.Interface to []byte, sorting in increasing order.
type ByteSlice []byte
func (s ByteSlice) Len() int { return len(s) }
func (s ByteSlice) Less(i, j int) bool { return s[i] < s[j] }
func (s ByteSlice) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
// Sort is a convenience method.
func (s ByteSlice) Sort() {
sort.Sort(s)
}
// SearchBytes searches for x in a sorted slice of bytes and returns the index
// as specified by sort.Search. The slice must be sorted in ascending order.
func SearchBytes(a []byte, x byte) int {
return sort.Search(len(a), func(i int) bool { return a[i] >= x })
}
// Float32Slice attaches the methods of sort.Interface to []float32, sorting in increasing order.
type Float32Slice []float32
func (s Float32Slice) Len() int { return len(s) }
func (s Float32Slice) Less(i, j int) bool { return s[i] < s[j] }
func (s Float32Slice) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
// Sort is a convenience method.
func (s Float32Slice) Sort() {
sort.Sort(s)
}
// SearchFloat32s searches for x in a sorted slice of float32 and returns the index
// as specified by sort.Search. The slice must be sorted in ascending order.
func SearchFloat32s(a []float32, x float32) int {
return sort.Search(len(a), func(i int) bool { return a[i] >= x })
}
// Int8Slice attaches the methods of sort.Interface to []int8, sorting in increasing order.
type Int8Slice []int8
func (s Int8Slice) Len() int { return len(s) }
func (s Int8Slice) Less(i, j int) bool { return s[i] < s[j] }
func (s Int8Slice) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
// Sort is a convenience method.
func (s Int8Slice) Sort() {
sort.Sort(s)
}
// SearchInt8s searches for x in a sorted slice of int8 and returns the index
// as specified by sort.Search. The slice must be sorted in ascending order.
func SearchInt8s(a []int8, x int8) int {
return sort.Search(len(a), func(i int) bool { return a[i] >= x })
}
// Int16Slice attaches the methods of sort.Interface to []int16, sorting in increasing order.
type Int16Slice []int16
func (s Int16Slice) Len() int { return len(s) }
func (s Int16Slice) Less(i, j int) bool { return s[i] < s[j] }
func (s Int16Slice) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
// Sort is a convenience method.
func (s Int16Slice) Sort() {
sort.Sort(s)
}
// SearchInt16s searches for x in a sorted slice of int16 and returns the index
// as specified by sort.Search. The slice must be sorted in ascending order.
func SearchInt16s(a []int16, x int16) int {
return sort.Search(len(a), func(i int) bool { return a[i] >= x })
}
// Int32Slice attaches the methods of sort.Interface to []int32, sorting in increasing order.
type Int32Slice []int32
func (s Int32Slice) Len() int { return len(s) }
func (s Int32Slice) Less(i, j int) bool { return s[i] < s[j] }
func (s Int32Slice) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
// Sort is a convenience method.
func (s Int32Slice) Sort() {
sort.Sort(s)
}
// SearchInt32s searches for x in a sorted slice of int32 and returns the index
// as specified by sort.Search. The slice must be sorted in ascending order.
func SearchInt32s(a []int32, x int32) int {
return sort.Search(len(a), func(i int) bool { return a[i] >= x })
}
// Int64Slice attaches the methods of sort.Interface to []int64, sorting in increasing order.
type Int64Slice []int64
func (s Int64Slice) Len() int { return len(s) }
func (s Int64Slice) Less(i, j int) bool { return s[i] < s[j] }
func (s Int64Slice) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
// Sort is a convenience method.
func (s Int64Slice) Sort() {
sort.Sort(s)
}
// SearchInt64s searches for x in a sorted slice of int64 and returns the index
// as specified by sort.Search. The slice must be sorted in ascending order.
func SearchInt64s(a []int64, x int64) int {
return sort.Search(len(a), func(i int) bool { return a[i] >= x })
}
// UintSlice attaches the methods of sort.Interface to []uint, sorting in increasing order.
type UintSlice []uint
func (s UintSlice) Len() int { return len(s) }
func (s UintSlice) Less(i, j int) bool { return s[i] < s[j] }
func (s UintSlice) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
// Sort is a convenience method.
func (s UintSlice) Sort() {
sort.Sort(s)
}
// SearchUints searches for x in a sorted slice of uints and returns the index
// as specified by sort.Search. The slice must be sorted in ascending order.
func SearchUints(a []uint, x uint) int {
return sort.Search(len(a), func(i int) bool { return a[i] >= x })
}
// Uint16Slice attaches the methods of sort.Interface to []uint16, sorting in increasing order.
type Uint16Slice []uint16
func (s Uint16Slice) Len() int { return len(s) }
func (s Uint16Slice) Less(i, j int) bool { return s[i] < s[j] }
func (s Uint16Slice) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
// Sort is a convenience method.
func (s Uint16Slice) Sort() {
sort.Sort(s)
}
// SearchUint16s searches for x in a sorted slice of uint16 and returns the index
// as specified by sort.Search. The slice must be sorted in ascending order.
func SearchUint16s(a []uint16, x uint16) int {
return sort.Search(len(a), func(i int) bool { return a[i] >= x })
}
// Uint32Slice attaches the methods of sort.Interface to []uint32, sorting in increasing order.
type Uint32Slice []uint32
func (s Uint32Slice) Len() int { return len(s) }
func (s Uint32Slice) Less(i, j int) bool { return s[i] < s[j] }
func (s Uint32Slice) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
// Sort is a convenience method.
func (s Uint32Slice) Sort() {
sort.Sort(s)
}
// SearchUint32s searches for x in a sorted slice of uint32 and returns the index
// as specified by sort.Search. The slice must be sorted in ascending order.
func SearchUint32s(a []uint32, x uint32) int {
return sort.Search(len(a), func(i int) bool { return a[i] >= x })
}
// Uint64Slice attaches the methods of sort.Interface to []uint64, sorting in increasing order.
type Uint64Slice []uint64
func (s Uint64Slice) Len() int { return len(s) }
func (s Uint64Slice) Less(i, j int) bool { return s[i] < s[j] }
func (s Uint64Slice) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
// Sort is a convenience method.
func (s Uint64Slice) Sort() {
sort.Sort(s)
}
// SearchUint64s searches for x in a sorted slice of uint64 and returns the index
// as specified by sort.Search. The slice must be sorted in ascending order.
func SearchUint64s(a []uint64, x uint64) int {
return sort.Search(len(a), func(i int) bool { return a[i] >= x })
}
// RuneSlice attaches the methods of sort.Interface to []rune, sorting in increasing order.
type RuneSlice []rune
func (s RuneSlice) Len() int { return len(s) }
func (s RuneSlice) Less(i, j int) bool { return s[i] < s[j] }
func (s RuneSlice) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
// Sort is a convenience method.
func (s RuneSlice) Sort() {
sort.Sort(s)
}
// SearchRunes searches for x in a sorted slice of runes and returns the index
// as specified by sort.Search. The slice must be sorted in ascending order.
func SearchRunes(a []rune, x rune) int {
return sort.Search(len(a), func(i int) bool { return a[i] >= x })
}
// Dedupe returns n, the number of distinct elements in data. The distinct
// elements end up sorted in positions [0, n) of data (data[:n] for a slice).
func Dedupe(data sort.Interface) (n int) {
if n = data.Len(); n < 2 {
return n
}
sort.Sort(data)
a, b := 0, 1
for b < n {
if data.Less(a, b) {
a++
if a != b {
data.Swap(a, b)
}
}
b++
}
return a + 1
}
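// An illustrative sketch (not part of the upstream package) showing the typed
// slices, the Search helpers and Dedupe used together; the values are made up.
//
//    package main
//
//    import (
//        "fmt"
//
//        "github.com/cznic/sortutil"
//    )
//
//    func main() {
//        s := sortutil.Int64Slice{3, 1, 2, 2, 3}
//        s.Sort()                                 // s is now 1 2 2 3 3
//        fmt.Println(sortutil.SearchInt64s(s, 2)) // 1
//        n := sortutil.Dedupe(s)                  // distinct elements end up in s[:n]
//        fmt.Println(s[:n])                       // [1 2 3]
//    }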

12
_vendor/vendor/github.com/cznic/strutil/AUTHORS generated vendored Normal file
View File

@ -0,0 +1,12 @@
# This file lists authors for copyright purposes. This file is distinct from
# the CONTRIBUTORS files. See the latter for an explanation.
#
# Names should be added to this file as:
# Name or Organization <email address>
#
# The email address is not required for organizations.
#
# Please keep the list sorted.
CZ.NIC z.s.p.o. <kontakt@nic.cz>
Jan Mercl <0xjnml@gmail.com>

9
_vendor/vendor/github.com/cznic/strutil/CONTRIBUTORS generated vendored Normal file
View File

@ -0,0 +1,9 @@
# This file lists people who contributed code to this repository. The AUTHORS
# file lists the copyright holders; this file lists people.
#
# Names should be added to this file like so:
# Name <email address>
#
# Please keep the list sorted.
Jan Mercl <0xjnml@gmail.com>

27
_vendor/vendor/github.com/cznic/strutil/LICENSE generated vendored Normal file
View File

@ -0,0 +1,27 @@
Copyright (c) 2014 The strutil Authors. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
* Neither the names of the authors nor the names of the
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

50
_vendor/vendor/github.com/cznic/strutil/Makefile generated vendored Normal file
View File

@ -0,0 +1,50 @@
# Copyright (c) 2014 The sortutil Authors. All rights reserved.
# Use of this source code is governed by a BSD-style
# license that can be found in the LICENSE file.
.PHONY: all clean cover cpu editor internalError later mem nuke todo
grep=--include=*.go --include=*.l --include=*.y
all: editor
go vet || true
golint || true
make todo
clean:
go clean
rm -f *~ cpu.test mem.test
cover:
t=$(shell tempfile) ; go test -coverprofile $$t && go tool cover -html $$t && unlink $$t
cpu:
go test -c -o cpu.test
./cpu.test -noerr -test.cpuprofile cpu.out
go tool pprof --lines cpu.test cpu.out
editor:
gofmt -l -s -w *.go
go test
go install
internalError:
egrep -ho '"internal error.*"' *.go | sort | cat -n
later:
@grep -n $(grep) LATER * || true
@grep -n $(grep) MAYBE * || true
mem:
go test -c -o mem.test
./mem.test -test.bench . -test.memprofile mem.out
go tool pprof --lines --web --alloc_space mem.test mem.out
nuke: clean
go clean -i
todo:
@grep -nr $(grep) ^[[:space:]]*_[[:space:]]*=[[:space:]][[:alpha:]][[:alnum:]]* * || true
@grep -nr $(grep) TODO * || true
@grep -nr $(grep) BUG * || true
@grep -nr $(grep) [^[:alpha:]]println * || true

8
_vendor/vendor/github.com/cznic/strutil/README generated vendored Normal file
View File

@ -0,0 +1,8 @@
This is a goinstall-able mirror of modified code already published at:
http://git.nic.cz/redmine/projects/gostrutil/repository
Online godoc documentation for this package (should be) available at:
http://gopkgdoc.appspot.com/pkg/github.com/cznic/strutil
Installation:
$ go get github.com/cznic/strutil

645
_vendor/vendor/github.com/cznic/strutil/strutil.go generated vendored Normal file
View File

@ -0,0 +1,645 @@
// Copyright (c) 2014 The sortutil Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package strutil collects utils supplemental to the standard strings package.
package strutil
import (
"bytes"
"encoding/base32"
"encoding/base64"
"fmt"
"io"
"reflect"
"sort"
"strconv"
"strings"
"sync"
)
// Base32ExtDecode decodes base32 extended (RFC 4648) text to binary data.
func Base32ExtDecode(text []byte) (data []byte, err error) {
n := base32.HexEncoding.DecodedLen(len(text))
data = make([]byte, n)
decoder := base32.NewDecoder(base32.HexEncoding, bytes.NewBuffer(text))
if n, err = decoder.Read(data); err != nil {
n = 0
}
data = data[:n]
return
}
// Base32ExtEncode encodes binary data to base32 extended (RFC 4648) encoded text.
func Base32ExtEncode(data []byte) (text []byte) {
n := base32.HexEncoding.EncodedLen(len(data))
buf := bytes.NewBuffer(make([]byte, 0, n))
encoder := base32.NewEncoder(base32.HexEncoding, buf)
encoder.Write(data)
encoder.Close()
if buf.Len() != n {
panic("internal error")
}
return buf.Bytes()
}
// Base64Decode decodes base64 text to binary data.
func Base64Decode(text []byte) (data []byte, err error) {
n := base64.StdEncoding.DecodedLen(len(text))
data = make([]byte, n)
decoder := base64.NewDecoder(base64.StdEncoding, bytes.NewBuffer(text))
if n, err = decoder.Read(data); err != nil {
n = 0
}
data = data[:n]
return
}
// Base64Encode encodes binary data to base64 encoded text.
func Base64Encode(data []byte) (text []byte) {
n := base64.StdEncoding.EncodedLen(len(data))
buf := bytes.NewBuffer(make([]byte, 0, n))
encoder := base64.NewEncoder(base64.StdEncoding, buf)
encoder.Write(data)
encoder.Close()
if buf.Len() != n {
panic("internal error")
}
return buf.Bytes()
}
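// A minimal round trip sketch (not part of the upstream package) for the
// base64 helpers above; fmt is assumed to be imported by the caller.
//
//    data := []byte("hello")
//    text := strutil.Base64Encode(data) // []byte("aGVsbG8=")
//    back, err := strutil.Base64Decode(text)
//    if err != nil {
//        // handle the error
//    }
//    fmt.Println(string(back)) // hello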
// Formatter is an io.Writer extended by a fmt.Printf like function Format
type Formatter interface {
io.Writer
Format(format string, args ...interface{}) (n int, errno error)
}
type indentFormatter struct {
io.Writer
indent []byte
indentLevel int
state int
}
const (
st0 = iota
stBOL
stPERC
stBOLPERC
)
// IndentFormatter returns a new Formatter which interprets %i and %u in the
// Format() format string as indent and undent commands. The commands can
// nest. The Formatter writes to io.Writer 'w' and inserts one 'indent'
// string per current indent level value.
// Behaviour of commands reaching negative indent levels is undefined.
// IndentFormatter(os.Stdout, "\t").Format("abc%d%%e%i\nx\ny\n%uz\n", 3)
// output:
// abc3%e
// x
// y
// z
// The Go quoted string literal form of the above is:
// "abc%%e\n\tx\n\tx\nz\n"
// The commands can be scattered between separate invocations of Format(),
// i.e. the formatter keeps track of the indent level and knows if it is
// positioned on start of a line and should emit indentation(s).
// The same output as above can be produced by e.g.:
// f := IndentFormatter(os.Stdout, "\t")
// f.Format("abc%d%%e%i\nx\n", 3)
// f.Format("y\n%uz\n")
func IndentFormatter(w io.Writer, indent string) Formatter {
return &indentFormatter{w, []byte(indent), 0, stBOL}
}
func (f *indentFormatter) format(flat bool, format string, args ...interface{}) (n int, errno error) {
buf := []byte{}
for i := 0; i < len(format); i++ {
c := format[i]
switch f.state {
case st0:
switch c {
case '\n':
cc := c
if flat && f.indentLevel != 0 {
cc = ' '
}
buf = append(buf, cc)
f.state = stBOL
case '%':
f.state = stPERC
default:
buf = append(buf, c)
}
case stBOL:
switch c {
case '\n':
cc := c
if flat && f.indentLevel != 0 {
cc = ' '
}
buf = append(buf, cc)
case '%':
f.state = stBOLPERC
default:
if !flat {
for i := 0; i < f.indentLevel; i++ {
buf = append(buf, f.indent...)
}
}
buf = append(buf, c)
f.state = st0
}
case stBOLPERC:
switch c {
case 'i':
f.indentLevel++
f.state = stBOL
case 'u':
f.indentLevel--
f.state = stBOL
default:
if !flat {
for i := 0; i < f.indentLevel; i++ {
buf = append(buf, f.indent...)
}
}
buf = append(buf, '%', c)
f.state = st0
}
case stPERC:
switch c {
case 'i':
f.indentLevel++
f.state = st0
case 'u':
f.indentLevel--
f.state = st0
default:
buf = append(buf, '%', c)
f.state = st0
}
default:
panic("unexpected state")
}
}
switch f.state {
case stPERC, stBOLPERC:
buf = append(buf, '%')
}
return f.Write([]byte(fmt.Sprintf(string(buf), args...)))
}
func (f *indentFormatter) Format(format string, args ...interface{}) (n int, errno error) {
return f.format(false, format, args...)
}
type flatFormatter indentFormatter
// FlatFormatter returns a newly created Formatter with the same functionality as the one returned
// by IndentFormatter except it allows a newline in the 'format' string argument of Format
// to pass through iff indent level is currently zero.
//
// If indent level is non-zero then such new lines are changed to a space character.
// There is no indent string, the %i and %u format verbs are used solely to determine the indent level.
//
// The FlatFormatter is intended for flattening the textual representation of a
// normally nested structure into a form with one top level structure per line.
// FlatFormatter(os.Stdout, " ").Format("abc%d%%e%i\nx\ny\n%uz\n", 3)
// output in the form of a Go quoted string literal:
// "abc3%%e x y z\n"
func FlatFormatter(w io.Writer) Formatter {
return (*flatFormatter)(IndentFormatter(w, "").(*indentFormatter))
}
func (f *flatFormatter) Format(format string, args ...interface{}) (n int, errno error) {
return (*indentFormatter)(f).format(true, format, args...)
}
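// A usage sketch (not part of the upstream package) contrasting the two
// formatters on the same format string; os is assumed to be imported.
//
//    f := strutil.IndentFormatter(os.Stdout, "\t")
//    f.Format("abc%i\nx\ny\n%uz\n") // writes "abc\n\tx\n\ty\nz\n"
//
//    g := strutil.FlatFormatter(os.Stdout)
//    g.Format("abc%i\nx\ny\n%uz\n") // writes "abc x y z\n"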
// Pool handles aligning of strings having equal values to the same string instance.
// Intended use is to conserve some memory e.g. where a large number of identically valued strings
// with non identical backing arrays may exist in several semantically distinct instances of some structs.
// Pool is *not* concurrent access safe. It doesn't handle common prefix/suffix aligning,
// e.g. having s1 == "abc" and s2 == "bc", s2 is not automatically aligned as s1[1:].
type Pool struct {
pool map[string]string
}
// NewPool returns a newly created Pool.
func NewPool() *Pool {
return &Pool{map[string]string{}}
}
// Align returns a string with the same value as its argument. It guarantees that
// all aligned strings share a single instance in memory.
func (p *Pool) Align(s string) string {
if a, ok := p.pool[s]; ok {
return a
}
s = StrPack(s)
p.pool[s] = s
return s
}
// Count returns the number of items in the pool.
func (p *Pool) Count() int {
return len(p.pool)
}
// GoPool is a concurrent access safe version of Pool.
type GoPool struct {
pool map[string]string
rwm *sync.RWMutex
}
// NewGoPool returns a newly created GoPool.
func NewGoPool() (p *GoPool) {
return &GoPool{map[string]string{}, &sync.RWMutex{}}
}
// Align returns a string with the same value as its argument. It guarantees that
// all aligned strings share a single instance in memory.
func (p *GoPool) Align(s string) (y string) {
if s != "" {
p.rwm.RLock() // R++
if a, ok := p.pool[s]; ok { // found
p.rwm.RUnlock() // R--
return a
}
p.rwm.RUnlock() // R--
// not found but with a race condition, retry within a write lock
p.rwm.Lock() // W++
defer p.rwm.Unlock() // W--
if a, ok := p.pool[s]; ok { // done in a race
return a
}
// we won
s = StrPack(s)
p.pool[s] = s
return s
}
return
}
// Count returns the number of items in the pool.
func (p *GoPool) Count() int {
return len(p.pool)
}
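// A usage sketch (not part of the upstream package): two equal but separately
// allocated strings are interned to a single instance by a Pool.
//
//    p := strutil.NewPool()
//    a := p.Align(string([]byte("key")))
//    b := p.Align(string([]byte("key"))) // b shares a's backing array
//    fmt.Println(a == b, p.Count())      // true 1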
// Dict is a string <-> id bijection. Dict is *not* concurrent access safe for assigning new ids
// to strings not yet contained in the bijection.
// Id for an empty string is guaranteed to be 0,
// thus Id for any non empty string is guaranteed to be non zero.
type Dict struct {
si map[string]int
is []string
}
// NewDict returns a newly created Dict.
func NewDict() (d *Dict) {
d = &Dict{map[string]int{}, []string{}}
d.Id("")
return
}
// Count returns the number of items in the dict.
func (d *Dict) Count() int {
return len(d.is)
}
// Id maps string s to its numeric identifier.
func (d *Dict) Id(s string) (y int) {
if y, ok := d.si[s]; ok {
return y
}
s = StrPack(s)
y = len(d.is)
d.si[s] = y
d.is = append(d.is, s)
return
}
// S maps an id to its string value and ok == true. Id values not contained in the bijection
// return "", false.
func (d *Dict) S(id int) (s string, ok bool) {
if id >= len(d.is) {
return "", false
}
return d.is[id], true
}
// GoDict is a concurrent access safe version of Dict.
type GoDict struct {
si map[string]int
is []string
rwm *sync.RWMutex
}
// NewGoDict returns a newly created GoDict.
func NewGoDict() (d *GoDict) {
d = &GoDict{map[string]int{}, []string{}, &sync.RWMutex{}}
d.Id("")
return
}
// Count returns the number of items in the dict.
func (d *GoDict) Count() int {
return len(d.is)
}
// Id maps string s to its numeric identifier. The implementation favors the
// fast path of returning an existing id over the slower path of assigning a new one.
func (d *GoDict) Id(s string) (y int) {
d.rwm.RLock() // R++
if y, ok := d.si[s]; ok { // found
d.rwm.RUnlock() // R--
return y
}
d.rwm.RUnlock() // R--
// not found but with a race condition
d.rwm.Lock() // W++ recheck with write lock
defer d.rwm.Unlock() // W--
if y, ok := d.si[s]; ok { // some other goroutine won already
return y
}
// a race free not found state => insert the string
s = StrPack(s)
y = len(d.is)
d.si[s] = y
d.is = append(d.is, s)
return
}
// S maps an id to its string value and ok == true. Id values not contained in the bijection
// return "", false.
func (d *GoDict) S(id int) (s string, ok bool) {
d.rwm.RLock() // R++
defer d.rwm.RUnlock() // R--
if id >= len(d.is) {
return "", false
}
return d.is[id], true
}
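// A usage sketch (not part of the upstream package): NewDict registers "" as
// id 0, so the first non empty string gets id 1.
//
//    d := strutil.NewDict()
//    id := d.Id("foo") // 1
//    s, ok := d.S(id)  // "foo", true
//    fmt.Println(id, s, ok)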
// StrPack returns a new instance of s which is tightly packed in memory.
// It is intended for avoiding the situation where having a live reference
// to a string slice over an unreferenced bigger underlying string keeps the bigger one
// in memory anyway - it can't be GCed.
func StrPack(s string) string {
return string([]byte(s))
}
// JoinFields returns strings in flds joined by sep. Flds may contain arbitrary
// bytes, including the sep as they are safely escaped. JoinFields panics if
// sep is the backslash character or if len(sep) != 1.
func JoinFields(flds []string, sep string) string {
if len(sep) != 1 || sep == "\\" {
panic("invalid separator")
}
a := make([]string, len(flds))
for i, v := range flds {
v = strings.Replace(v, "\\", "\\0", -1)
a[i] = strings.Replace(v, sep, "\\1", -1)
}
return strings.Join(a, sep)
}
// SplitFields splits s, which must be produced by JoinFields using the same
// sep, into flds. SplitFields panics if sep is the backslash character or if
// len(sep) != 1.
func SplitFields(s, sep string) (flds []string) {
if len(sep) != 1 || sep == "\\" {
panic("invalid separator")
}
a := strings.Split(s, sep)
r := make([]string, len(a))
for i, v := range a {
v = strings.Replace(v, "\\1", sep, -1)
r[i] = strings.Replace(v, "\\0", "\\", -1)
}
return r
}
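// A round trip sketch (not part of the upstream package): JoinFields escapes
// backslashes and the separator, so SplitFields recovers the original fields.
//
//    flds := []string{"a,b", `c\d`, ""}
//    joined := strutil.JoinFields(flds, ",")  // `a\1b,c\0d,`
//    back := strutil.SplitFields(joined, ",") // equal to flds
//    fmt.Println(back)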
// PrettyPrintHooks allow customizing the result of PrettyPrint for the types
// used as the map keys.
type PrettyPrintHooks map[reflect.Type]func(f Formatter, v interface{}, prefix, suffix string)
// PrettyString returns the output of PrettyPrint as a string.
func PrettyString(v interface{}, prefix, suffix string, hooks PrettyPrintHooks) string {
var b bytes.Buffer
PrettyPrint(&b, v, prefix, suffix, hooks)
return b.String()
}
// PrettyPrint pretty prints v to w. Zero values and unexported struct fields
// are omitted.
func PrettyPrint(w io.Writer, v interface{}, prefix, suffix string, hooks PrettyPrintHooks) {
if v == nil {
return
}
f := IndentFormatter(w, "· ")
defer func() {
if e := recover(); e != nil {
f.Format("\npanic: %v", e)
}
}()
prettyPrint(nil, f, prefix, suffix, v, hooks)
}
func prettyPrint(protect map[interface{}]struct{}, sf Formatter, prefix, suffix string, v interface{}, hooks PrettyPrintHooks) {
if v == nil {
return
}
rt := reflect.TypeOf(v)
if handler := hooks[rt]; handler != nil {
handler(sf, v, prefix, suffix)
return
}
rv := reflect.ValueOf(v)
switch rt.Kind() {
case reflect.Slice:
if rv.Len() == 0 {
return
}
sf.Format("%s[]%T{ // len %d%i\n", prefix, rv.Index(0).Interface(), rv.Len())
for i := 0; i < rv.Len(); i++ {
prettyPrint(protect, sf, fmt.Sprintf("%d: ", i), ",\n", rv.Index(i).Interface(), hooks)
}
suffix = strings.Replace(suffix, "%", "%%", -1)
sf.Format("%u}" + suffix)
case reflect.Array:
if reflect.Zero(rt).Interface() == rv.Interface() {
return
}
sf.Format("%s[%d]%T{%i\n", prefix, rv.Len(), rv.Index(0).Interface())
for i := 0; i < rv.Len(); i++ {
prettyPrint(protect, sf, fmt.Sprintf("%d: ", i), ",\n", rv.Index(i).Interface(), hooks)
}
suffix = strings.Replace(suffix, "%", "%%", -1)
sf.Format("%u}" + suffix)
case reflect.Struct:
if rt.NumField() == 0 {
return
}
if reflect.DeepEqual(reflect.Zero(rt).Interface(), rv.Interface()) {
return
}
sf.Format("%s%T{%i\n", prefix, v)
for i := 0; i < rt.NumField(); i++ {
f := rv.Field(i)
if !f.CanInterface() {
continue
}
prettyPrint(protect, sf, fmt.Sprintf("%s: ", rt.Field(i).Name), ",\n", f.Interface(), hooks)
}
suffix = strings.Replace(suffix, "%", "%%", -1)
sf.Format("%u}" + suffix)
case reflect.Ptr:
if rv.IsNil() {
return
}
rvi := rv.Interface()
if _, ok := protect[rvi]; ok {
suffix = strings.Replace(suffix, "%", "%%", -1)
sf.Format("%s&%T{ /* recursive/repetitive pointee not shown */ }"+suffix, prefix, rv.Elem().Interface())
return
}
if protect == nil {
protect = map[interface{}]struct{}{}
}
protect[rvi] = struct{}{}
prettyPrint(protect, sf, prefix+"&", suffix, rv.Elem().Interface(), hooks)
case reflect.Int, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int8:
if v := rv.Int(); v != 0 {
suffix = strings.Replace(suffix, "%", "%%", -1)
sf.Format("%s%v"+suffix, prefix, v)
}
case reflect.Uint, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint8:
if v := rv.Uint(); v != 0 {
suffix = strings.Replace(suffix, "%", "%%", -1)
sf.Format("%s%v"+suffix, prefix, v)
}
case reflect.Float32, reflect.Float64:
if v := rv.Float(); v != 0 {
suffix = strings.Replace(suffix, "%", "%%", -1)
sf.Format("%s%v"+suffix, prefix, v)
}
case reflect.Complex64, reflect.Complex128:
if v := rv.Complex(); v != 0 {
suffix = strings.Replace(suffix, "%", "%%", -1)
sf.Format("%s%v"+suffix, prefix, v)
}
case reflect.Uintptr:
if v := rv.Uint(); v != 0 {
suffix = strings.Replace(suffix, "%", "%%", -1)
sf.Format("%s%v"+suffix, prefix, v)
}
case reflect.UnsafePointer:
s := fmt.Sprintf("%p", rv.Interface())
if s == "0x0" {
return
}
suffix = strings.Replace(suffix, "%", "%%", -1)
sf.Format("%s%s"+suffix, prefix, s)
case reflect.Bool:
if v := rv.Bool(); v {
suffix = strings.Replace(suffix, "%", "%%", -1)
sf.Format("%s%v"+suffix, prefix, rv.Bool())
}
case reflect.String:
s := rv.Interface().(string)
if s == "" {
return
}
suffix = strings.Replace(suffix, "%", "%%", -1)
sf.Format("%s%q"+suffix, prefix, s)
case reflect.Chan:
if reflect.Zero(rt).Interface() == rv.Interface() {
return
}
c := rv.Cap()
s := ""
if c != 0 {
s = fmt.Sprintf("// capacity: %d", c)
}
suffix = strings.Replace(suffix, "%", "%%", -1)
sf.Format("%s%s %s%s"+suffix, prefix, rt.ChanDir(), rt.Elem().Name(), s)
case reflect.Func:
if rv.IsNil() {
return
}
var in, out []string
for i := 0; i < rt.NumIn(); i++ {
x := reflect.Zero(rt.In(i))
in = append(in, fmt.Sprintf("%T", x.Interface()))
}
if rt.IsVariadic() {
i := len(in) - 1
in[i] = "..." + in[i][2:]
}
for i := 0; i < rt.NumOut(); i++ {
out = append(out, rt.Out(i).Name())
}
s := "(" + strings.Join(in, ", ") + ")"
t := strings.Join(out, ", ")
if len(out) > 1 {
t = "(" + t + ")"
}
if t != "" {
t = " " + t
}
suffix = strings.Replace(suffix, "%", "%%", -1)
sf.Format("%sfunc%s%s { ... }"+suffix, prefix, s, t)
case reflect.Map:
keys := rv.MapKeys()
if len(keys) == 0 {
return
}
var buf bytes.Buffer
nf := IndentFormatter(&buf, "· ")
var skeys []string
for i, k := range keys {
prettyPrint(protect, nf, "", "", k.Interface(), hooks)
skeys = append(skeys, fmt.Sprintf("%s%10d", buf.Bytes(), i))
buf.Reset()
}
sort.Strings(skeys)
sf.Format("%s%T{%i\n", prefix, v)
for _, k := range skeys {
si := strings.TrimSpace(k[len(k)-10:])
k = k[:len(k)-10]
n, _ := strconv.ParseUint(si, 10, 64)
mv := rv.MapIndex(keys[n])
prettyPrint(protect, sf, fmt.Sprintf("%s: ", k), ",\n", mv.Interface(), hooks)
}
suffix = strings.Replace(suffix, "%", "%%", -1)
sf.Format("%u}" + suffix)
}
}
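// A usage sketch (not part of the upstream package): zero values and
// unexported fields are omitted from the output.
//
//    type T struct {
//        Name  string
//        Count int
//        note  string // unexported, never printed
//    }
//
//    fmt.Print(strutil.PrettyString(&T{Name: "x", Count: 2}, "", "\n", nil))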

11
_vendor/vendor/github.com/cznic/y/AUTHORS generated vendored Normal file
View File

@ -0,0 +1,11 @@
# This file lists authors for copyright purposes. This file is distinct from
# the CONTRIBUTORS files. See the latter for an explanation.
#
# Names should be added to this file as:
# Name or Organization <email address>
#
# The email address is not required for organizations.
#
# Please keep the list sorted.
Jan Mercl <0xjnml@gmail.com>

9
_vendor/vendor/github.com/cznic/y/CONTRIBUTORS generated vendored Normal file
View File

@ -0,0 +1,9 @@
# This file lists people who contributed code to this repository. The AUTHORS
# file lists the copyright holders; this file lists people.
#
# Names should be added to this file like so:
# Name <email address>
#
# Please keep the list sorted.
Jan Mercl <0xjnml@gmail.com>

27
_vendor/vendor/github.com/cznic/y/LICENSE generated vendored Normal file
View File

@ -0,0 +1,27 @@
Copyright (c) 2014 The y Authors. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
* Neither the names of the authors nor the names of the
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

55
_vendor/vendor/github.com/cznic/y/Makefile generated vendored Normal file
View File

@ -0,0 +1,55 @@
# Copyright 2014 The y Authors. All rights reserved.
# Use of this source code is governed by a BSD-style
# license that can be found in the LICENSE file.
.PHONY: all clean editor later nuke todo internalError cover mem cpu y.test bison
grep=--include=*.go
all: editor
go tool vet -printfuncs "Log:0,Logf:1" *.go
golint .
make todo
bison:
find -name \*.y -execdir bison -r all --report-file {}.bison -o/dev/null {} \;
clean:
go clean
rm -f *~ y.output mem.out cpu.out y.test
cover:
t=$(shell tempfile) ; go test -coverprofile $$t && go tool cover -html $$t && unlink $$t
cpu: y.test
./$< -noerr -test.cpuprofile cpu.out
go tool pprof --lines $< cpu.out
editor:
gofmt -l -s -w .
go test -i
go test
go install
internalError:
egrep -ho '".*internal error.*"' *.go | sort | cat -n
later:
@grep -n $(grep) LATER * || true
@grep -n $(grep) MAYBE * || true
mem: y.test
./$< -noerr -test.memprofile mem.out -test.v # -test.memprofilerate 1
go tool pprof --lines --alloc_space $< mem.out
nuke: clean
go clean -i
todo:
@grep -nr $(grep) ^[[:space:]]*_[[:space:]]*=[[:space:]][[:alpha:]][[:alnum:]]* * || true
@grep -nr $(grep) TODO * || true
@grep -nr $(grep) BUG * || true
@grep -nr $(grep) [^[:alpha:]]println * || true
y.test:
go test -c

10
_vendor/vendor/github.com/cznic/y/README.md generated vendored Normal file
View File

@ -0,0 +1,10 @@
y
=
Package y converts yacc source code to data structures suitable for a parser generator.
Installation
$ go get github.com/cznic/y
Documentation: [godoc.org/github.com/cznic/y](http://godoc.org/github.com/cznic/y)

966
_vendor/vendor/github.com/cznic/y/api.go generated vendored Normal file
View File

@ -0,0 +1,966 @@
// Copyright 2014 The y Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//TODO +Engine, and export it.
// Package y converts .y (yacc[2]) source files to data suitable for a parser
// generator.
//
// Changelog
//
// 2015-02-23: Added methods Parser.{AcceptsEmptyInput,SkeletonXErrors}.
//
// 2015-01-16: Added Parser.Reductions and State.Reduce0 methods.
//
// 2014-12-18: Support %precedence for better bison compatibility[5].
//
// Links
//
// Referenced from elsewhere:
//
// [0]: http://godoc.org/github.com/cznic/goyacc
// [1]: http://people.via.ecp.fr/~stilgar/doc/compilo/parser/Generating%20LR%20Syntax%20Error%20Messages.pdf
// [2]: http://dinosaur.compilertools.net/yacc/
// [3]: http://dinosaur.compilertools.net/lex/index.html
// [4]: https://www.gnu.org/software/bison/manual/html_node/Using-Mid_002dRule-Actions.html
// [5]: http://www.gnu.org/software/bison/manual/html_node/Precedence-Only.html#Precedence-Only
// [6]: http://www.gnu.org/software/bison/manual/html_node/Token-Decl.html#Token-Decl
package y
import (
"bytes"
"fmt"
"go/ast"
"go/token"
"io"
"io/ioutil"
"sort"
"strconv"
"strings"
yparser "github.com/cznic/parser/yacc"
)
// Values of {AssocDef,Rule,Sym}.Associativity
const (
AssocNotSpecified = iota
AssocLeft // %left
AssocRight // %right
AssocNone // %nonassoc
AssocPrecedence // %precedence
)
// Action describes one cell of the parser table, ie. the action to be taken when
// the lookahead is Sym.
type Action struct {
Sym *Symbol
arg int
}
// Kind returns typ: 'a' for accept, 's' for shift, 'r' for reduce and 'g' for goto.
//
// For 'a' arg is not used.
// For 's' arg is the state number to shift to.
// For 'r' arg is the rule number to reduce.
// For 'g' arg is the state number to goto.
func (a Action) Kind() (typ, arg int) {
if !a.Sym.IsTerminal {
return 'g', a.arg
}
switch arg := a.arg; {
case arg < 0:
return 'r', -arg
case arg > 0:
return 's', arg
}
return 'a', -1
}
// AssocDef describes one association definition of the .y source code. For
// example:
//
// %left '+', '-'
// %left '*', '/'
//
// The above will produce two items in Parser.AssocDefs with the particular
// values of the associativity and precedence recorded in the Associativity and
// Precedence fields of the respective Syms element.
type AssocDef struct {
Associativity int // One of the nonzero Assoc* constant values.
Syms []*Symbol // Symbols present for this association definition in the order of appearance.
}
// Options amend the behavior of the various Process* functions.
//
// Error Examples
//
// Error examples implement the ideas in "Generating LR Syntax Error Messages
// from Examples"[1]. They extend the capability of a LALR parser to produce
// better error messages.
//
// XErrorSrc is a sequence of Go tokens separated by white space using the same
// rules as valid Go source code except that semicolon injection is not used.
// Comments of both short and long form are equal to white space. An example
// consists of an optional state set prefix followed by zero or more token
// specifiers followed by an error message. A state set is zero or more
// integer literals. Token specifier is a valid Go identifier or a Go
// character literal. The error message is a Go string literal. The EBNF is
//
// ErrorExamples = { { INT_LIT } { IDENTIFIER | CHAR_LIT } STRING_LIT } .
//
// The identifiers used in XErrorsSrc must be those defined as tokens in the
// yacc file. An implicit $end token is inserted at the end of the example
// input if no state set is given for that example. Examples with a state set
// are assumed to always specify the error-triggering lookahead token as the
// last example token, which is usually, but not necessarily the reserved
// error terminal symbol. If an example has a state set but no example tokens,
// a $end is used as an example. For example:
//
// /*
// Reject empty file
// */
// /* $end inserted here*/ "invalid empty input"
//
// PACKAGE /* $end inserted here */
// "Unexpected EOF"
//
// PACKAGE ';' /* $end inserted here even though parsing stops at ';' */
// `Missing package name or newline after "package"`
//
// vs
//
//
// /*
// Reject empty file
// */
// 0
// /* $end inserted here */ "invalid empty input"
//
// 2
// PACKAGE error /* no $end inserted here */
// `Missing package name or newline after "package"`
//
// Other examples
//
// PACKAGE IDENT ';'
// IMPORT STRING_LIT ','
// "multiple imports must be separated by semicolons"
//
// // Make the semicolon injection error a bit more user friendly.
// PACKAGE ';'
// `Missing package name or newline after "package"`
//
// // A calculator parser might have error examples like
// NUMBER '+' "operand expected"
// NUMBER '-' error "invalid operand for subtraction"
//
// Use a specific bad token to provide a specific message:
//
// // Coders frequently make this mistake.
// FOO BAR BAZ "baz cannot follow bar, only qux or frob can"
//
// Use the reserved error token to be less specific:
//
// // Catch any invalid token sequences after foo bar.
// FOO BAR error "bar must be followed by qux or frob"
//
// Terminate the token sequence to detect premature end of file:
//
// PACKAGE "missing package name"
//
// Similar to lex[3], examples sharing the same "action" can be joined by
// the | operator:
//
// CONST |
// FUNC |
// IMPORT |
// TYPE |
// VAR "package clause must be first"
//
// It's an error if the example token sequence is accepted by the parser, ie.
// if it does not produce an error.
//
// Note: In the case of an example with a state set, the example tokens, except for
// the last one, serve documentation purposes only. Only the combination of a state and a particular
// lookahead is actually considered by the parser.
//
// Examples without a state set are processed differently and all the example
// tokens matter. An attempt is made to find the applicable state set
// automatically, but this computation is not yet completely functional and
// possibly only a subset of the real error states are produced.
type Options struct {
AllowConflicts bool // Do not report unresolved conflicts as errors.
AllowTypeErrors bool // Continue even if type checks fail.
Closures bool // Report non kernel items.
LA bool // Report all lookahead sets.
Report io.Writer // If non nil, write a grammar report to Report.
Resolved bool // Explain how conflicts were resolved.
Reducible bool // Check if all states are reducible. (Expensive)
XErrorsName string // Name used to report errors in XErrorsSrc, defaults to <xerrors>.
XErrorsSrc []byte // Used to produce errors by example[1].
debugSyms bool
// In conflict with xerrors processing.
noDefault bool // Disable collapsing largest reduce lookahead set to $default.
}
func (o *Options) boot(fset *token.FileSet) (*Options, error) {
if o == nil {
return &Options{}, nil
}
p := *o
p.noDefault = true
return &p, nil
}
// Parser describes the resulting parser. The intended client is a parser
// generator (like eg. [0]) producing the final Go source code.
type Parser struct {
AssocDefs []*AssocDef // %left, %right, %nonassoc definitions in the order of appearance in the source code.
ConflictsRR int // Number of reduce/reduce conflicts.
ConflictsSR int // Number of shift/reduce conflicts.
Definitions []*yparser.Definition // All definitions.
ErrorVerbose bool // %error-verbose is present.
LiteralStrings map[string]*Symbol // See Symbol.LiteralString field.
Prologue string // Collected prologue between the %{ and %} marks.
Rules []*Rule // Rules indexed by rule number.
Start string // Name of the start production.
States []*State // Parser states indexed by state number.
Syms map[string]*Symbol // Symbols indexed by name, eg. "IDENT", "Expression" or "';'".
Table [][]Action // Indexed by state number.
Tail string // Everything after the second %%, if present.
Union *ast.StructType // %union as Go AST.
UnionSrc string // %union as Go source form.
XErrors []XError // Errors by example[1] descriptions.
y *y //
}
func newParser() *Parser {
return &Parser{}
}
func (p *Parser) parse(stopState int, lex func() *Symbol) (int, error) {
eof := p.Syms["$end"]
yystate := 0
var yyS []int
var yychar *Symbol
stack:
for i := 0; i < 100; i++ {
if yystate == stopState {
return yystate, nil
}
yyS = append(yyS, yystate)
if yychar == nil {
yychar = lex()
if yychar == nil {
yychar = eof
}
}
for _, act := range p.Table[yystate] {
if act.Sym != yychar {
continue
}
switch typ, arg := act.Kind(); typ {
case 'a':
return yystate, nil
case 's':
yychar = nil
yystate = arg
case 'r':
rule := p.Rules[arg]
n := len(yyS)
m := len(rule.Components)
yyS = yyS[:n-m]
n -= m
tos := yyS[n-1]
yystate = p.States[tos].gotos[rule.Sym].arg
}
continue stack
}
return yystate, fmt.Errorf("no action for %s in state %d", yychar, yystate)
}
return yystate, fmt.Errorf("parser stall in state %d", yystate)
}
// AcceptsEmptyInput returns whether the token string [$end] is accepted by the
// grammar.
func (p *Parser) AcceptsEmptyInput() bool {
toks, la := p.States[1].Syms0()
return len(toks) == 0 && la == p.y.endSym
}
func (s *State) skeletonXErrors(y *y) (nonTerminals, terminals map[*Symbol]struct{}) {
for _, item := range s.kernel {
for {
sym := item.next(y)
if sym == nil || sym.IsTerminal {
break
}
if sym.IsEmpty() {
item = newItem(item.rule(), item.dot()+1)
continue
}
if nonTerminals == nil {
nonTerminals = map[*Symbol]struct{}{}
}
nonTerminals[sym] = struct{}{}
if !sym.DerivesEmpty() {
break
}
item = newItem(item.rule(), item.dot()+1)
}
}
for _, item := range s.xitems {
if sym := item.next(y); sym != nil && !sym.IsTerminal {
if nonTerminals == nil {
nonTerminals = map[*Symbol]struct{}{}
}
nonTerminals[sym] = struct{}{}
}
}
terminals = map[*Symbol]struct{}{}
for k := range s.actions {
if k == y.errSym {
return nil, nil
}
terminals[k] = struct{}{}
}
return nonTerminals, terminals
}
// SkeletonXErrors writes an automatically generated errors by example file to
// w.
func (p *Parser) SkeletonXErrors(w io.Writer) error {
if !p.AcceptsEmptyInput() {
if _, err := fmt.Fprintf(w, `/*
Reject empty input
*/
0
"invalid empty input"
`); err != nil {
return err
}
}
type t struct {
states []int
syms []string
}
errs := map[string]t{}
for _, state := range p.States {
nt, t := state.skeletonXErrors(p.y)
if nt == nil && t == nil {
continue
}
var nta, ta []string
m := map[string]bool{}
for k := range nt {
nm := k.Name
if s := k.LiteralString; s != "" {
s2, err := strconv.Unquote(s)
if err != nil {
p.y.err(k.Pos, "unquote %s: %v", s, err)
}
nm = s2
}
nm = strings.TrimSpace(nm)
if m[nm] {
continue
}
m[nm] = true
nta = append(nta, nm)
}
sort.Strings(nta)
for k := range t {
nm := k.Name
if s := k.LiteralString; s != "" {
s2, err := strconv.Unquote(s)
if err != nil {
p.y.err(k.Pos, "unquote %s: %v", s, err)
}
nm = s2
}
nm = strings.TrimSpace(nm)
if m[nm] {
continue
}
m[nm] = true
ta = append(ta, nm)
}
sort.Strings(ta)
snt := strings.Join(nta, " or ")
if len(nta) != 0 {
snt += " or "
}
st := strings.Join(ta, ", ")
if len(ta) > 1 {
st = "one of [" + st + "]"
}
s := fmt.Sprintf("expected %s%s", snt, st)
v := errs[s]
v.states = append(v.states, state.id)
syms0, _ := state.Syms0()
syms := fmt.Sprintf("%v", syms0)
syms = syms[1 : len(syms)-1]
v.syms = append(v.syms, syms)
errs[s] = v
}
var a []string
for k := range errs {
a = append(a, k)
}
sort.Strings(a)
for _, msg := range a {
if _, err := fmt.Fprintln(w); err != nil {
return err
}
v := errs[msg]
for i, state := range v.states {
syms := v.syms[i]
if syms != "" {
syms = " // " + syms
}
if _, err := fmt.Fprintf(w, "%d%s\n", state, syms); err != nil {
return err
}
}
if _, err := fmt.Fprintf(w, "error %q\n", msg); err != nil {
return err
}
}
return nil
}
// Reductions returns a mapping rule# -> []state#. The slice is a sorted set of
// states in which the corresponding rule is reduced.
func (p *Parser) Reductions() map[int][]int {
m := map[int][]int{}
for state, actions := range p.Table {
for _, act := range actions {
if typ, arg := act.Kind(); typ == 'r' {
m[arg] = append(m[arg], state)
}
}
}
for k, v := range m {
sort.Ints(v)
m[k] = v
}
return m
}
// ProcessAST processes yacc source code parsed in ast. It returns a *Parser or
// an error, if any.
func ProcessAST(fset *token.FileSet, ast *yparser.Specification, opts *Options) (*Parser, error) {
y, err := processAST(fset, ast, opts)
if y == nil {
return nil, err
}
y.Parser.y = y
for i, row := range y.Parser.Table {
sort.Sort(actions(row))
y.Parser.Table[i] = row
}
return y.Parser, err
}
// ProcessFile processes yacc source code in a named file. It returns a *Parser
// or an error, if any.
func ProcessFile(fset *token.FileSet, fname string, opts *Options) (*Parser, error) {
b, err := ioutil.ReadFile(fname)
if err != nil {
return nil, err
}
return ProcessSource(fset, fname, b, opts)
}
// ProcessSource processes yacc source code in src. It returns a *Parser or an
// error, if any.
func ProcessSource(fset *token.FileSet, fname string, src []byte, opts *Options) (*Parser, error) {
ast, err := yparser.Parse(fset, fname, src)
if err != nil {
return nil, err
}
return ProcessAST(fset, ast, opts)
}
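// A usage sketch (not part of the upstream package): building a Parser from a
// grammar file and decoding its table with Action.Kind. The file name is a
// placeholder and error handling is elided.
//
//    fset := token.NewFileSet()
//    p, err := y.ProcessFile(fset, "example.y", &y.Options{AllowConflicts: true})
//    if err != nil {
//        // handle the error
//    }
//    for state, row := range p.Table {
//        for _, act := range row {
//            typ, arg := act.Kind() // 'a', 's', 'r' or 'g' and its argument
//            fmt.Printf("state %d, %s: %c %d\n", state, act.Sym, typ, arg)
//        }
//    }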
// Rule describes a single yacc rule, for example (in source form)
//
// Start:
// Prologue Body Epilogue
// {
// $$ = &ast{$1, $2, $3}
// }
//
// Inner rule actions
//
// A rule can prescribe semantic actions not only at the end. For example
//
// Foo:
// Bar
// {
// initBar($1)
// }
// Qux
// {
// handleQux($3)
// }
//
// Such constructs are rewritten as
//
// $@1:
// {
// initBar($1)
// }
//
// Foo:
// Bar $@1 Qux
// {
// handleQux($3)
// }
//
// The $@1 and similar is a synthetic rule and such have non nil Parent.
// MaxParentDlr is used to check that the semantic action does not access
// parent values not yet shifted to the parse stack as well as to compute the
// position of the $n thing on the parse stack. See also [4].
type Rule struct {
Action *yparser.Action // The semantic action associated with the rule, if any. If present then also the last element of Body.
Associativity int // One of the assoc* constants.
Body []interface{} // Rule components - int, string or *yparser.Action
Components []string // Textual forms of the rule components, for example []string{"IDENT", "';'"}
ExplicitPrecSym *Symbol // Symbol used in the optional %prec sym clause, if present.
MaxParentDlr int // See the Rule type docs for details.
Name *yparser.Token // The rule name token, if any (otherwise the rule starts with "|").
Parent *Rule // Non nil if a synthetic rule.
PrecSym *Symbol // Effective %prec symbol used, if any.
Precedence int // -1 if no precedence assigned.
RuleNum int // Zero based rule number. Rule #0 is synthetic.
Sym *Symbol // LHS of the rule.
Token *yparser.Token // yparser.IDENT or "|"
maxDlr int
pos token.Pos
syms []*Symbol
}
// Actions returns the textual representation of r.Actions combined.
func (r *Rule) Actions() string {
var buf bytes.Buffer
for _, v := range r.Action.Values {
switch v.Type {
case yparser.ActionValueDlrDlr:
buf.WriteString("$$")
case yparser.ActionValueDlrNum:
buf.WriteString(fmt.Sprintf("$%d", v.Num))
case yparser.ActionValueDlrTagDlr:
buf.WriteString(fmt.Sprintf("$<%s>$", v.Tag))
case yparser.ActionValueDlrTagNum:
buf.WriteString(fmt.Sprintf("$<%s>%d", v.Tag, v.Num))
default:
buf.WriteString(v.Src)
}
}
return buf.String()
}
// State represents one state of the parser.
type State struct {
actions map[*Symbol][]action //
distance int // On path to state 0.
gotos map[*Symbol]action //
id int // Numeric id of the state.
kernel itemSet //
lookahead []symSet // kernel LA.
parent *State // On path to state 0.
psym *Symbol // Label for the edge parent -> state.
resolved []string //TODO non string data.
sym *Symbol // Sym transfering from parent to state.
trans map[trans]stateItem // sym.i -> stateItem
xitems itemSet // {x ∈ closure(kernel) | x.rule -> ε }.
xla []symSet // xitems LA.
y *y //
}
func newState(y *y, s itemSet) *State {
return &State{
actions: map[*Symbol][]action{},
gotos: map[*Symbol]action{},
kernel: s,
lookahead: make([]symSet, len(s)),
trans: map[trans]stateItem{},
y: y,
}
}
func (s *State) zpath() []int {
if s == nil {
return nil
}
return append(s.parent.zpath(), s.id)
}
func (s *State) syms0() []*Symbol {
s.y.zeroPaths()
if s.parent == nil {
return nil
}
sym := s.psym
if sym.IsTerminal {
return append(s.parent.syms0(), sym)
}
return append(s.parent.syms0(), sym.MinString()...)
}
// Syms0 returns an example of a string and a lookahead, if any, required to
// get to state s starting at state 0. If s is shifted into, the lookahead is
// nil.
//
// Note: Invalid grammars and grammars with conflicts may not have all states
// reachable.
//
// To construct an example of a string for which the parser enters state s:
//
// syms, la := s.Syms0()
// if la != nil {
// syms = append(syms, la)
// }
//
func (s *State) Syms0() ([]*Symbol, *Symbol) {
str := s.syms0()
if s.parent == nil {
return str, nil
}
if s.psym.IsTerminal {
return str, nil
}
str0 := str
var a []string
for sym := range s.actions {
str = append(str0, sym)
if stop, _ := s.y.parse(s.id, func() *Symbol {
if len(str) == 0 {
return nil
}
r := str[0]
str = str[1:]
return r
}); stop == s.id {
a = append(a, sym.Name)
}
}
if len(a) == 0 {
return str0, nil
}
sort.Strings(a)
return str0, s.y.Syms[a[0]]
}
// Reduce0 returns an example of a string required to reduce rule r in state s
// starting at state 0. If state s does not reduce rule r the string is empty.
//
// Note: Invalid grammars and grammars with conflicts may not have all states
// reachable and/or all productions reducible.
func (s *State) Reduce0(r *Rule) []*Symbol {
rn := r.RuleNum
las := []string{}
for la, acts := range s.actions {
act := acts[0]
if act.kind == 'r' && act.arg == rn {
las = append(las, la.Name)
}
}
if len(las) == 0 {
return nil
}
syms, _ := s.Syms0()
sort.Strings(las)
return append(syms, s.y.Syms[las[0]])
}
// Symbol represents a terminal or non terminal symbol. A special end symbol
// has Name "$end" and represents the EOF token. A special default symbol has
// Name "$default" and represents the default action.
//
// LiteralString field
//
// Some parser generators accept an optional literal string token associated
// with a token definition. From [6]:
//
// You can associate a literal string token with a token type name by
// writing the literal string at the end of a %token declaration which
// declares the name. For example:
//
// %token arrow "=>"
//
// For example, a grammar for the C language might specify these names
// with equivalent literal string tokens:
//
// %token <operator> OR "||"
// %token <operator> LE 134 "<="
// %left OR "<="
//
// Once you equate the literal string and the token name, you can use them
// interchangeably in further declarations or the grammar rules. The yylex
// function can use the token name or the literal string to obtain the
// token type code number (see Calling Convention). Syntax error messages
// passed to yyerror from the parser will reference the literal string
// instead of the token name.
//
// The LiteralString captures the value of other definitions as well, namely
// also for %type definitions.
//
// %type CommaOpt "optional comma"
//
// %%
//
// CommaOpt:
// /* empty */
// | ','
type Symbol struct {
Associativity int // One of the assoc* constants.
ExplicitValue int // Explicit numeric value of the symbol or -1 if none.
IsLeftRecursive bool // S: S ... ;
IsRightRecursive bool // S: ... S ;
IsTerminal bool // Whether this is a terminal symbol.
LiteralString string // See the "LiteralString field" part of the Symbol godocs.
Name string // Textual value of the symbol, for example "IDENT" or "';'".
Pos token.Pos // Position where the symbol was firstly introduced.
Precedence int // -1 if no precedence assigned.
Rules []*Rule // Productions associated with this symbol.
Type string // For example "int", "float64" or "foo", but possibly also "".
Value int // Assigned numeric value of the symbol.
derivesE bool // Non terminal sym derives ε.
derivesEValid bool //
first1 symSet //
firstValid bool //
follow symSet //
id int // Index into y.syms
minStr []*Symbol //
minStrOk bool //
}
// IsEmpty reports whether s derives only ε.
func (s *Symbol) IsEmpty() bool {
return len(s.Rules) == 1 && len(s.Rules[0].Components) == 0
}
func (s *Symbol) derivesEmpty(m map[*Symbol]bool) bool {
if m[s] {
return false
}
m[s] = true
if s.IsTerminal {
return false
}
if s.derivesEValid {
return s.derivesE
}
nextRule:
for _, rule := range s.Rules {
if len(rule.Components) == 0 {
s.derivesE = true
s.derivesEValid = true
return true
}
for _, sym := range rule.syms {
if !sym.derivesEmpty(m) {
continue nextRule
}
}
s.derivesE = true
s.derivesEValid = true
return true
}
s.derivesE = false
s.derivesEValid = true
return false
}
// DerivesEmpty returns whether s derives ε.
func (s *Symbol) DerivesEmpty() bool {
return s.derivesEmpty(map[*Symbol]bool{})
}
// - dragon 4.4
// - http://www.cs.virginia.edu/~cs415/reading/FirstFollowLL.pdf
func (s *Symbol) first(y *y) (r symSet) {
if s.firstValid {
return s.first1
}
s.firstValid = true
r = y.newSymSet(-1)
for _, rule := range s.Rules {
if len(rule.Components) == 0 {
r.addEmpty()
break
}
}
s.first1 = r
defer func() {
s.first1 = r
}()
if s.IsTerminal {
return y.newSymSet(s.id)
}
if s == y.emptySym {
return y.newSymSet(s.id)
}
nextRule:
for _, rule := range s.Rules {
for _, sym := range rule.syms {
t := sym.first(y)
r.add(t, false)
if !t.hasEmpty() {
continue nextRule
}
}
r.addEmpty()
}
return r
}
// MinString returns an example of a string of symbols which can be reduced to
// s. If s is a terminal symbol the result is s. If the only way to express
// some non terminal s includes s itself then nil is returned (and the grammar
// is invalid).
func (s *Symbol) MinString() (r []*Symbol) {
r, _ = s.minString(nil)
return r
}
func (s *Symbol) minString(m map[*Symbol]int) (r []*Symbol, ok bool) {
if str := s.minStr; str != nil {
return str, s.minStrOk
}
defer func() {
s.minStr = r
s.minStrOk = ok
}()
if s.IsTerminal {
return []*Symbol{s}, true
}
if s.DerivesEmpty() {
return []*Symbol{}, true
}
if m[s] != 0 {
return nil, false
}
if m == nil {
m = map[*Symbol]int{}
}
m[s]++
defer func() { m[s]-- }()
var best []*Symbol
var bestHasError bool
nextRule:
for _, rule := range s.Rules {
var current []*Symbol
hasError := false
for _, sym := range rule.syms {
if sym.Name == "error" {
hasError = true
}
str, ok := sym.minString(m)
if !ok {
continue nextRule
}
current = append(current, str...)
}
ok = true
switch {
case best == nil:
best = current
bestHasError = hasError
case best != nil && bestHasError && !hasError:
best = current
bestHasError = false
case best != nil && !bestHasError && hasError:
// nop
case len(current) < len(best):
best = current
case len(current) == len(best):
for i, a := range current {
b := best[i]
if a.Name > b.Name {
break
}
if a.Name < b.Name {
best = current
break
}
}
}
}
return best, ok
}
// String implements fmt.Stringer.
func (s *Symbol) String() string {
if s != nil {
return s.Name
}
return "<nil>"
}
// XError describes the parser state for an error by example. See [1].
type XError struct {
Stack []int // Parser states stack, potentially partial, of the error event. TOS is Stack[len(Stack)-1].
Lookahead *Symbol // Error lookahead symbol. Nil if LA is the reserved error symbol.
Msg string // Textual representation of the error condition.
}
func (x *XError) mapKey() string {
return fmt.Sprintf("%v %v", x.Stack, x.Lookahead)
}

2199
_vendor/vendor/github.com/cznic/y/y.go generated vendored Normal file

File diff suppressed because it is too large

796
parser/goyacc/main.go Normal file
View File

@ -0,0 +1,796 @@
// Copyright 2014 The goyacc Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// This source code uses portions of code previously published in the Go tool
// yacc[0] program; the respective license can be found in the LICENSE-GO-YACC
// file.
// Goyacc is a version of yacc generating Go parsers.
//
// Usage
//
// Note: If no non flag arguments are given, goyacc reads standard input.
//
// goyacc [options] [input]
//
// options and (defaults)
// -c Report state closures. (false)
// -cr Check all states are reducible. (false)
// -dlval Debug value when runtime yyDebug >= 3. ("lval")
// -dlvalf Debug format of -dlval. ("%+v")
// -ex Explain how conflicts were resolved. (false)
// -l Disable line directives, for compatibility only - ignored. (false)
// -la Report all lookahead sets. (false)
// -o outputFile Parser output. ("y.go")
// -p prefix Name prefix to use in generated code. ("yy")
// -v reportFile Create grammar report. ("y.output")
// -xe examplesFile Generate error messages by examples. ("")
// -xegen examplesFile Generate a file suitable for -xe automatically from the grammar.
// The file must not exist. ("")
//
//
//
// Changelog
//
// 2015-03-24: The search for a custom error message is now extended to include
// also the last state that was shifted into, if any. This change resolves a
// problem in which a lookahead symbol is valid for a reduce action in state A,
// but the same symbol is later never accepted by any shift action in some
// state B which is popped from the state stack after the reduction is
// performed. The state computed from the example is A, but when the error is
// actually detected the state is B, so the custom error was not used.
//
// 2015-02-23: Added -xegen flag. It can be used to automagically generate a
// skeleton errors by example file which can be, for example, edited and/or
// submitted later as an argument of the -xe option.
//
// 2014-12-18: Support %precedence for better bison compatibility[3]. The
// actual changes are in packages goyacc is dependent on. Goyacc users should
// rebuild the binary:
//
// $ go get -u github.com/cznic/goyacc
//
// 2014-12-02: Added support for the optional yyLexerEx interface. The Reduced
// method can be useful for debugging and/or automatically producing examples
// by parsing code fragments. If it returns true the parser exits immediately
// with return value -1.
//
// Overview
//
// The generated parser is reentrant and mostly backwards compatible with
// parsers generated by go tool yacc[0]. yyParse expects to be given an
// argument that conforms to the following interface:
//
// type yyLexer interface {
// Lex(lval *yySymType) int
// Error(e string)
// }
//
// Optionally the argument to yyParse may implement the following interface:
//
// type yyLexerEx interface {
// yyLexer
// // Hook for recording a reduction.
// Reduced(rule, state int, lval *yySymType) (stop bool) // Client should copy *lval.
// }
//
// Lex should return the token identifier, and place other token information in
// lval (which replaces the usual yylval). Error is equivalent to yyerror in
// the original yacc.
//
// Code inside the parser may refer to the variable yylex, which holds the
// yyLexer passed to Parse.
//
// Multiple grammars compiled into a single program should be placed in
// distinct packages. If that is impossible, the "-p prefix" flag to yacc sets
// the prefix, by default yy, that begins the names of symbols, including
// types, the parser, and the lexer, generated and referenced by yacc's
// generated code. Setting it to distinct values allows multiple grammars to be
// placed in a single package.
//
// Differences wrt go tool yacc
//
// - goyacc implements ideas from "Generating LR Syntax Error Messages from
// Examples"[1]. Use the -xe flag to pass a name of the example file. For more
// details about the example format please see [2].
//
// - The grammar report includes example token sequences leading to the
// particular state. Can help understanding conflicts.
//
// - Minor changes in parser debug output.
//
// Links
//
// Referenced from elsewhere:
//
// [0]: http://golang.org/cmd/yacc/
// [1]: http://people.via.ecp.fr/~stilgar/doc/compilo/parser/Generating%20LR%20Syntax%20Error%20Messages.pdf
// [2]: http://godoc.org/github.com/cznic/y#hdr-Error_Examples
// [3]: http://www.gnu.org/software/bison/manual/html_node/Precedence-Only.html#Precedence-Only
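// A minimal lexer sketch (not part of the original documentation): a yyLexer
// feeding a fixed token stream to the generated parser; yySymType and the
// token constants come from the goyacc output, and returning 0 conventionally
// signals end of input.
//
//    type lexer struct{ toks []int }
//
//    func (l *lexer) Lex(lval *yySymType) int {
//        if len(l.toks) == 0 {
//            return 0 // end of input
//        }
//        t := l.toks[0]
//        l.toks = l.toks[1:]
//        return t
//    }
//
//    func (l *lexer) Error(e string) { log.Print(e) } // log is assumed imported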
package main
import (
"bufio"
"bytes"
"flag"
"fmt"
"go/format"
"go/scanner"
"go/token"
"io"
"io/ioutil"
"log"
"os"
"runtime"
"sort"
"strings"
"github.com/cznic/mathutil"
"github.com/cznic/parser/yacc"
"github.com/cznic/sortutil"
"github.com/cznic/strutil"
"github.com/cznic/y"
)
var (
//oNoDefault = flag.Bool("nodefault", false, "disable generating $default actions")
oClosures = flag.Bool("c", false, "report state closures")
oReducible = flag.Bool("cr", false, "check all states are reducible")
oDlval = flag.String("dlval", "lval", "debug value (runtime yyDebug >= 3)")
oDlvalf = flag.String("dlvalf", "%+v", "debug format of -dlval (runtime yyDebug >= 3)")
oLA = flag.Bool("la", false, "report all lookahead sets")
oNoLines = flag.Bool("l", false, "disable line directives (for compatibility only - ignored)")
oOut = flag.String("o", "y.go", "parser output")
oPref = flag.String("p", "yy", "name prefix to use in generated code")
oReport = flag.String("v", "y.output", "create grammar report")
oResolved = flag.Bool("ex", false, "explain how conflicts were resolved")
oXErrors = flag.String("xe", "", "generate eXtra errors from examples source file")
oXErrorsGen = flag.String("xegen", "", "generate an errors by example source file automatically from the grammar")
)
func main() {
log.SetFlags(0)
defer func() {
_, file, line, ok := runtime.Caller(2)
if e := recover(); e != nil {
switch {
case ok:
log.Fatalf("%s:%d: panic: %v", file, line, e)
default:
log.Fatalf("panic: %v", e)
}
}
}()
flag.Parse()
var in string
switch flag.NArg() {
case 0:
in = os.Stdin.Name()
case 1:
in = flag.Arg(0)
default:
log.Fatal("expected at most one non-flag argument")
}
if err := main1(in); err != nil {
switch x := err.(type) {
case scanner.ErrorList:
for _, v := range x {
fmt.Fprintf(os.Stderr, "%v\n", v)
}
os.Exit(1)
default:
log.Fatal(err)
}
}
}
type symUsed struct {
sym *y.Symbol
used int
}
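// symsUsed implements sort.Interface, ordering symbols by decreasing usage
// count and then by case-insensitive name.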
type symsUsed []symUsed
func (s symsUsed) Len() int { return len(s) }
func (s symsUsed) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
func (s symsUsed) Less(i, j int) bool {
if s[i].used > s[j].used {
return true
}
if s[i].used < s[j].used {
return false
}
return strings.ToLower(s[i].sym.Name) < strings.ToLower(s[j].sym.Name)
}
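// main1 translates the grammar file in into a Go parser, writing the parser
// source (-o), the grammar report (-v) and, optionally, an errors-by-example
// skeleton (-xegen).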
func main1(in string) (err error) {
var out io.Writer
if nm := *oOut; nm != "" {
var f *os.File
var e error
if f, err = os.Create(nm); err != nil {
return err
}
defer func() {
if e1 := f.Close(); e1 != nil && err == nil {
err = e1
}
}()
w := bufio.NewWriter(f)
defer func() {
if e1 := w.Flush(); e1 != nil && err == nil {
err = e1
}
}()
buf := bytes.NewBuffer(nil)
out = buf
defer func() {
var dest []byte
if dest, e = format.Source(buf.Bytes()); e != nil {
dest = buf.Bytes()
}
if _, e = w.Write(dest); e != nil && err == nil {
err = e
}
}()
}
var rep io.Writer
if nm := *oReport; nm != "" {
f, err1 := os.Create(nm)
if err1 != nil {
return err1
}
defer func() {
if e := f.Close(); e != nil && err == nil {
err = e
}
}()
w := bufio.NewWriter(f)
defer func() {
if e := w.Flush(); e != nil && err == nil {
err = e
}
}()
rep = w
}
var xerrors []byte
if nm := *oXErrors; nm != "" {
b, err1 := ioutil.ReadFile(nm)
if err1 != nil {
return err1
}
xerrors = b
}
p, err := y.ProcessFile(token.NewFileSet(), in, &y.Options{
//NoDefault: *oNoDefault,
AllowConflicts: true,
Closures: *oClosures,
LA: *oLA,
Reducible: *oReducible,
Report: rep,
Resolved: *oResolved,
XErrorsName: *oXErrors,
XErrorsSrc: xerrors,
})
if err != nil {
return err
}
if fn := *oXErrorsGen; fn != "" {
f, err := os.OpenFile(fn, os.O_RDWR|os.O_CREATE, 0666)
if err != nil {
return err
}
b := bufio.NewWriter(f)
if err := p.SkeletonXErrors(b); err != nil {
return err
}
if err := b.Flush(); err != nil {
return err
}
if err := f.Close(); err != nil {
return err
}
}
msu := make(map[*y.Symbol]int, len(p.Syms)) // sym -> usage
for nm, sym := range p.Syms {
if nm == "" || nm == "ε" || nm == "$accept" || nm == "#" {
continue
}
msu[sym] = 0
}
var minArg, maxArg int
for _, state := range p.Table {
for _, act := range state {
msu[act.Sym]++
k, arg := act.Kind()
if k == 'a' {
continue
}
if k == 'r' {
arg = -arg
}
minArg, maxArg = mathutil.Min(minArg, arg), mathutil.Max(maxArg, arg)
}
}
su := make(symsUsed, 0, len(msu))
for sym, used := range msu {
su = append(su, symUsed{sym, used})
}
sort.Sort(su)
// ----------------------------------------------------------- Prologue
f := strutil.IndentFormatter(out, "\t")
f.Format("// CAUTION: Generated file - DO NOT EDIT.\n\n")
f.Format("%s", injectImport(p.Prologue))
f.Format(`
type %[1]sSymType %i%s%u
type %[1]sXError struct {
state, xsym int
}
`, *oPref, p.UnionSrc)
// ---------------------------------------------------------- Constants
nsyms := map[string]*y.Symbol{}
a := make([]string, 0, len(msu))
maxTokName := 0
for sym := range msu {
nm := sym.Name
if nm == "$default" || nm == "$end" || sym.IsTerminal && nm[0] != '\'' && sym.Value > 0 {
maxTokName = mathutil.Max(maxTokName, len(nm))
a = append(a, nm)
}
nsyms[nm] = sym
}
sort.Strings(a)
f.Format("\nconst (%i\n")
for _, v := range a {
nm := v
switch nm {
case "error":
nm = *oPref + "ErrCode"
case "$default":
nm = *oPref + "Default"
case "$end":
nm = *oPref + "EofCode"
}
f.Format("%s%s = %d\n", nm, strings.Repeat(" ", maxTokName-len(nm)+1), nsyms[v].Value)
}
minArg-- // e.g. for range [-13, 42]: minArg becomes -14, so -13 maps to 1 and zero cell values mean "empty".
f.Format("\n%sMaxDepth = 200\n", *oPref)
f.Format("%sTabOfs = %d\n", *oPref, minArg)
f.Format("%u)")
// ---------------------------------------------------------- Variables
f.Format("\n\nvar (%i\n")
// Lex translation table
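// (maps the values of grammar symbols to dense internal indices; indices are
// assigned in decreasing order of usage so per-state parse table rows stay
// short)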
f.Format("%sXLAT = map[int]int{%i\n", *oPref)
xlat := make(map[int]int, len(su))
var errSym int
for i, v := range su {
if v.sym.Name == "error" {
errSym = i
}
xlat[v.sym.Value] = i
f.Format("%6d: %3d, // %s (%dx)\n", v.sym.Value, i, v.sym.Name, msu[v.sym])
}
f.Format("%u}\n")
// Symbol names
f.Format("\n%sSymNames = []string{%i\n", *oPref)
for _, v := range su {
f.Format("%q,\n", v.sym.Name)
}
f.Format("%u}\n")
// Reduction table
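// (rule number -> dense index of the rule's LHS symbol and number of its RHS
// components, consulted by the generated parser when reducing)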
f.Format("\n%sReductions = map[int]struct{xsym, components int}{%i\n", *oPref)
for r, rule := range p.Rules {
f.Format("%d: {%d, %d},\n", r, xlat[rule.Sym.Value], len(rule.Components))
}
f.Format("%u}\n")
// XError table
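// (maps a (state, translated lookahead) pair to a custom error message; a
// lookahead of -1 acts as a wildcard for the state)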
f.Format("\n%[1]sXErrors = map[%[1]sXError]string{%i\n", *oPref)
for _, xerr := range p.XErrors {
state := xerr.Stack[len(xerr.Stack)-1]
xsym := -1
if xerr.Lookahead != nil {
xsym = xlat[xerr.Lookahead.Value]
}
f.Format("%[1]sXError{%d, %d}: \"%s\",\n", *oPref, state, xsym, xerr.Msg)
}
f.Format("%u}\n\n")
// Parse table
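// (cell values are stored biased by -minArg so that zero can mean "no
// action"; the narrowest uint type that holds the biased range is chosen)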
tbits := 32
switch n := mathutil.BitLen(maxArg - minArg + 1); {
case n < 8:
tbits = 8
case n < 16:
tbits = 16
}
f.Format("%sParseTab = [%d][]uint%d{%i\n", *oPref, len(p.Table), tbits)
nCells := 0
var tabRow sortutil.Uint64Slice
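// Each cell is packed as xsym<<32 | (arg-minArg) so that sorting tabRow
// orders the cells of a row by column index.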
for si, state := range p.Table {
tabRow = tabRow[:0]
max := 0
for _, act := range state {
sym := act.Sym
xsym, ok := xlat[sym.Value]
if !ok {
panic("internal error 001")
}
max = mathutil.Max(max, xsym)
kind, arg := act.Kind()
switch kind {
case 'a':
arg = 0
case 'r':
arg *= -1
}
tabRow = append(tabRow, uint64(xsym)<<32|uint64(arg-minArg))
}
nCells += max
tabRow.Sort()
col := -1
if si%5 == 0 {
f.Format("// %d\n", si)
}
f.Format("{")
for i, v := range tabRow {
xsym := int(uint32(v >> 32))
arg := int(uint32(v))
if col+1 != xsym {
f.Format("%d: ", xsym)
}
switch {
case i == len(tabRow)-1:
f.Format("%d", arg)
default:
f.Format("%d, ", arg)
}
col = xsym
}
f.Format("},\n")
}
f.Format("%u}\n")
fmt.Fprintf(os.Stderr, "Parse table entries: %d of %d, x %d bits == %d bytes\n", nCells, len(p.Table)*len(msu), tbits, nCells*tbits/8)
if n := p.ConflictsSR; n != 0 {
fmt.Fprintf(os.Stderr, "conflicts: %d shift/reduce\n", n)
}
if n := p.ConflictsRR; n != 0 {
fmt.Fprintf(os.Stderr, "conflicts: %d reduce/reduce\n", n)
}
f.Format(`%u)
var %[1]sDebug = 0
type %[1]sLexer interface {
Lex(lval *%[1]sSymType) int
Error(s string)
}
type %[1]sLexerEx interface {
%[1]sLexer
Reduced(rule, state int, lval *%[1]sSymType) bool
}
func %[1]sSymName(c int) (s string) {
x, ok := %[1]sXLAT[c]
if ok {
return %[1]sSymNames[x]
}
return __yyfmt__.Sprintf("%%d", c)
}
func %[1]slex1(yylex %[1]sLexer, lval *%[1]sSymType) (n int) {
n = yylex.Lex(lval)
if n <= 0 {
n = %[1]sEofCode
}
if %[1]sDebug >= 3 {
__yyfmt__.Printf("\nlex %%s(%%#x %%d), %[4]s: %[3]s\n", %[1]sSymName(n), n, n, %[4]s)
}
return n
}
func %[1]sParse(yylex %[1]sLexer, cache *[]%[1]sSymType) int {
const yyError = %[2]d
yyEx, _ := yylex.(%[1]sLexerEx)
var yyn int
var yylval %[1]sSymType
var yyVAL %[1]sSymType
yyS := *cache
Nerrs := 0 /* number of errors */
Errflag := 0 /* error recovery flag */
yyerrok := func() {
if %[1]sDebug >= 2 {
__yyfmt__.Printf("yyerrok()\n")
}
Errflag = 0
}
_ = yyerrok
yystate := 0
yychar := -1
var yyxchar int
var yyshift int
yyp := -1
goto yystack
ret0:
return 0
ret1:
return 1
yystack:
/* put a state and value onto the stack */
yyp++
if yyp >= len(yyS) {
nyys := make([]%[1]sSymType, len(yyS)*2)
copy(nyys, yyS)
yyS = nyys
*cache = yyS
}
yyS[yyp] = yyVAL
yyS[yyp].yys = yystate
yynewstate:
if yychar < 0 {
yychar = %[1]slex1(yylex, &yylval)
var ok bool
if yyxchar, ok = %[1]sXLAT[yychar]; !ok {
yyxchar = len(%[1]sSymNames) // > tab width
}
}
if %[1]sDebug >= 4 {
var a []int
for _, v := range yyS[:yyp+1] {
a = append(a, v.yys)
}
__yyfmt__.Printf("state stack %%v\n", a)
}
row := %[1]sParseTab[yystate]
yyn = 0
if yyxchar < len(row) {
if yyn = int(row[yyxchar]); yyn != 0 {
yyn += %[1]sTabOfs
}
}
switch {
case yyn > 0: // shift
yychar = -1
yyVAL = yylval
yystate = yyn
yyshift = yyn
if %[1]sDebug >= 2 {
__yyfmt__.Printf("shift, and goto state %%d\n", yystate)
}
if Errflag > 0 {
Errflag--
}
goto yystack
case yyn < 0: // reduce
case yystate == 1: // accept
if %[1]sDebug >= 2 {
__yyfmt__.Println("accept")
}
goto ret0
}
if yyn == 0 {
/* error ... attempt to resume parsing */
switch Errflag {
case 0: /* brand new error */
if %[1]sDebug >= 1 {
__yyfmt__.Printf("no action for %%s in state %%d\n", %[1]sSymName(yychar), yystate)
}
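/* look up a custom message keyed on the current state, then on the state
   reached by the last shift; a -1 lookahead acts as a wildcard */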
msg, ok := %[1]sXErrors[%[1]sXError{yystate, yyxchar}]
if !ok {
msg, ok = %[1]sXErrors[%[1]sXError{yystate, -1}]
}
if !ok && yyshift != 0 {
msg, ok = %[1]sXErrors[%[1]sXError{yyshift, yyxchar}]
}
if !ok {
msg, ok = %[1]sXErrors[%[1]sXError{yyshift, -1}]
}
if !ok || msg == "" {
msg = "syntax error"
}
yylex.Error(msg)
Nerrs++
fallthrough
case 1, 2: /* incompletely recovered error ... try again */
Errflag = 3
/* find a state where "error" is a legal shift action */
for yyp >= 0 {
row := %[1]sParseTab[yyS[yyp].yys]
if yyError < len(row) {
yyn = int(row[yyError])+%[1]sTabOfs
if yyn > 0 { // hit
if %[1]sDebug >= 2 {
__yyfmt__.Printf("error recovery found error shift in state %%d\n", yyS[yyp].yys)
}
yystate = yyn /* simulate a shift of "error" */
goto yystack
}
}
/* the current p has no shift on "error", pop stack */
if %[1]sDebug >= 2 {
__yyfmt__.Printf("error recovery pops state %%d\n", yyS[yyp].yys)
}
yyp--
}
/* there is no state on the stack with an error shift ... abort */
if %[1]sDebug >= 2 {
__yyfmt__.Printf("error recovery failed\n")
}
goto ret1
case 3: /* no shift yet; clobber input char */
if %[1]sDebug >= 2 {
__yyfmt__.Printf("error recovery discards %%s\n", %[1]sSymName(yychar))
}
if yychar == %[1]sEofCode {
goto ret1
}
yychar = -1
goto yynewstate /* try again in the same state */
}
}
r := -yyn
x0 := %[1]sReductions[r]
x, n := x0.xsym, x0.components
yypt := yyp
_ = yypt // guard against "declared and not used"
yyp -= n
if yyp+1 >= len(yyS) {
nyys := make([]%[1]sSymType, len(yyS)*2)
copy(nyys, yyS)
yyS = nyys
*cache = yyS
}
yyVAL = yyS[yyp+1]
/* consult goto table to find next state */
exState := yystate
yystate = int(%[1]sParseTab[yyS[yyp].yys][x])+%[1]sTabOfs
/* reduction by production r */
if %[1]sDebug >= 2 {
__yyfmt__.Printf("reduce using rule %%v (%%s), and goto state %%d\n", r, %[1]sSymNames[x], yystate)
}
switch r {%i
`,
*oPref, errSym, *oDlvalf, *oDlval)
for r, rule := range p.Rules {
if rule.Action == nil {
continue
}
action := rule.Action.Values
if len(action) == 0 {
continue
}
if len(action) == 1 {
part := action[0]
if part.Type == parser.ActionValueGo {
src := part.Src
src = src[1 : len(src)-1] // Remove the leading '{' and trailing '}'
if strings.TrimSpace(src) == "" {
continue
}
}
}
components := rule.Components
typ := rule.Sym.Type
max := len(components)
if p1 := rule.Parent; p1 != nil {
max = rule.MaxParentDlr
components = p1.Components
}
f.Format("case %d: ", r)
for _, part := range action {
num := part.Num
switch part.Type {
case parser.ActionValueGo:
f.Format("%s", part.Src)
case parser.ActionValueDlrDlr:
f.Format("yyVAL.%s", typ)
if typ == "" {
panic("internal error 002")
}
case parser.ActionValueDlrNum:
typ := p.Syms[components[num-1]].Type
if typ == "" {
panic("internal error 003")
}
f.Format("yyS[yypt-%d].%s", max-num, typ)
case parser.ActionValueDlrTagDlr:
f.Format("yyVAL.%s", part.Tag)
case parser.ActionValueDlrTagNum:
f.Format("yyS[yypt-%d].%s", max-num, part.Tag)
}
}
f.Format("\n")
}
f.Format(`%u
}
if yyEx != nil && yyEx.Reduced(r, exState, &yyVAL) {
return -1
}
goto yystack /* stack new state and value */
}
%[2]s
`, *oPref, p.Tail)
_ = oNoLines //TODO Ignored for now
return nil
}
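// injectImport inserts the __yyfmt__ "fmt" import required by the generated
// code immediately after the package clause of src; if no package clause is
// found the import is prepended to src.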
func injectImport(src string) string {
const inj = `
import __yyfmt__ "fmt"
`
fset := token.NewFileSet()
file := fset.AddFile("", -1, len(src))
var s scanner.Scanner
s.Init(
file,
[]byte(src),
nil,
scanner.ScanComments,
)
for {
switch _, tok, _ := s.Scan(); tok {
case token.EOF:
return inj + src
case token.PACKAGE:
s.Scan() // ident
pos, _, _ := s.Scan()
ofs := file.Offset(pos)
return src[:ofs] + inj + src[ofs:]
}
}
}