Łukasz Nowak / caddy / Commits

Commit 31878151, authored Jan 21, 2015 by Matthew Holt

    Wrote lexer tests

Parent: 286d558c

Showing 2 changed files with 156 additions and 19 deletions:

    config/lexer.go        +17  -19
    config/lexer_test.go  +139   -0
config/lexer.go
```diff
@@ -6,15 +6,23 @@ import (
 	"unicode"
 )
 
-// lexer is a utility which can get values, token by
-// token, from a reader. A token is a word, and tokens
-// are separated by whitespace. A word can be enclosed in
-// quotes if it contains whitespace.
-type lexer struct {
-	reader *bufio.Reader
-	token  token
-	line   int
-}
+type (
+	// lexer is a utility which can get values, token by
+	// token, from a reader. A token is a word, and tokens
+	// are separated by whitespace. A word can be enclosed in
+	// quotes if it contains whitespace.
+	lexer struct {
+		reader *bufio.Reader
+		token  token
+		line   int
+	}
+
+	// token represents a single processable unit.
+	token struct {
+		line int
+		text string
+	}
+)
 
 // load prepares the lexer to scan a file for tokens.
 func (l *lexer) load(file io.Reader) error {
@@ -63,10 +71,6 @@ func (l *lexer) next() bool {
 				return makeToken()
 			}
 		}
-		if ch == '\\' && !escaped {
-			escaped = true
-			continue
-		}
 		if ch == '\n' {
 			l.line++
 		}
@@ -108,9 +112,3 @@ func (l *lexer) next() bool {
 		val = append(val, ch)
 	}
 }
-
-// token represents a single processable unit.
-type token struct {
-	line int
-	text string
-}
```
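The diff above only shows fragments of the `next()` method, so for orientation here is a minimal, standalone sketch of how a whitespace-and-quote lexer with this shape can be written. It is not the code from this commit: only the `lexer`/`token` fields and the `load`/`next` signatures come from the diff, while the method bodies (including the `#` comment handling and line counting) are illustrative assumptions, and backslash escapes inside quotes are omitted entirely.

```go
// Illustrative sketch only; not the implementation from this commit.
// The type shape and signatures follow the diff above; the bodies are
// assumptions about how such a lexer can be written. Backslash escapes
// inside quotes (which the real lexer supports) are omitted here.
package config

import (
	"bufio"
	"io"
	"unicode"
)

type (
	lexer struct {
		reader *bufio.Reader
		token  token
		line   int
	}

	token struct {
		line int
		text string
	}
)

// load prepares the lexer to scan a reader for tokens.
func (l *lexer) load(file io.Reader) error {
	l.reader = bufio.NewReader(file)
	l.line = 1
	return nil
}

// next reads the next token into l.token and reports whether one was found.
func (l *lexer) next() bool {
	var val []rune
	var quoted, comment bool

	makeToken := func() bool {
		l.token.text = string(val)
		return true
	}

	for {
		ch, _, err := l.reader.ReadRune()
		if err != nil {
			// EOF or read error: emit whatever is buffered, then stop.
			if len(val) > 0 {
				return makeToken()
			}
			return false
		}

		if quoted {
			if ch == '"' {
				return makeToken() // closing quote ends the token
			}
			if ch == '\n' {
				l.line++ // quoted values may span lines
			}
			val = append(val, ch)
			continue
		}

		if unicode.IsSpace(ch) {
			if ch == '\n' {
				l.line++
				comment = false // comments run to end of line
			}
			if len(val) > 0 {
				return makeToken()
			}
			continue
		}

		if comment {
			continue // discard commented-out characters
		}

		if len(val) == 0 {
			if ch == '#' {
				comment = true
				continue
			}
			if ch == '"' {
				quoted = true
				l.token.line = l.line
				continue
			}
			l.token.line = l.line // record where this token starts
		}

		val = append(val, ch)
	}
}
```

The actual `next()` in config/lexer.go also unescapes `\"` inside quoted values, which is exactly what the `A "quoted \"value\" inside" B` case in the new test file below expects.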
config/lexer_test.go (new file, 0 → 100644)
```go
package config

import (
	"strings"
	"testing"
)

type lexerTestCase struct {
	input    string
	expected []token
}

func TestLexer(t *testing.T) {
	testCases := []lexerTestCase{
		{
			input: `host:123`,
			expected: []token{
				{line: 1, text: "host:123"},
			},
		},
		{
			input: `host:123

			directive`,
			expected: []token{
				{line: 1, text: "host:123"},
				{line: 3, text: "directive"},
			},
		},
		{
			input: `host:123 {
				directive
			}`,
			expected: []token{
				{line: 1, text: "host:123"},
				{line: 1, text: "{"},
				{line: 2, text: "directive"},
				{line: 3, text: "}"},
			},
		},
		{
			input: `host:123 { directive }`,
			expected: []token{
				{line: 1, text: "host:123"},
				{line: 1, text: "{"},
				{line: 1, text: "directive"},
				{line: 1, text: "}"},
			},
		},
		{
			input: `host:123 {
				#comment
				directive
				# comment
				foobar # another comment
			}`,
			expected: []token{
				{line: 1, text: "host:123"},
				{line: 1, text: "{"},
				{line: 3, text: "directive"},
				{line: 5, text: "foobar"},
				{line: 6, text: "}"},
			},
		},
		{
			input: `a "quoted value" b
			foobar`,
			expected: []token{
				{line: 1, text: "a"},
				{line: 1, text: "quoted value"},
				{line: 1, text: "b"},
				{line: 2, text: "foobar"},
			},
		},
		{
			input: `A "quoted \"value\" inside" B`,
			expected: []token{
				{line: 1, text: "A"},
				{line: 1, text: `quoted "value" inside`},
				{line: 1, text: "B"},
			},
		},
		{
			input: `A "quoted value with line
					break inside" {
				foobar
			}`,
			expected: []token{
				{line: 1, text: "A"},
				{line: 1, text: "quoted value with line\n\t\t\t\t\tbreak inside"},
				{line: 2, text: "{"},
				{line: 3, text: "foobar"},
				{line: 4, text: "}"},
			},
		},
		{
			input: "skip those\r\nCR characters",
			expected: []token{
				{line: 1, text: "skip"},
				{line: 1, text: "those"},
				{line: 2, text: "CR"},
				{line: 2, text: "characters"},
			},
		},
	}

	for i, testCase := range testCases {
		actual := tokenize(testCase.input)
		lexerCompare(t, i, testCase.expected, actual)
	}
}

func tokenize(input string) (tokens []token) {
	l := lexer{}
	l.load(strings.NewReader(input))
	for l.next() {
		tokens = append(tokens, l.token)
	}
	return
}

func lexerCompare(t *testing.T, n int, expected, actual []token) {
	if len(expected) != len(actual) {
		t.Errorf("Test case %d: expected %d token(s) but got %d", n, len(expected), len(actual))
	}

	for i := 0; i < len(actual) && i < len(expected); i++ {
		if actual[i].line != expected[i].line {
			t.Errorf("Test case %d token %d ('%s'): expected line %d but was line %d",
				n, i, expected[i].text, expected[i].line, actual[i].line)
			break
		}
		if actual[i].text != expected[i].text {
			t.Errorf("Test case %d token %d: expected text '%s' but was '%s'",
				n, i, expected[i].text, actual[i].text)
			break
		}
	}
}
```
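These are ordinary `go test` tests, so (assuming a standard Go toolchain) running something like `go test -run TestLexer -v` from inside the config package directory should exercise them; the `tokenize` and `lexerCompare` helpers keep each failure message tied to its table entry's index.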