Commit ea006a85 authored by Robert Griesemer

go/scanner: don't eat \r in comments if that shortens the comment

For consistent formatting across platforms we strip \r's from
comments. This happens in the go/scanner package, which already
does the same for raw string literals, where it is mandated by
the spec. But if a (sequence of) \r appears in a regular
(/*-style) comment between a * and a /, removing that (sequence
of) \r shortens that text segment to */, which terminates the
comment prematurely.

Don't do it.
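For illustration only (not part of this change), here is a minimal
sketch using the public go/scanner API; the file name and source
string are invented for the example. It scans a /*-style comment
whose body ends in *\r/ and prints the comment literal, which must
keep that \r:

package main

import (
	"fmt"
	"go/scanner"
	"go/token"
)

func main() {
	// A comment whose body ends in *\r/ ; stripping the \r would turn
	// the tail into */ and let the comment text close too early.
	src := []byte("package p /**\r/*/\n")

	fset := token.NewFileSet()
	file := fset.AddFile("example.go", fset.Base(), len(src))

	var s scanner.Scanner
	s.Init(file, src, nil, scanner.ScanComments)
	for {
		_, tok, lit := s.Scan()
		if tok == token.EOF {
			break
		}
		if tok == token.COMMENT {
			fmt.Printf("%q\n", lit) // with this change: "/**\r/*/"
		}
	}
}

Before this change the literal came back as "/**/*/"; re-parsing
that text (for example, after go/printer wrote it out) ends the
comment right after "/**/" and leaves a stray "*/".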

As an aside, a better approach would be to not touch comments
(and raw string literals, for that matter) in the scanner and
leave the extra processing to clients. That is the approach
taken by the cmd/compile/internal/syntax package. However, we
probably can't change the semantics here too much, so just do
the minimal change that doesn't produce invalid comments. It's
an esoteric case, after all.

Fixes #11151.

Change-Id: Ib4dcb52094f13c235e840c9672e439ea65fef961
Reviewed-on: https://go-review.googlesource.com/87498
Reviewed-by: Matthew Dempsky <mdempsky@google.com>
parent c5f3a8b1
@@ -710,3 +710,26 @@ type bar int // comment2
 		t.Errorf("got %q, want %q", buf.String(), bar)
 	}
 }
+
+func TestIssue11151(t *testing.T) {
+	const src = "package p\t/*\r/1\r*\r/2*\r\r\r\r/3*\r\r+\r\r/4*/\n"
+	fset := token.NewFileSet()
+	f, err := parser.ParseFile(fset, "", src, parser.ParseComments)
+	if err != nil {
+		t.Fatal(err)
+	}
+	var buf bytes.Buffer
+	Fprint(&buf, fset, f)
+	got := buf.String()
+	const want = "package p\t/*/1*\r/2*\r/3*+/4*/\n" // \r following opening /* should be stripped
+	if got != want {
+		t.Errorf("\ngot : %q\nwant: %q", got, want)
+	}
+
+	// the resulting program must be valid
+	_, err = parser.ParseFile(fset, "", got, 0)
+	if err != nil {
+		t.Errorf("%v\norig: %q\ngot : %q", err, src, got)
+	}
+}
@@ -204,7 +204,7 @@ func (s *Scanner) scanComment() string {
 exit:
 	lit := s.src[offs:s.offset]
 	if hasCR {
-		lit = stripCR(lit)
+		lit = stripCR(lit, lit[1] == '*')
 	}
 
 	return string(lit)
@@ -480,11 +480,16 @@ func (s *Scanner) scanString() string {
 	return string(s.src[offs:s.offset])
 }
 
-func stripCR(b []byte) []byte {
+func stripCR(b []byte, comment bool) []byte {
 	c := make([]byte, len(b))
 	i := 0
-	for _, ch := range b {
-		if ch != '\r' {
+	for j, ch := range b {
+		// In a /*-style comment, don't strip \r from *\r/ (incl.
+		// sequences of \r from *\r\r...\r/) since the resulting
+		// */ would terminate the comment too early unless the \r
+		// is immediately following the opening /* in which case
+		// it's ok because /*/ is not closed yet (issue #11151).
+		if ch != '\r' || comment && i > len("/*") && c[i-1] == '*' && j+1 < len(b) && b[j+1] == '/' {
 			c[i] = ch
 			i++
 		}
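Since stripCR is unexported, the following self-contained sketch
mirrors the new logic so it can be tried outside the package; the
name stripCRDemo and the example inputs are invented for
illustration:

package main

import "fmt"

// stripCRDemo copies b while dropping \r bytes. Inside a /*-style comment
// (comment == true) a \r that sits between an already-copied '*' and a
// following '/' is kept, because dropping it would form */ and close the
// comment early; the i > len("/*") check still allows stripping a \r that
// directly follows the opening /*.
func stripCRDemo(b []byte, comment bool) []byte {
	c := make([]byte, len(b))
	i := 0
	for j, ch := range b {
		if ch != '\r' || comment && i > len("/*") && c[i-1] == '*' && j+1 < len(b) && b[j+1] == '/' {
			c[i] = ch
			i++
		}
	}
	return c[:i]
}

func main() {
	fmt.Printf("%q\n", stripCRDemo([]byte("/*\r*/"), true))   // "/**/": safe to strip
	fmt.Printf("%q\n", stripCRDemo([]byte("/**\r/*/"), true)) // "/**\r/*/": kept
	fmt.Printf("%q\n", stripCRDemo([]byte("`a\rb`"), false))  // "`ab`": raw strings always strip
}

Note that in a run like *\r\r\r/ only the last \r is kept (the
earlier ones are not followed by '/'), which still leaves *\r/
rather than */.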
@@ -514,7 +519,7 @@ func (s *Scanner) scanRawString() string {
 	lit := s.src[offs:s.offset]
 	if hasCR {
-		lit = stripCR(lit)
+		lit = stripCR(lit, false)
 	}
 
 	return string(lit)
@@ -45,6 +45,8 @@ var tokens = [...]elt{
 	{token.COMMENT, "/* a comment */", special},
 	{token.COMMENT, "// a comment \n", special},
 	{token.COMMENT, "/*\r*/", special},
+	{token.COMMENT, "/**\r/*/", special}, // issue 11151
+	{token.COMMENT, "/**\r\r/*/", special},
 	{token.COMMENT, "//\r\n", special},
 
 	// Identifiers and basic type literals
@@ -270,7 +272,7 @@ func TestScan(t *testing.T) {
 		switch e.tok {
 		case token.COMMENT:
 			// no CRs in comments
-			elit = string(stripCR([]byte(e.lit)))
+			elit = string(stripCR([]byte(e.lit), e.lit[1] == '*'))
 			//-style comment literal doesn't contain newline
 			if elit[1] == '/' {
 				elit = elit[0 : len(elit)-1]
@@ -284,7 +286,7 @@ func TestScan(t *testing.T) {
 				// no CRs in raw string literals
 				elit = e.lit
 				if elit[0] == '`' {
-					elit = string(stripCR([]byte(elit)))
+					elit = string(stripCR([]byte(elit), false))
 				}
 			} else if e.tok.IsKeyword() {
 				elit = e.lit
@@ -309,6 +311,26 @@ func TestScan(t *testing.T) {
 	}
 }
 
+func TestStripCR(t *testing.T) {
+	for _, test := range []struct{ have, want string }{
+		{"//\n", "//\n"},
+		{"//\r\n", "//\n"},
+		{"//\r\r\r\n", "//\n"},
+		{"//\r*\r/\r\n", "//*/\n"},
+		{"/**/", "/**/"},
+		{"/*\r/*/", "/*/*/"},
+		{"/*\r*/", "/**/"},
+		{"/**\r/*/", "/**\r/*/"},
+		{"/*\r/\r*\r/*/", "/*/*\r/*/"},
+		{"/*\r\r\r\r*/", "/**/"},
+	} {
+		got := string(stripCR([]byte(test.have), len(test.have) >= 2 && test.have[1] == '*'))
+		if got != test.want {
+			t.Errorf("stripCR(%q) = %q; want %q", test.have, got, test.want)
+		}
+	}
+}
+
 func checkSemi(t *testing.T, line string, mode Mode) {
 	var S Scanner
 	file := fset.AddFile("TestSemis", fset.Base(), len(line))
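Assuming a normal Go installation with the standard library
sources, the affected tests can be exercised directly, for
example:

	go test go/scanner -run 'TestScan|TestStripCR'
	go test go/printer -run TestIssue11151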