changeset 1115:afd4133eb2f6

RT-20064: fix counting of lines and characters in the CSS lexer so that lexer error messages can be more informative
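
The gist of the change: the lexer now records the 1-based line and 0-based column (offset) at which each token starts and passes them to the new Token(type, text, line, offset) constructor, so downstream error messages can point at the exact character. A minimal sketch of reading those positions back, assuming the lexer's getInstance() accessor and a nextToken() method (neither appears in the hunks below):

    package com.sun.javafx.css.parser;   // same package, since the Token accessors may be package-private

    import java.io.StringReader;

    // Hypothetical driver, for illustration only.
    final class TokenPositionDemo {
        public static void main(String[] args) {
            CSSLexer lexer = CSSLexer.getInstance();       // assumed accessor, not shown in this changeset
            lexer.setReader(new StringReader("a: b;\nc: d;"));
            Token tok = lexer.nextToken();                 // assumed method name, not shown in this changeset
            while (tok.getType() != Token.EOF) {
                // getLine() is 1-based; getOffset() is the 0-based column of the token's first character
                System.out.println(tok + " at line " + tok.getLine() + ", offset " + tok.getOffset());
                tok = lexer.nextToken();
            }
        }
    }
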
author David Grieve <david.grieve@oracle.com>
date Tue, 22 May 2012 11:32:39 -0400
parents ae1d114a26e3
children 535fa5b98147
files javafx-ui-common/src/com/sun/javafx/css/parser/CSSLexer.java javafx-ui-common/src/com/sun/javafx/css/parser/Token.java javafx-ui-common/src/javafx/scene/doc-files/cssref.html javafx-ui-common/test/unit/com/sun/javafx/css/parser/CSSLexerTest.java
diffstat 4 files changed, 464 insertions(+), 52 deletions(-)
--- a/javafx-ui-common/src/com/sun/javafx/css/parser/CSSLexer.java	Tue May 22 12:09:26 2012 +1200
+++ b/javafx-ui-common/src/com/sun/javafx/css/parser/CSSLexer.java	Tue May 22 11:32:39 2012 -0400
@@ -364,6 +364,9 @@
 
     public void setReader(Reader reader) {
         this.reader = reader;
+        lastc = -1;
+        pos = offset = 0;
+        line = 1;
         this.currentState = initState;
         this.token = null;
         try {
@@ -374,25 +377,34 @@
     }
 
     private Token scanImportant()  throws IOException{
+        // CSS 2.1 grammar for important_sym
+        // "!"({w}|{comment})*{I}{M}{P}{O}{R}{T}{A}{N}{T}
         final Recognizer[] important_sym =
                 new Recognizer[] { I, M, P, O, R, T, A, N, T };
         int current = 0;
-
+        
+        text.append((char)ch);
+        
         // get past the '!'
         ch = readChar();
-
+       
         while(true) {
+            
             switch (ch) {
 
                 case Token.EOF:
                     token = Token.EOF_TOKEN;
                     return token;
 
-                case '/':
+                case '/':                    
                     ch = readChar();
                     if (ch == '*') skipComment();
-                    else return new Token(Token.INVALID);
-                    if (ch != -1) ch = readChar();
+                    else {
+                        text.append('/').append((char)ch);
+                        int temp = offset;
+                        offset = pos;
+                        return new Token(Token.INVALID, text.toString(), line, temp);
+                    }
                     break;
 
                 case ' ':
@@ -407,10 +419,13 @@
                     boolean accepted = true;
                     while(accepted && current < important_sym.length) {
                         accepted = important_sym[current++].recognize(ch);
+                        text.append((char)ch);
                         ch = readChar();
                     }
                     if (accepted) {
-                        return new Token(IMPORTANT_SYM, "!important");
+                        final int temp = offset;
+                        offset = pos-1; // will have read one char too many
+                        return new Token(IMPORTANT_SYM, "!important", line, temp);
                     } else {
                         while (ch != ';' &&
                                ch != '}' &&
@@ -418,7 +433,9 @@
                             ch = readChar();
                         }
                         if (ch != Token.EOF) {
-                            return Token.SKIP_TOKEN;
+                            final int temp = offset;
+                            offset = pos-1; // will have read one char too many
+                            return new Token(Token.SKIP, text.toString(), line, temp);
                         } else {
                             return Token.EOF_TOKEN;
                         }
@@ -532,6 +549,8 @@
             if (ch == '*') {
                 ch = readChar();
                 if (ch == '/') {
+                    offset = pos;
+                    ch=readChar();
                     break;
                 }
             } else {
@@ -540,7 +559,8 @@
         }
     }
 
-    private int pos = 1;
+    private int pos = 0;
+    private int offset = 0;
     private int line = 1;
     private int lastc = -1;
 
@@ -548,12 +568,17 @@
 
         int c = reader.read();
 
-        if (c == '\r' || (c == '\n' && lastc != '\r')) {
-            pos = 1;
+        // only reset line and pos counters after having read a NL since
+        // a NL token is created after the readChar
+        if (lastc == '\n' || (lastc == '\r' && c != '\n')) {
+            // set pos to 1 since we've already read the first char of the new line
+            pos = 1; 
+            offset = 0;
             line++;
         } else {
             pos++;
         }
+        
         lastc = c;
         return c;
     }
@@ -614,7 +639,7 @@
                     final int type = currentState.getType();
 
                     //
-                    // If the token is an INVALID and
+                    // If the token is INVALID and
                     // the currentState is something other than initState, then
                     // there is an error, so return INVALID.
                      //
@@ -622,9 +647,10 @@
                         !currentState.equals(initState)) {
 
                         final String str = text.toString();
-                        Token tok = new Token(type, str);
-                        tok.setOffset(pos);
-                        tok.setLine(line);
+                        Token tok = new Token(type, str, line, offset);
+                        // because the next char has already been read, 
+                        // the next token starts at pos-1
+                        offset = pos-1;
 
                         // return here, but the next char has already been read.
                         return tok;
@@ -650,9 +676,11 @@
                         }
 
                         if (ch != -1) {
-                            token = new Token(STRING, text.toString());
+                            token = new Token(STRING, text.toString(), line, offset);
+                            offset = pos;
                         } else {
-                            token = new Token(Token.INVALID);
+                            token = new Token(Token.INVALID, text.toString(), line, offset);
+                            offset = pos;
                         }
                         break;
 
@@ -661,8 +689,6 @@
                         if (ch == '*') {
                             skipComment();
                              if (ch != -1) {
-                                text.append((char)ch);
-                                ch = readChar();
                                 continue;
                             } else {
                                 token = Token.EOF_TOKEN;
@@ -670,79 +696,95 @@
                             }
                         } else {
                             // not a comment - a SOLIDUS
-                            token = new Token(SOLIDUS,"/");
+                            token = new Token(SOLIDUS,"/", line, offset);
+                            offset = pos;
                         }
                         break;
 
                     case '>':
 
-                        token = new Token(GREATER,">");
+                        token = new Token(GREATER,">", line, offset);
+                        offset = pos;
                         break;
 
                     case '{':
-                        token = new Token(LBRACE,"{");
+                        token = new Token(LBRACE,"{", line, offset);
+                        offset = pos;
                         break;
 
                     case '}':
-                        token = new Token(RBRACE,"}");
+                        token = new Token(RBRACE,"}", line, offset);
+                        offset = pos;
                         break;
 
                     case ';':
-                        token = new Token(SEMI,";");
+                        token = new Token(SEMI,";", line, offset);
+                        offset = pos;
                         break;
 
                     case ':':
-                        token = new Token(COLON,":");
+                        token = new Token(COLON,":", line, offset);
+                        offset = pos;
                         break;
 
                     case '*':
-                        token = new Token(STAR,"*");
+                        token = new Token(STAR,"*", line, offset);
+                        offset = pos;
                         break;
 
                     case '(':
-                        token = new Token(LPAREN,"(");
+                        token = new Token(LPAREN,"(", line, offset);
+                        offset = pos;
                         break;
 
                     case ')':
-                        token = new Token(RPAREN,")");
+                        token = new Token(RPAREN,")", line, offset);
+                        offset = pos;
                         break;
 
                     case ',':
-                        token = new Token(COMMA,",");
+                        token = new Token(COMMA,",", line, offset);
+                        offset = pos;
                         break;
 
                     case '.':
-                        token = new Token(DOT,".");
+                        token = new Token(DOT,".", line, offset);
+                        offset = pos;
                         break;
 
                     case ' ':
                     case '\t':
                     case '\f':
-                        token = new Token(WS, Character.toString((char)ch));
+                        token = new Token(WS, Character.toString((char)ch), line, offset);
+                        offset = pos;
                         break;
 
 
                     case '\r':
-                        token = new Token(NL);
-
+                        token = new Token(NL, "\\r", line, offset);
+                        // offset and pos are reset on next readChar
+                        
                         ch = readChar();
-                        if (ch != '\n') {
-                            token.setOffset(pos);
-                            token.setLine(line);
+                        if (ch == '\n') {
+                            token = new Token(NL, "\\r\\n", line, offset);
+                            // offset and pos are reset on next readChar
+                        } else {
+                            // the next character has already been read, so
+                            // return the NL token here (and avoid the readChar
+                            // at the end of the loop below)
                             final Token tok = token;
                             token = (ch == -1) ? Token.EOF_TOKEN : null;
                             return tok;
-                        }
+                        }                        
                         break;
 
                     case '\n':
-                        token = new Token(NL);
+                        token = new Token(NL, "\\n", line, offset);
+                        // offset and pos are reset on next readChar
                         break;
 
                     case '!':
                         Token tok = scanImportant();
-                        tok.setLine(line);
-                        tok.setOffset(pos);
                         return tok;
 
                     case '@':
@@ -754,25 +796,24 @@
                         if (ch == ';') {
                             ch = readChar();
                             token = Token.SKIP_TOKEN;
-
+                            offset = pos;
                         }
                         break;
 
                     default:
 //                      System.err.println("hit default case: ch = " + Character.toString((char)ch));
-                        token = new Token(Token.INVALID, Character.toString((char)ch));
+                        token = new Token(Token.INVALID, Character.toString((char)ch), line, offset);
+                        offset = pos;
                         break;
                 }
 
                 if (token == null) {
 //                    System.err.println("token is null! ch = " + Character.toString((char)ch));
-                    token = new Token(Token.INVALID, null);
+                    token = new Token(Token.INVALID, null, line, offset);
+                    offset = pos;
                 } else if (token.getType() == Token.EOF) {
                     return token;
-                } else {
-                    token.setLine(line);
-                    token.setOffset(pos);
-                }
+                } 
 
                 if (ch != -1) ch = readChar();
 
--- a/javafx-ui-common/src/com/sun/javafx/css/parser/Token.java	Tue May 22 12:09:26 2012 +1200
+++ b/javafx-ui-common/src/com/sun/javafx/css/parser/Token.java	Tue May 22 11:32:39 2012 -0400
@@ -36,11 +36,15 @@
     final static Token INVALID_TOKEN = new Token(INVALID, "INVALID");
     final static Token SKIP_TOKEN = new Token(SKIP, "SKIP");
 
-    Token(int type, String text) {
+    Token(int type, String text, int line, int offset) {
         this.type = type;
         this.text = text;
-        this.line = -1;
-        this.offset = -1;
+        this.line = line;
+        this.offset = offset;        
+    }
+    
+    Token(int type, String text) {
+        this(type, text, -1, -1);
     }
 
     Token(int type) {
--- a/javafx-ui-common/src/javafx/scene/doc-files/cssref.html	Tue May 22 12:09:26 2012 +1200
+++ b/javafx-ui-common/src/javafx/scene/doc-files/cssref.html	Tue May 22 11:32:39 2012 -0400
@@ -609,9 +609,8 @@
     <p>If the error is found while parsing a file, the file URL will be given.
       If the error is from an inline style (as in the example above), the URL is
       given as a question mark. The line and position give an offset into the
-      file or string where the token begins. <em>Note well!</em> The lexer does
-      a poor job of counting lines and characters. The line and position may not
-      be accurate. This should be resolved in a future release. <br>
+      file or string where the token begins. <em>Please note that the line and
+      position may not be accurate in releases prior to JavaFX 2.2.</em><br>
     </p>
     <p>Applications needing to detect errors from the parser can add a listener
       to the errors property of com.sun.javafx.css.StyleManager. This is not
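
The paragraph above mentions listening to the errors property of com.sun.javafx.css.StyleManager. A hedged sketch of that hook follows; StyleManager is private API, and both the static errorsProperty() accessor and the CssError element type used here are assumptions that may not match this exact release (some releases reach the list through StyleManager.getInstance()):

    import com.sun.javafx.css.CssError;                    // assumed element type
    import com.sun.javafx.css.StyleManager;
    import javafx.collections.ListChangeListener;

    public final class CssErrorLogger {
        // Call once at startup, e.g. from Application.init().
        public static void install() {
            StyleManager.errorsProperty().addListener(new ListChangeListener<CssError>() {   // assumed accessor
                @Override public void onChanged(Change<? extends CssError> change) {
                    while (change.next()) {
                        if (change.wasAdded()) {
                            for (CssError error : change.getAddedSubList()) {
                                // with this fix, the reported line and position should match the stylesheet
                                System.err.println(error);
                            }
                        }
                    }
                }
            });
        }
    }
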
--- a/javafx-ui-common/test/unit/com/sun/javafx/css/parser/CSSLexerTest.java	Tue May 22 12:09:26 2012 +1200
+++ b/javafx-ui-common/test/unit/com/sun/javafx/css/parser/CSSLexerTest.java	Tue May 22 11:32:39 2012 -0400
@@ -284,4 +284,372 @@
         );
     }
 
+    @Test 
+    public void testTokenOffset() {
+        
+        String str =  "a: b;";
+        // [?][0] = line
+        // [?][1] = offset
+        Token[] expected = {
+            new Token(CSSLexer.IDENT, "a", 1, 0),
+            new Token(CSSLexer.COLON, ":", 1, 1),
+            new Token(CSSLexer.WS,    " ", 1, 2),
+            new Token(CSSLexer.IDENT, "b", 1, 3),
+            new Token(CSSLexer.SEMI,  ";", 1, 4),
+            Token.EOF_TOKEN
+        };
+        
+        List<Token> tlist = getTokens(str);
+        checkTokens(tlist, expected);
+        
+        for(int n=0; n<tlist.size(); n++) {
+            Token tok = tlist.get(n);
+            assertEquals("bad line. tok="+tok, expected[n].getLine(), tok.getLine());
+            assertEquals("bad offset. tok="+tok, expected[n].getOffset(), tok.getOffset());
+        }
+                
+    }
+    
+    @Test 
+    public void testTokenLineAndOffsetWithCR() {
+        
+        String str =  "a: b;\rc: d;";
+        // [?][0] = line
+        // [?][1] = offset
+        Token[] expected = {
+            new Token(CSSLexer.IDENT, "a", 1, 0),
+            new Token(CSSLexer.COLON, ":", 1, 1),
+            new Token(CSSLexer.WS,    " ", 1, 2),
+            new Token(CSSLexer.IDENT, "b", 1, 3),
+            new Token(CSSLexer.SEMI,  ";", 1, 4),
+            new Token(CSSLexer.NL,  "\\r", 1, 5),
+            new Token(CSSLexer.IDENT, "c", 2, 0),
+            new Token(CSSLexer.COLON, ":", 2, 1),
+            new Token(CSSLexer.WS,    " ", 2, 2),
+            new Token(CSSLexer.IDENT, "d", 2, 3),
+            new Token(CSSLexer.SEMI,  ";", 2, 4),
+            Token.EOF_TOKEN
+        };
+        
+        List<Token> tlist = getTokens(str);
+        checkTokens(tlist, expected);
+        
+        for(int n=0; n<tlist.size(); n++) {
+            Token tok = tlist.get(n);
+            assertEquals("bad line. tok="+tok, expected[n].getLine(), tok.getLine());
+            assertEquals("bad offset. tok="+tok, expected[n].getOffset(), tok.getOffset());
+        }
+                
+    }
+
+    @Test 
+    public void testTokenLineAndOffsetWithLF() {
+        
+        String str =  "a: b;\nc: d;";
+        // [?][0] = line
+        // [?][1] = offset
+        Token[] expected = {
+            new Token(CSSLexer.IDENT, "a", 1, 0),
+            new Token(CSSLexer.COLON, ":", 1, 1),
+            new Token(CSSLexer.WS,    " ", 1, 2),
+            new Token(CSSLexer.IDENT, "b", 1, 3),
+            new Token(CSSLexer.SEMI,  ";", 1, 4),
+            new Token(CSSLexer.NL,  "\\n", 1, 5),
+            new Token(CSSLexer.IDENT, "c", 2, 0),
+            new Token(CSSLexer.COLON, ":", 2, 1),
+            new Token(CSSLexer.WS,    " ", 2, 2),
+            new Token(CSSLexer.IDENT, "d", 2, 3),
+            new Token(CSSLexer.SEMI,  ";", 2, 4),
+            Token.EOF_TOKEN
+        };
+        
+        List<Token> tlist = getTokens(str);
+        checkTokens(tlist, expected);
+        
+        for(int n=0; n<tlist.size(); n++) {
+            Token tok = tlist.get(n);
+            assertEquals("bad line. tok="+tok, expected[n].getLine(), tok.getLine());
+            assertEquals("bad offset. tok="+tok, expected[n].getOffset(), tok.getOffset());
+        }
+                
+    }
+
+    @Test 
+    public void testTokenLineAndOffsetWithCRLF() {
+        //             012345   01234
+        String str =  "a: b;\r\nc: d;";
+        // [?][0] = line
+        // [?][1] = offset
+        Token[] expected = {
+            new Token(CSSLexer.IDENT, "a", 1, 0),
+            new Token(CSSLexer.COLON, ":", 1, 1),
+            new Token(CSSLexer.WS,    " ", 1, 2),
+            new Token(CSSLexer.IDENT, "b", 1, 3),
+            new Token(CSSLexer.SEMI,  ";", 1, 4),
+            new Token(CSSLexer.NL,  "\\r\\n", 1, 5),
+            new Token(CSSLexer.IDENT, "c", 2, 0),
+            new Token(CSSLexer.COLON, ":", 2, 1),
+            new Token(CSSLexer.WS,    " ", 2, 2),
+            new Token(CSSLexer.IDENT, "d", 2, 3),
+            new Token(CSSLexer.SEMI,  ";", 2, 4),
+            Token.EOF_TOKEN
+        };
+        
+        List<Token> tlist = getTokens(str);
+        checkTokens(tlist, expected);
+        
+        for(int n=0; n<tlist.size(); n++) {
+            Token tok = tlist.get(n);
+            assertEquals("bad line. tok="+tok, expected[n].getLine(), tok.getLine());
+            assertEquals("bad offset. tok="+tok, expected[n].getOffset(), tok.getOffset());
+        }
+                
+    }
+    
+    @Test 
+    public void testTokenOffsetWithEmbeddedComment() {
+        //             0123456789012345
+        String str =  "a: /*comment*/b;";
+        // [?][0] = line
+        // [?][1] = offset
+        Token[] expected = {
+            new Token(CSSLexer.IDENT, "a", 1, 0),
+            new Token(CSSLexer.COLON, ":", 1, 1),
+            new Token(CSSLexer.WS,    " ", 1, 2),
+            new Token(CSSLexer.IDENT, "b", 1, 14), 
+            new Token(CSSLexer.SEMI,  ";", 1, 15),
+            Token.EOF_TOKEN
+        };
+        
+        List<Token> tlist = getTokens(str);
+        checkTokens(tlist, expected);
+        
+        for(int n=0; n<tlist.size(); n++) {
+            Token tok = tlist.get(n);
+            assertEquals("bad line. tok="+tok, expected[n].getLine(), tok.getLine());
+            assertEquals("bad offset. tok="+tok, expected[n].getOffset(), tok.getOffset());
+        }
+    }
+
+    @Test 
+    public void testTokenLineAndOffsetWithLeadingComment() {
+        //             012345678901 01234
+        String str =  "/*comment*/\na: b;";
+        // [?][0] = line
+        // [?][1] = offset
+        Token[] expected = {
+            new Token(CSSLexer.NL, "\\n", 1, 11),
+            new Token(CSSLexer.IDENT, "a", 2, 0),
+            new Token(CSSLexer.COLON, ":", 2, 1),
+            new Token(CSSLexer.WS,    " ", 2, 2),
+            new Token(CSSLexer.IDENT, "b", 2, 3), 
+            new Token(CSSLexer.SEMI,  ";", 2, 4),
+            Token.EOF_TOKEN
+        };
+        
+        List<Token> tlist = getTokens(str);
+        checkTokens(tlist, expected);
+        
+        for(int n=0; n<tlist.size(); n++) {
+            Token tok = tlist.get(n);
+            assertEquals("bad line. tok="+tok, expected[n].getLine(), tok.getLine());
+            assertEquals("bad offset. tok="+tok, expected[n].getOffset(), tok.getOffset());
+        }
+    }
+    
+    @Test 
+    public void testTokenOffsetWithFunction() {
+        //             01234567890
+        String str =  "a: b(arg);";
+        // [?][0] = line
+        // [?][1] = offset
+        Token[] expected = {
+            new Token(CSSLexer.IDENT, "a", 1, 0),
+            new Token(CSSLexer.COLON, ":", 1, 1),
+            new Token(CSSLexer.WS,    " ", 1, 2),
+            new Token(CSSLexer.IDENT, "b", 1, 3), 
+            new Token(CSSLexer.LPAREN, "(", 1, 4), 
+            new Token(CSSLexer.IDENT, "arg", 1, 5), 
+            new Token(CSSLexer.RPAREN, ")", 1, 8), 
+            new Token(CSSLexer.SEMI,  ";", 1, 9),
+            Token.EOF_TOKEN
+        };
+        
+        List<Token> tlist = getTokens(str);
+        checkTokens(tlist, expected);
+        
+        for(int n=0; n<tlist.size(); n++) {
+            Token tok = tlist.get(n);
+            assertEquals("bad line. tok="+tok, expected[n].getLine(), tok.getLine());
+            assertEquals("bad offset. tok="+tok, expected[n].getOffset(), tok.getOffset());
+        }
+    }
+    
+    @Test 
+    public void testTokenOffsetWithHash() {
+        //             01234567890
+        String str =  "a: #012345;";
+        // [?][0] = line
+        // [?][1] = offset
+        Token[] expected = {
+            new Token(CSSLexer.IDENT, "a", 1, 0),
+            new Token(CSSLexer.COLON, ":", 1, 1),
+            new Token(CSSLexer.WS,    " ", 1, 2),
+            new Token(CSSLexer.HASH, "#012345", 1, 3), 
+            new Token(CSSLexer.SEMI,  ";", 1, 10),
+            Token.EOF_TOKEN
+        };
+        
+        List<Token> tlist = getTokens(str);
+        checkTokens(tlist, expected);
+        
+        for(int n=0; n<tlist.size(); n++) {
+            Token tok = tlist.get(n);
+            assertEquals("bad line. tok="+tok, expected[n].getLine(), tok.getLine());
+            assertEquals("bad offset. tok="+tok, expected[n].getOffset(), tok.getOffset());
+        }
+    }
+ 
+    @Test 
+    public void testTokenOffsetWithDigits() {
+        //             01234567890
+        String str =  "a: 123.45;";
+        // [?][0] = line
+        // [?][1] = offset
+        Token[] expected = {
+            new Token(CSSLexer.IDENT, "a", 1, 0),
+            new Token(CSSLexer.COLON, ":", 1, 1),
+            new Token(CSSLexer.WS,    " ", 1, 2),
+            new Token(CSSLexer.NUMBER, "123.45", 1, 3), 
+            new Token(CSSLexer.SEMI,  ";", 1, 9),
+            Token.EOF_TOKEN
+        };
+        
+        List<Token> tlist = getTokens(str);
+        checkTokens(tlist, expected);
+        
+        for(int n=0; n<tlist.size(); n++) {
+            Token tok = tlist.get(n);
+            assertEquals("bad line. tok="+tok, expected[n].getLine(), tok.getLine());
+            assertEquals("bad offset. tok="+tok, expected[n].getOffset(), tok.getOffset());
+        }
+    }
+
+    @Test 
+    public void testTokenOffsetWithBangImportant() {
+        //             0123456789012345
+        String str =  "a: b !important;";
+        // [?][0] = line
+        // [?][1] = offset
+        Token[] expected = {
+            new Token(CSSLexer.IDENT, "a", 1, 0),
+            new Token(CSSLexer.COLON, ":", 1, 1),
+            new Token(CSSLexer.WS,    " ", 1, 2),
+            new Token(CSSLexer.IDENT, "b", 1, 3),
+            new Token(CSSLexer.WS,    " ", 1, 4),
+            new Token(CSSLexer.IMPORTANT_SYM, "!important", 1, 5), 
+            new Token(CSSLexer.SEMI,  ";", 1, 15),
+            Token.EOF_TOKEN
+        };
+        
+        List<Token> tlist = getTokens(str);
+        checkTokens(tlist, expected);
+        
+        for(int n=0; n<tlist.size(); n++) {
+            Token tok = tlist.get(n);
+            assertEquals("bad line. tok="+tok, expected[n].getLine(), tok.getLine());
+            assertEquals("bad offset. tok="+tok, expected[n].getOffset(), tok.getOffset());
+        }
+    }
+
+    @Test 
+    public void testTokenOffsetWithSkip() {
+        //             0123456789012345
+        String str =  "a: b !imporzant;";
+        // [?][0] = line
+        // [?][1] = offset
+        Token[] expected = {
+            new Token(CSSLexer.IDENT, "a", 1, 0),
+            new Token(CSSLexer.COLON, ":", 1, 1),
+            new Token(CSSLexer.WS,    " ", 1, 2),
+            new Token(CSSLexer.IDENT, "b", 1, 3),
+            new Token(CSSLexer.WS,    " ", 1, 4),
+            new Token(Token.SKIP, "!imporz", 1, 5), 
+            new Token(CSSLexer.SEMI,  ";", 1, 15),
+            Token.EOF_TOKEN
+        };
+        
+        List<Token> tlist = getTokens(str);
+        checkTokens(tlist, expected);
+        
+        for(int n=0; n<tlist.size(); n++) {
+            Token tok = tlist.get(n);
+            assertEquals("bad line. tok="+tok, expected[n].getLine(), tok.getLine());
+            assertEquals("bad offset. tok="+tok, expected[n].getOffset(), tok.getOffset());
+        }
+    }
+    
+    @Test 
+    public void testTokenOffsetWithInvalid() {
+        //             0123456789012345
+        String str =  "a: 1pz;";
+        // [?][0] = line
+        // [?][1] = offset
+        Token[] expected = {
+            new Token(CSSLexer.IDENT, "a", 1, 0),
+            new Token(CSSLexer.COLON, ":", 1, 1),
+            new Token(CSSLexer.WS,    " ", 1, 2),
+            new Token(Token.INVALID, "1pz", 1, 3),
+            new Token(CSSLexer.SEMI,  ";", 1, 6),
+            Token.EOF_TOKEN
+        };
+        
+        List<Token> tlist = getTokens(str);
+        checkTokens(tlist, expected);
+        
+        for(int n=0; n<tlist.size(); n++) {
+            Token tok = tlist.get(n);
+            assertEquals("bad line. tok="+tok, expected[n].getLine(), tok.getLine());
+            assertEquals("bad offset. tok="+tok, expected[n].getOffset(), tok.getOffset());
+        }
+    }
+
+    @Test
+    public void testTokenLineAndOffsetMoreFully() {
+        //             1            2                 3         4
+        //             012345678901 0123456789012345  012345678 0
+        String str =  "/*comment*/\n*.foo#bar:baz {\n\ta: 1em;\n}";
+        // [?][0] = line
+        // [?][1] = offset
+        Token[] expected = {
+            new Token(CSSLexer.NL,     "\\n",  1, 11),
+            new Token(CSSLexer.STAR,   "*",    2, 0),
+            new Token(CSSLexer.DOT,    ".",    2, 1),
+            new Token(CSSLexer.IDENT,  "foo",  2, 2),
+            new Token(CSSLexer.HASH,   "#bar", 2, 5),
+            new Token(CSSLexer.COLON,  ":",    2, 9),
+            new Token(CSSLexer.IDENT,  "baz",  2, 10),
+            new Token(CSSLexer.WS,     " ",    2, 13),
+            new Token(CSSLexer.LBRACE, "{",    2, 14),
+            new Token(CSSLexer.NL,     "\\n",  2, 15),
+            new Token(CSSLexer.WS,     "\t",   3, 0),
+            new Token(CSSLexer.IDENT,  "a",    3, 1),
+            new Token(CSSLexer.COLON,  ":",    3, 2),
+            new Token(CSSLexer.WS,     " ",    3, 3),
+            new Token(CSSLexer.EMS,    "1em",  3, 4), 
+            new Token(CSSLexer.SEMI,   ";",    3, 7),
+            new Token(CSSLexer.NL,     "\\n",  3, 8),
+            new Token(CSSLexer.RBRACE, "}",    4, 0),
+            Token.EOF_TOKEN
+        };
+        
+        List<Token> tlist = getTokens(str);
+        checkTokens(tlist, expected);
+        
+        for(int n=0; n<tlist.size(); n++) {
+            Token tok = tlist.get(n);
+            assertEquals("bad line. tok="+tok, expected[n].getLine(), tok.getLine());
+            assertEquals("bad offset. tok="+tok, expected[n].getOffset(), tok.getOffset());
+        }
+    }    
 }