/*
 * The contents of this file are subject to the terms of the Common Development
 * and Distribution License (the License). You may not use this file except in
 * compliance with the License.
 *
 * You can obtain a copy of the License at http://www.netbeans.org/cddl.html
 * or http://www.netbeans.org/cddl.txt.
 *
 * When distributing Covered Code, include this CDDL Header Notice in each file
 * and include the License file at http://www.netbeans.org/cddl.txt.
 * If applicable, add the following below the CDDL Header, with the fields
 * enclosed by brackets [] replaced by your own identifying information:
 * "Portions Copyrighted [year] [name of copyright owner]"
 *
 * The Original Software is NetBeans. The Initial Developer of the Original
 * Software is Sun Microsystems, Inc. Portions Copyright 1997-2007 Sun
 * Microsystems, Inc. All Rights Reserved.
 */

package org.netbeans.lib.lexer.test;

import java.lang.reflect.Field;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Iterator;
import java.util.List;
import javax.swing.event.DocumentEvent;
import javax.swing.event.DocumentListener;
import javax.swing.text.BadLocationException;
import javax.swing.text.Document;
import junit.framework.TestCase;
import org.netbeans.api.lexer.Language;
import org.netbeans.api.lexer.Token;
import org.netbeans.api.lexer.TokenHierarchyEvent;
import org.netbeans.api.lexer.TokenHierarchyListener;
import org.netbeans.api.lexer.TokenHierarchy;
import org.netbeans.api.lexer.TokenId;
import org.netbeans.api.lexer.TokenSequence;
import org.netbeans.api.lexer.TokenUtilities;
import org.netbeans.junit.NbTestCase;
import org.netbeans.lib.lexer.TokenList;
import org.netbeans.lib.lexer.test.dump.TokenDumpCheck;

/**
 * Various utilities related to lexer and token testing.
 *
 * @author mmetelka
 */
public final class LexerTestUtilities {

    /** Flag for additional correctness checks (may degrade performance). */
    private static final boolean testing = Boolean.getBoolean("netbeans.debug.lexer.test");

    private static final String LAST_TOKEN_HIERARCHY = "last-token-hierarchy";

    private static Field tokenListField;

    private LexerTestUtilities() {
        // no instances
    }

    /**
     * @see #assertTokenEquals(String, TokenSequence, TokenId, String, int)
     */
    public static void assertTokenEquals(TokenSequence<? extends TokenId> ts, TokenId id, String text, int offset) {
        assertTokenEquals(null, ts, id, text, offset);
    }

    /**
     * Compare <code>TokenSequence.token()</code> to the given
     * token id, text and offset.
     *
     * @param offset expected offset. It may be -1 to prevent offset testing.
     */
    public static void assertTokenEquals(String message, TokenSequence<? extends TokenId> ts, TokenId id, String text, int offset) {
        message = messagePrefix(message);
        Token<? extends TokenId> t = ts.token();
        TestCase.assertNotNull("Token is null", t);
        TokenId tId = t.id();
        TestCase.assertEquals(message + "Invalid token.id()", id, tId);
        CharSequence tText = t.text();
        assertTextEquals(message + "Invalid token.text()", text, tText);
        // The token's length must correspond to text.length()
        TestCase.assertEquals(message + "Invalid token.length()", text.length(), t.length());

        if (offset != -1) {
            int tsOffset = ts.offset();
            TestCase.assertEquals(message + "Invalid tokenSequence.offset()", offset, tsOffset);

            // It should also be true that if the token is non-flyweight then
            // ts.offset() == t.offset()
            // and if it's flyweight then t.offset() == -1
            int tOffset = t.offset(null);
            assertTokenOffsetMinusOneForFlyweight(t.isFlyweight(), tOffset);
            if (!t.isFlyweight()) {
                assertTokenOffsetsEqual(message, tOffset, offset);
            }
        }
    }

    public static void assertTokenEquals(TokenSequence<? extends TokenId> ts, TokenId id, String text, int offset,
    int lookahead, Object state) {
        assertTokenEquals(null, ts, id, text, offset, lookahead, state);
    }

    public static void assertTokenEquals(String message, TokenSequence<? extends TokenId> ts, TokenId id, String text, int offset,
    int lookahead, Object state) {
        assertTokenEquals(message, ts, id, text, offset);

        Token t = ts.token();
        message = messagePrefix(message);
        TestCase.assertEquals(message + "Invalid token.lookahead()", lookahead, lookahead(ts));
        TestCase.assertEquals(message + "Invalid token.state()", state, state(ts));
    }

    public static void assertTokenOffsetsEqual(String message, int offset1, int offset2) {
        if (offset1 != -1 && offset2 != -1) { // both non-flyweight
            TestCase.assertEquals(messagePrefix(message)
                    + "Offsets equal", offset1, offset2);
        }
    }

    public static void assertTokenFlyweight(Token token) {
        TestCase.assertEquals("Token flyweight", true, token.isFlyweight());
    }

    public static void assertTokenNotFlyweight(Token token) {
        TestCase.assertEquals("Token not flyweight", true, !token.isFlyweight());
    }

    private static void assertTokenOffsetMinusOneForFlyweight(boolean tokenFlyweight, int offset) {
        if (tokenFlyweight) {
            TestCase.assertEquals("Flyweight token => token.offset()=-1", -1, offset);
        } else { // non-flyweight
            TestCase.assertTrue("Non-flyweight token => token.offset()!=-1 but " + offset, (offset != -1));
        }
    }

    /**
     * Assert that the next token in the token sequence has the given id and text.
     */
    public static void assertNextTokenEquals(TokenSequence<? extends TokenId> ts, TokenId id, String text) {
        assertNextTokenEquals(null, ts, id, text);
    }

    public static void assertNextTokenEquals(String message, TokenSequence<? extends TokenId> ts, TokenId id, String text) {
        String messagePrefix = messagePrefix(message);
        TestCase.assertTrue(messagePrefix + "No next token available", ts.moveNext());
        assertTokenEquals(message, ts, id, text, -1);
    }
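
    // Usage sketch (added comment, not part of the original file): a lexer test would
    // typically lex a small input and walk the tokens one by one. MyTokenId and
    // MyLanguage are hypothetical placeholders for the language under test.
    //
    //   TokenHierarchy<?> hi = TokenHierarchy.create("int x;", MyLanguage.language());
    //   TokenSequence<?> ts = hi.tokenSequence();
    //   LexerTestUtilities.assertNextTokenEquals(ts, MyTokenId.KEYWORD, "int");
    //   LexerTestUtilities.assertNextTokenEquals(ts, MyTokenId.WHITESPACE, " ");
    //   LexerTestUtilities.assertNextTokenEquals(ts, MyTokenId.IDENTIFIER, "x");
    //   LexerTestUtilities.assertNextTokenEquals(ts, MyTokenId.SEMICOLON, ";");
    //   TestCase.assertFalse("Unexpected extra token", ts.moveNext());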

    /**
     * @see #assertTokenSequencesEqual(String,TokenSequence,TokenHierarchy,TokenSequence,TokenHierarchy,boolean)
     */
    public static void assertTokenSequencesEqual(
    TokenSequence<? extends TokenId> expected, TokenHierarchy<?> expectedHi,
    TokenSequence<? extends TokenId> actual, TokenHierarchy<?> actualHi,
    boolean testLookaheadAndState) {
        assertTokenSequencesEqual(null, expected, expectedHi, actual, actualHi, testLookaheadAndState);
    }

    /**
     * Compare contents of the given token sequences by moving through all their
     * tokens.
     * <br/>
     * Token hierarchies are given to check implementations
     * of Token.offset(TokenHierarchy) - useful for checking token snapshots.
     *
     * @param message message to display (may be null).
     * @param expected non-null token sequence to be compared to the other token sequence.
     * @param expectedHi token hierarchy to which expected relates.
     * @param actual non-null token sequence to be compared to the other token sequence.
     * @param actualHi token hierarchy to which actual relates.
     * @param testLookaheadAndState whether lookahead and states should be checked
     * or not. Generally it should be true but for snapshot checking it must
     * be false because snapshots do not hold lookaheads and states.
     */
    public static void assertTokenSequencesEqual(String message,
    TokenSequence<? extends TokenId> expected, TokenHierarchy<?> expectedHi,
    TokenSequence<? extends TokenId> actual, TokenHierarchy<?> actualHi,
    boolean testLookaheadAndState) {
        boolean success = false;
        try {
            String prefix = messagePrefix(message);
            TestCase.assertEquals(prefix + "Move previous: ", expected.movePrevious(), actual.movePrevious());
            while (expected.moveNext()) {
                TestCase.assertTrue(prefix + "Move next: ", actual.moveNext());
                assertTokensEqual(message, expected, expectedHi, actual, actualHi, testLookaheadAndState);
            }
            TestCase.assertFalse(prefix + "Move next not disabled", actual.moveNext());
            success = true;
        } finally {
            if (!success) {
                System.err.println("Expected token sequence dump:\n" + expected);
                System.err.println("Test token sequence dump:\n" + actual);
            }
        }
    }
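
    // Usage sketch (added comment, not part of the original file): incCheck(Document, boolean)
    // below relies on this method to compare a freshly created batch hierarchy with the
    // incrementally maintained one for the same document text, roughly:
    //
    //   TokenHierarchy<?> batch = TokenHierarchy.create(docText, language);
    //   TokenHierarchy<?> inc = TokenHierarchy.get(doc);
    //   assertTokenSequencesEqual(batch.tokenSequence(), batch,
    //           inc.tokenSequence(), inc, true);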

    private static void assertTokensEqual(String message,
    TokenSequence<? extends TokenId> ts, TokenHierarchy tokenHierarchy,
    TokenSequence<? extends TokenId> ts2, TokenHierarchy tokenHierarchy2, boolean testLookaheadAndState) {
        Token<? extends TokenId> t = ts.token();
        Token<? extends TokenId> t2 = ts2.token();

        message = messagePrefix(message);
        TestCase.assertEquals(message + "Invalid token id", t.id(), t2.id());
        assertTextEquals(message + "Invalid token text", t.text(), t2.text());

        assertTokenOffsetsEqual(message, t.offset(tokenHierarchy), t2.offset(tokenHierarchy2));
        TestCase.assertEquals(message + "Invalid tokenSequence offset", ts.offset(), ts2.offset());

        // Check LOOKAHEAD and STATE matching in case they are filled in (during tests)
        if (testing && testLookaheadAndState) {
            TestCase.assertEquals(message + "Invalid token.lookahead()", lookahead(ts), lookahead(ts2));
            TestCase.assertEquals(message + "Invalid token.state()", state(ts), state(ts2));
        }
        TestCase.assertEquals(message + "Invalid token length", t.length(), t2.length());
        TestCase.assertEquals(message + "Invalid token part", t.partType(), t2.partType());
    }

    /**
     * Compute the number of flyweight tokens in the given token sequence.
     *
     * @param ts non-null token sequence.
     * @return number of flyweight tokens in the token sequence.
     */
    public static int flyweightTokenCount(TokenSequence<? extends TokenId> ts) {
        int flyTokenCount = 0;
        ts.moveIndex(0);
        while (ts.moveNext()) {
            if (ts.token().isFlyweight()) {
                flyTokenCount++;
            }
        }
        return flyTokenCount;
    }

    /**
     * Compute the total number of characters represented by flyweight tokens
     * in the given token sequence.
     *
     * @param ts non-null token sequence.
     * @return number of characters contained in the flyweight tokens
     * in the token sequence.
     */
    public static int flyweightTextLength(TokenSequence<? extends TokenId> ts) {
        int flyTokenTextLength = 0;
        ts.moveIndex(0);
        while (ts.moveNext()) {
            if (ts.token().isFlyweight()) {
                flyTokenTextLength += ts.token().text().length();
            }
        }
        return flyTokenTextLength;
    }

    /**
     * Compute the distribution of flyweight token lengths across the given token sequence.
     *
     * @param ts non-null token sequence.
     * @return non-null list containing the number of flyweight tokens whose length
     * equals the index in the list.
     */
    public static List<Integer> flyweightDistribution(TokenSequence<? extends TokenId> ts) {
        List<Integer> distribution = new ArrayList<Integer>();
        ts.moveIndex(0);
        while (ts.moveNext()) {
            if (ts.token().isFlyweight()) {
                int len = ts.token().text().length();
                while (distribution.size() <= len) {
                    distribution.add(0);
                }
                distribution.set(len, distribution.get(len) + 1);
            }
        }
        return distribution;
    }
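
    // Interpretation sketch (added comment, not part of the original file): for a token
    // sequence whose flyweight tokens are " ", " " and "int", the returned list would be
    // [0, 2, 0, 1] - no flyweight tokens of length 0 or 2, two of length 1, one of length 3.
    //
    //   List<Integer> distribution = LexerTestUtilities.flyweightDistribution(ts);
    //   System.err.println("Flyweight length distribution: " + distribution);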

    public static boolean collectionsEqual(Collection<?> c1, Collection<?> c2) {
        return c1.containsAll(c2) && c2.containsAll(c1);
    }

    public static void assertCollectionsEqual(Collection expected, Collection actual) {
        assertCollectionsEqual(null, expected, actual);
    }

    public static void assertCollectionsEqual(String message, Collection expected, Collection actual) {
        if (!collectionsEqual(expected, actual)) {
            message = messagePrefix(message);
            for (Iterator it = expected.iterator(); it.hasNext();) {
                Object o = it.next();
                if (!actual.contains(o)) {
                    System.err.println(actual.toString());
                    TestCase.fail(message + " Object " + o + " not contained in tested collection");
                }
            }
            for (Iterator it = actual.iterator(); it.hasNext();) {
                Object o = it.next();
                if (!expected.contains(o)) {
                    System.err.println(actual.toString());
                    TestCase.fail(message + " Extra object " + o + " contained in tested collection");
                }
            }
            TestCase.fail("Collections not equal for unknown reason!");
        }
    }

    public static void incCheck(Document doc, boolean nested) {
        TokenHierarchy<?> thInc = TokenHierarchy.get(doc);
        Language<? extends TokenId> language = (Language<? extends TokenId>)
                doc.getProperty(Language.class);
        String docText = null;
        try {
            docText = doc.getText(0, doc.getLength());
        } catch (BadLocationException e) {
            e.printStackTrace();
            TestCase.fail("BadLocationException occurred");
        }
        TokenHierarchy<?> thBatch = TokenHierarchy.create(docText, language);
        boolean success = false;
        TokenSequence<?> batchTS = thBatch.tokenSequence();
        try {
            // Compare lookaheads and states as well
            assertTokenSequencesEqual(batchTS, thBatch,
                    thInc.tokenSequence(), thInc, true);
            success = true;
        } finally {
            if (!success) {
                // Go forward two tokens to provide extra token context
                batchTS.moveNext();
                batchTS.moveNext();
                System.err.println("BATCH token sequence dump:\n" + thBatch.tokenSequence());
                TokenHierarchy<?> lastHi = (TokenHierarchy<?>)doc.getProperty(LAST_TOKEN_HIERARCHY);
                if (lastHi != null) {
                    System.err.println("PREVIOUS batch token sequence dump:\n" + lastHi.tokenSequence());
                }
            }
        }

        // Check the change since the last modification
        TokenHierarchy<?> lastHi = (TokenHierarchy<?>)doc.getProperty(LAST_TOKEN_HIERARCHY);
        if (lastHi != null) {
            // TODO comparison
        }
        doc.putProperty(LAST_TOKEN_HIERARCHY, thBatch); // new last batch token hierarchy
    }
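
    // Usage sketch (added comment, not part of the original file): an incremental lexing
    // test typically mutates the document and re-checks the whole hierarchy after each
    // change. Note that incCheck() reads the document's Language from the Language.class
    // document property, so the test is expected to have set it beforehand.
    //
    //   doc.putProperty(Language.class, MyLanguage.language()); // hypothetical language
    //   doc.insertString(0, "int x;", null);
    //   LexerTestUtilities.incCheck(doc, false);
    //   doc.remove(0, 4);
    //   LexerTestUtilities.incCheck(doc, false);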

    /**
     * Start to listen for changes in the token hierarchy.
     */
    public static void incInit(Document doc) {
        TestCase.assertNull(doc.getProperty(TokenHierarchyListener.class));
        doc.putProperty(TokenHierarchyListener.class, TestTokenChangeListener.INSTANCE);
    }

    /**
     * Get the lookahead for the token at which the token sequence is positioned.
     * <br/>
     * The method uses reflection to get a reference to the tokenList field in the token sequence.
     */
    public static int lookahead(TokenSequence<? extends TokenId> ts) {
        return tokenList(ts).lookahead(ts.index());
    }

    /**
     * Get the state for the token at which the token sequence is positioned.
     * <br/>
     * The method uses reflection to get a reference to the tokenList field in the token sequence.
     */
    public static Object state(TokenSequence<? extends TokenId> ts) {
        return tokenList(ts).state(ts.index());
    }

    /**
     * Compare whether the two character sequences represent the same text.
     */
    public static boolean textEquals(CharSequence text1, CharSequence text2) {
        return TokenUtilities.equals(text1, text2);
    }

    public static void assertTextEquals(CharSequence expected, CharSequence actual) {
        assertTextEquals(null, expected, actual);
    }

    public static void assertTextEquals(String message, CharSequence expected, CharSequence actual) {
        if (!textEquals(expected, actual)) {
            TestCase.fail(messagePrefix(message) +
                " expected:\"" + expected + "\" but was:\"" + actual + "\"");
        }
    }

    /**
     * Return the given text as a String,
     * translating the special characters (and '\') into escape sequences.
     *
     * @param text non-null text to be debugged.
     * @return non-null string containing the debug text.
     */
    public static String debugText(CharSequence text) {
        return TokenUtilities.debugText(text);
    }

    public static void initLastDocumentEventListening(Document doc) {
        doc.addDocumentListener(new DocumentListener() {
            public void insertUpdate(DocumentEvent evt) {
                storeEvent(evt);
            }
            public void removeUpdate(DocumentEvent evt) {
                storeEvent(evt);
            }
            public void changedUpdate(DocumentEvent evt) {
                storeEvent(evt);
            }
            private void storeEvent(DocumentEvent evt) {
                evt.getDocument().putProperty(DocumentEvent.class, evt);
            }
        });
    }

    public static DocumentEvent getLastDocumentEvent(Document doc) {
        return (DocumentEvent)doc.getProperty(DocumentEvent.class);
    }

    public static void initLastTokenHierarchyEventListening(Document doc) {
        TokenHierarchy hi = TokenHierarchy.get(doc);
        hi.addTokenHierarchyListener(new TokenHierarchyListener() {
            public void tokenHierarchyChanged(TokenHierarchyEvent evt) {
                ((Document)evt.tokenHierarchy().mutableInputSource()).putProperty(
                        TokenHierarchyEvent.class, evt);
            }
        });
    }

    public static TokenHierarchyEvent getLastTokenHierarchyEvent(Document doc) {
        return (TokenHierarchyEvent)doc.getProperty(TokenHierarchyEvent.class);
    }
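
    // Usage sketch (added comment, not part of the original file): capture and inspect
    // the last events fired for a document modification.
    //
    //   LexerTestUtilities.initLastDocumentEventListening(doc);
    //   LexerTestUtilities.initLastTokenHierarchyEventListening(doc);
    //   doc.insertString(0, "a", null);
    //   DocumentEvent lastDocEvt = LexerTestUtilities.getLastDocumentEvent(doc);
    //   TokenHierarchyEvent lastHiEvt = LexerTestUtilities.getLastTokenHierarchyEvent(doc);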

    /**
     * Get the token list from the given token sequence for testing purposes.
     */
    public static <T extends TokenId> TokenList<T> tokenList(TokenSequence<T> ts) {
        try {
            if (tokenListField == null) {
                tokenListField = ts.getClass().getDeclaredField("tokenList");
                tokenListField.setAccessible(true);
            }
            @SuppressWarnings("unchecked")
            TokenList<T> tl = (TokenList<T>)tokenListField.get(ts);
            return tl;
        } catch (Exception e) {
            TestCase.fail(e.getMessage());
            return null; // never reached
        }
    }

    private static String messagePrefix(String message) {
        if (message != null) {
            message = message + ": ";
        } else {
            message = "";
        }
        return message;
    }

    /**
     * Set whether the lexer should run in testing mode, in which some
     * additional correctness checks are performed.
     */
    public static void setTesting(boolean testing) {
        System.setProperty("netbeans.debug.lexer.test", testing ? "true" : "false");
    }

    /**
     * Check whether a token descriptions dump file (a file with the added suffix ".tokens.txt")
     * exists for the given input file and whether it has the same content
     * as the one obtained by lexing the input file.
     * <br/>
     * This allows testing whether the tested lexer still produces the same tokens.
     * <br/>
     * The method will only pass successfully if both the input file and the token descriptions
     * file exist and the token descriptions file contains the same information
     * as the generated descriptions.
     * <br/>
     * If the token descriptions file does not exist the method will create it.
     * <br/>
     * As the lexer's behavior at the EOF is important and should be well tested
     * there is support for virtually splitting the input file into multiple inputs
     * by a virtual EOF - see <code>TokenDumpTokenId</code> for details.
     * <br/>
     * There is also a possibility to specify special characters
     * - see <code>TokenDumpTokenId</code> for details.
     *
     * @param test non-null test (used for calling test.getDataDir()).
     * @param relFilePath non-null file path relative to the datadir of the test.
     * <br/>
     * For example if "testfiles/testinput.mylang.txt" gets passed the test method will
     * search for <code>new File(test.getDataDir() + "testfiles/testinput.mylang.txt")</code>,
     * read its content, lex it and create token descriptions. Then it will search for
     * <code>new File(test.getDataDir() + "testfiles/testinput.mylang.txt.tokens.txt")</code>
     * and it will compare the file content with the generated descriptions.
     */
    public static void checkTokenDump(NbTestCase test, String relFilePath,
    Language<? extends TokenId> language) throws Exception {
        TokenDumpCheck.checkTokenDump(test, relFilePath, language);
    }
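
    // Usage sketch (added comment, not part of the original file): a test method in an
    // NbTestCase subclass would typically delegate to checkTokenDump(), with the path
    // resolved relative to getDataDir(). MyLanguage is a hypothetical placeholder.
    //
    //   public void testTokenDump() throws Exception {
    //       LexerTestUtilities.checkTokenDump(this, "testfiles/testinput.mylang.txt",
    //               MyLanguage.language());
    //   }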

    private static final class TestTokenChangeListener implements TokenHierarchyListener {

        static TestTokenChangeListener INSTANCE = new TestTokenChangeListener();

        public void tokenHierarchyChanged(TokenHierarchyEvent evt) {
            TokenHierarchy hi = evt.tokenHierarchy();
            Document d = (Document)hi.mutableInputSource();
            d.putProperty(TokenHierarchyEvent.class, evt);
        }

    }
}