/*
 * The contents of this file are subject to the terms of the Common Development
 * and Distribution License (the License). You may not use this file except in
 * compliance with the License.
 *
 * You can obtain a copy of the License at http://www.netbeans.org/cddl.html
 * or http://www.netbeans.org/cddl.txt.
 *
 * When distributing Covered Code, include this CDDL Header Notice in each file
 * and include the License file at http://www.netbeans.org/cddl.txt.
 * If applicable, add the following below the CDDL Header, with the fields
 * enclosed by brackets [] replaced by your own identifying information:
 * "Portions Copyrighted [year] [name of copyright owner]"
 *
 * The Original Software is NetBeans. The Initial Developer of the Original
 * Software is Sun Microsystems, Inc. Portions Copyright 1997-2007 Sun
 * Microsystems, Inc. All Rights Reserved.
 */

package org.netbeans.lib.lexer;

import java.util.List;
import java.util.Set;
import org.netbeans.api.lexer.InputAttributes;
import org.netbeans.api.lexer.LanguagePath;
import org.netbeans.api.lexer.TokenId;
import org.netbeans.lib.editor.util.GapList;
import org.netbeans.lib.lexer.token.ComplexToken;
import org.netbeans.lib.lexer.token.PreprocessedTextToken;
import org.netbeans.spi.lexer.CharPreprocessor;
import org.netbeans.spi.lexer.Lexer;
import org.netbeans.spi.lexer.LexerInput;
import org.netbeans.lib.lexer.token.AbstractToken;
import org.netbeans.spi.lexer.LexerRestartInfo;
import org.netbeans.spi.lexer.TokenFactory;

/**
 * Implementation of the functionality related to lexer input.
 *
 * @author Miloslav Metelka
 * @version 1.00
 */
public abstract class LexerInputOperation<T extends TokenId> implements CharProvider {

    /** Flag for additional correctness checks (may degrade performance). */
    private static final boolean testing = Boolean.getBoolean("netbeans.debug.lexer.test");

    /**
     * Current reading index in the operation.
     * At all times it must be &gt;=0.
     */
    private int readIndex;

    /**
     * Maximum index from which the char was fetched for current
     * (or previous) tokens recognition.
     * <br>
     * The index is updated lazily - only when EOF is reached
     * and when backup() is called.
     */
    private int lookaheadIndex;

    /**
     * Active preprocessor or null if there is no preprocessor.
     */
    private CharPreprocessorOperation preprocessorOperation;

    /**
     * Computed and cached token length.
     */
    private int tokenLength;

    private final TokenList<T> tokenList;

    private final boolean mutableInput;

    private final Lexer<T> lexer;

    /**
     * Start of the token being currently recognized.
     */
    private int tokenStartIndex;

    private boolean lexerFinished;

    /**
     * How many flyweight tokens were created in a row.
     */
    private int flySequenceLength;

    private List<CharPreprocessorError> preprocessErrorList;

    /**
     * Total count of preprocessors used during lexing.
     * It's used to determine whether extra preprocessed chars need to be used.
     */
    protected int preprocessingLevelCount;

    private CharProvider.ExtraPreprocessedChars extraPreprocessedChars;

    public LexerInputOperation(TokenList<T> tokenList, int tokenIndex, Object lexerRestartState) {
        this.tokenList = tokenList;
        this.mutableInput = (tokenList.modCount() != -1);
        // Determine flySequenceLength setting
        while (--tokenIndex >= 0 && LexerUtilsConstants.token(
                tokenList, tokenIndex).isFlyweight()
        ) {
            flySequenceLength++;
        }

        LanguagePath languagePath = tokenList.languagePath();
        LanguageOperation<T> languageOperation = LexerUtilsConstants.mostEmbeddedLanguageOperation(languagePath);
        TokenFactory<T> tokenFactory = LexerSpiPackageAccessor.get().createTokenFactory(this);

        // Check whether character preprocessing is necessary
        CharPreprocessor p = LexerSpiPackageAccessor.get().createCharPreprocessor(
                languageOperation.languageHierarchy());
        if (p != null) {
            preprocessingLevelCount++;
            preprocessorOperation = new CharPreprocessorOperation(
                    ((preprocessorOperation != null)
                        ? (CharProvider)preprocessorOperation
                        : this),
                    p,
                    this
            );
        }

        LexerInput lexerInput = LexerSpiPackageAccessor.get().createLexerInput(
                (preprocessorOperation != null) ? preprocessorOperation : this);

        LexerRestartInfo<T> info = LexerSpiPackageAccessor.get().createLexerRestartInfo(
                lexerInput, tokenFactory, lexerRestartState,
                tokenList.languagePath(), inputAttributes());
        lexer = LexerSpiPackageAccessor.get().createLexer(
                languageOperation.languageHierarchy(), info);
    }

    public abstract int read(int index);

    public abstract char readExisting(int index);

    public abstract void approveToken(AbstractToken<T> token);

    public Set<T> skipTokenIds() {
        return tokenList.skipTokenIds();
    }

    public final int read() {
        int c = read(readIndex++);
        if (c == LexerInput.EOF) {
            lookaheadIndex = readIndex; // count EOF char into lookahead
            readIndex--; // readIndex must not include EOF
        }
        return c;
    }
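    // Note on read() above: for a two-character input "ab" it returns 'a', 'b'
    // and then LexerInput.EOF; after the EOF, readIndex stays at 2 while
    // lookaheadIndex becomes 3, so the EOF counts as one character of lookahead.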
    public int deepRawLength(int length) {
        // No preprocessing by default
        return length;
    }

    public int deepRawLengthShift(int index) {
        // No preprocessing by default
        return index;
    }

    public final int readIndex() {
        return readIndex;
    }

    public final void backup(int count) {
        if (lookaheadIndex < readIndex) {
            lookaheadIndex = readIndex;
        }
        readIndex -= count;
    }
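    // Note on backup(): after three read() calls followed by backup(2),
    // readIndex is 1, lookaheadIndex is 3 and lookahead() below returns 2
    // (a raw length if a character preprocessor is active).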
    /**
     * Get the distance between the index of the rightmost character already returned
     * by previous {@link #read()} operations and the present read index.
     * <br/>
     * If no {@link #backup(int)} operation was performed,
     * the lookahead will be zero except when EOF was already returned.
     *
     * @return &gt;=0 number of characters between the rightmost reading index reached
     * and the present read position.
     * <br/>
     * The EOF (when reached by reading) is treated as a single character
     * in lookahead.
     * <br/>
     * If there is an active character preprocessor the returned value
     * is a raw length of the lookahead.
     */
    public final int lookahead() {
        return (lookaheadIndex > readIndex)
                ? ((preprocessorOperation != null)
                        ? preprocessorOperation.deepRawLength(lookaheadIndex - readIndex)
                        : (lookaheadIndex - readIndex))
                : 0;
    }

    public final int tokenLength() {
        return tokenLength;
    }

    public void tokenRecognized(int tokenLength) {
        if (tokenLength > readIndex()) {
            throw new IndexOutOfBoundsException("tokenLength=" + tokenLength // NOI18N
                    + " >" + readIndex());
        }
        this.tokenLength = tokenLength;
    }

    public void tokenApproved() {
        tokenStartIndex += tokenLength;
        readIndex -= tokenLength;
        lookaheadIndex -= tokenLength;
    }
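    // Note on tokenApproved(): the indexes are rebased so that index 0 again
    // corresponds to the first character following the approved token.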
    protected final TokenList<T> tokenList() {
        return tokenList;
    }

    protected final int tokenStartIndex() {
        return tokenStartIndex;
    }

    public final void setTokenStartIndex(int tokenStartIndex) {
        this.tokenStartIndex = tokenStartIndex;
    }

    protected final CharPreprocessorOperation preprocessor() {
        return preprocessorOperation;
    }

    public final boolean isMutableInput() {
        return mutableInput;
    }

    public final boolean isStoreLookaheadAndState() {
        return isMutableInput() || testing;
    }

    public AbstractToken<T> nextToken() {
        assert (!lexerFinished);
        while (true) {
            @SuppressWarnings("unchecked")
            AbstractToken<T> token = (AbstractToken<T>)lexer().nextToken();
            if (token == null) {
                LexerUtilsConstants.checkLexerInputFinished(
                        (preprocessorOperation != null) ? (CharProvider)preprocessorOperation : this, this);
                lexerFinished = true;
                return null;
            } else {
                approveToken(token);
            }
            if (token == TokenFactory.SKIP_TOKEN)
                continue; // Fetch next token
            return token;
        }
    }

    /**
     * Notification that the token was recognized.
     * @param tokenLength length of the recognized token.
     * @param skip whether the token should be skipped
     * @return true if the token holding preprocessed text should be created.
     * If skip is true then false is returned.
     */
    public final boolean tokenRecognized(int tokenLength, boolean skip) {
        if (preprocessorOperation != null) {
            preprocessorOperation.tokenRecognized(tokenLength);
        } else { // no preprocessor
            tokenRecognized(tokenLength);
        }

        // If the token is not skipped check whether preprocessed token
        // should be created instead of the regular token.
        if (!skip && tokenLength != this.tokenLength
                || (preprocessErrorList != null
                    && preprocessErrorList.get(0).index() < this.tokenLength)
        ) {
            if (extraPreprocessedChars == null && preprocessingLevelCount > 1) {
                // For more than one preprocessing level need to handle
                // extra preprocessed chars before and after the main ones
                // on the parent levels.
                extraPreprocessedChars = new CharProvider.ExtraPreprocessedChars();
            }
            return true;
        }
        return false;
    }

    public void notifyPreprocessorError(CharPreprocessorError error) {
        if (preprocessErrorList == null) {
            preprocessErrorList = new GapList<CharPreprocessorError>();
        }
        preprocessErrorList.add(error);
    }

    public final void initPreprocessedToken(AbstractToken<T> token) {
        CharPreprocessorError error = null;
        if (preprocessErrorList != null && preprocessErrorList.size() > 0) {
            for (int i = preprocessErrorList.size() - 1; i >= 0; i--) {
                error = preprocessErrorList.get(i);
                if (error.index() < tokenLength) {
                    preprocessErrorList.remove(i);
                } else { // Above errors for this token
                    // Relocate - subtract token length
                    error.updateIndex(-tokenLength);
                    error = null;
                }
            }
        }

        PreprocessedTextStorage storage = preprocessorOperation.createPreprocessedTextStorage(
                token.text(), extraPreprocessedChars);

        if (token.getClass() == ComplexToken.class) {
            ((ComplexToken)token).initPrep(storage, error);
        } else {
            ((PreprocessedTextToken)token).initPrep(storage, error);
        }
    }

    public void collectExtraPreprocessedChars(CharProvider.ExtraPreprocessedChars epc,
    int prepStartIndex, int prepEndIndex, int topPrepEndIndex) {
        // No extra preprocessed characters
    }

    public final LanguageOperation<T> languageOperation() {
        return LexerUtilsConstants.mostEmbeddedLanguageOperation(tokenList.languagePath());
    }

    public final Object lexerState() {
        return lexer.state();
    }

    public final boolean isFlyTokenAllowed() {
        return (flySequenceLength < LexerUtilsConstants.MAX_FLY_SEQUENCE_LENGTH);
    }

    protected final void flyTokenAdded() {
        flySequenceLength++;
    }

    protected final void preventFlyToken() {
        flySequenceLength = LexerUtilsConstants.MAX_FLY_SEQUENCE_LENGTH;
    }

    protected final void clearFlySequence() {
        flySequenceLength = 0;
    }

    protected final boolean isSkipToken(AbstractToken<T> token) {
        return (token == TokenFactory.SKIP_TOKEN);
    }

    public final Lexer lexer() {
        return lexer;
    }

    public final InputAttributes inputAttributes() {
        return tokenList.inputAttributes();
    }

    public final void release() {
        lexer.release();
    }

}
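For orientation, here is a minimal sketch of what a concrete subclass reading from a plain CharSequence might look like. It is illustrative only and not part of the NetBeans sources; the class name and field names are made up, and a real implementation would also record each token's text and offset.

// Hypothetical example subclass (not part of the library).
final class CharSequenceLexerInputOperation<T extends TokenId> extends LexerInputOperation<T> {

    private final CharSequence text;

    CharSequenceLexerInputOperation(TokenList<T> tokenList, CharSequence text) {
        super(tokenList, 0, null);
        this.text = text;
    }

    public int read(int index) {
        // Indexes passed to read(int) are relative to the start of the token
        // currently being recognized (see tokenStartIndex() and tokenApproved()).
        index += tokenStartIndex();
        return (index < text.length()) ? text.charAt(index) : LexerInput.EOF;
    }

    public char readExisting(int index) {
        return text.charAt(tokenStartIndex() + index);
    }

    public void approveToken(AbstractToken<T> token) {
        // Only the bookkeeping provided by the base class is updated here;
        // a real implementation would also store the token's offset and text.
        if (isSkipToken(token)) {
            preventFlyToken();
        } else if (token.isFlyweight()) {
            flyTokenAdded();
        } else {
            clearFlySequence();
        }
        tokenApproved();
    }
}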