/*
 * The contents of this file are subject to the terms of the Common Development
 * and Distribution License (the License). You may not use this file except in
 * compliance with the License.
 *
 * You can obtain a copy of the License at http://www.netbeans.org/cddl.html
 * or http://www.netbeans.org/cddl.txt.
 *
 * When distributing Covered Code, include this CDDL Header Notice in each file
 * and include the License file at http://www.netbeans.org/cddl.txt.
 * If applicable, add the following below the CDDL Header, with the fields
 * enclosed by brackets [] replaced by your own identifying information:
 * "Portions Copyrighted [year] [name of copyright owner]"
 *
 * The Original Software is NetBeans. The Initial Developer of the Original
 * Software is Sun Microsystems, Inc. Portions Copyright 1997-2007 Sun
 * Microsystems, Inc. All Rights Reserved.
 */

package org.netbeans.lib.lexer.inc;

import java.util.List;
import java.util.Set;
import org.netbeans.api.lexer.LanguagePath;
import org.netbeans.lib.lexer.LAState;
import org.netbeans.lib.lexer.LexerSpiPackageAccessor;
import org.netbeans.lib.lexer.TextLexerInputOperation;
import org.netbeans.lib.lexer.TokenList;
import org.netbeans.lib.editor.util.FlyOffsetGapList;
import org.netbeans.lib.lexer.EmbeddingContainer;
import org.netbeans.lib.lexer.LexerInputOperation;
import org.netbeans.lib.lexer.LexerUtilsConstants;
import org.netbeans.api.lexer.InputAttributes;
import org.netbeans.api.lexer.Token;
import org.netbeans.api.lexer.TokenId;
import org.netbeans.lib.lexer.TokenHierarchyOperation;
import org.netbeans.spi.lexer.MutableTextInput;
import org.netbeans.lib.lexer.token.AbstractToken;
import org.netbeans.lib.lexer.token.TextToken;

/**
 * Incremental token list that maintains the list of tokens
 * at the root language level.
 * <br/>
 * The physical storage contains a gap to speed up list modifications
 * during typing in a document, when tokens are typically added/removed
 * at the same index in the list.
 *
 * <p>
 * To avoid degrading performance significantly with each extra language
 * embedding level, the token list maintains a direct link to the root level.
 *
 * @author Miloslav Metelka
 * @version 1.00
 */
public final class IncTokenList<T extends TokenId>
extends FlyOffsetGapList<Object> implements MutableTokenList<T> {
    
    private final TokenHierarchyOperation<?,T> tokenHierarchyOperation;

    private final MutableTextInput<?> mutableTextInput;
    
    private final LanguagePath languagePath;
    
    private final InputAttributes inputAttributes;
    
    private CharSequence text;
    
    /**
     * Lexer input operation used for lexing of the input.
     */
    private LexerInputOperation<T> lexerInputOperation;
    
    /** Modification count of the root token list; see {@link #modCount()}. */
    private int rootModCount;

    /** Lookaheads and lexer states of the tokens, kept in parallel with this list. */
    private LAState laState;
    
    
    public IncTokenList(TokenHierarchyOperation<?,T> tokenHierarchyOperation,
    MutableTextInput<?> mutableTextInput) {
        this.tokenHierarchyOperation = tokenHierarchyOperation;
        this.mutableTextInput = mutableTextInput;
        this.languagePath = LanguagePath.get(
                LexerSpiPackageAccessor.get().language(mutableTextInput));
        this.inputAttributes = LexerSpiPackageAccessor.get().inputAttributes(mutableTextInput);
        this.text = LexerSpiPackageAccessor.get().text(mutableTextInput);
        this.laState = LAState.empty();
        initLexing();
    }
    
    private void initLexing() {
        this.lexerInputOperation = new TextLexerInputOperation<T>(this, text);
    }
    
    public LanguagePath languagePath() {
        return languagePath;
    }

    public synchronized int tokenCount() {
        if (lexerInputOperation != null) { // still lexing
            tokenOrEmbeddingContainerImpl(Integer.MAX_VALUE); // lex all remaining tokens
        }
        return size();
    }

    public char childTokenCharAt(int rawOffset, int index) {
        return text.charAt(childTokenOffset(rawOffset) + index);
    }
    
    public int childTokenOffset(int rawOffset) {
        // Translate a raw (possibly gap-shifted) offset into a real offset in the input text.
        return (rawOffset < offsetGapStart()
                ? rawOffset
                : rawOffset - offsetGapLength());
    }
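
    // Worked example of the gap translation above (illustration only; the values
    // are hypothetical): with offsetGapStart() == 100 and offsetGapLength() == 40,
    // a raw offset of 60 lies before the gap and is returned unchanged, while a
    // raw offset of 140 lies behind the gap and maps to real offset 140 - 40 = 100.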
    
    public int tokenOffset(int index) {
        return elementOffset(index);
    }
    
    public int existingTokensEndOffset() {
        return elementOrEndOffset(tokenCountCurrent());
    }

    /**
     * Get modification count for which this token list was last updated
     * (mainly its cached start offset).
     */
    public int modCount() {
        return rootModCount;
    }
    
    public void incrementModCount() {
        rootModCount++;
    }
    
    public synchronized Object tokenOrEmbeddingContainer(int index) {
        return tokenOrEmbeddingContainerImpl(index);
    }
    
    private Object tokenOrEmbeddingContainerImpl(int index) {
        // Lazily lex additional tokens until the requested index exists
        // or the input is exhausted.
        while (lexerInputOperation != null && index >= size()) {
            Token token = lexerInputOperation.nextToken();
            if (token != null) { // lexer returned valid token
                updateElementOffsetAdd(token);
                add(token);
                laState = laState.add(lexerInputOperation.lookahead(),
                        lexerInputOperation.lexerState());
            } else { // no more tokens from lexer
                lexerInputOperation.release();
                lexerInputOperation = null;
                trimToSize();
                laState.trimToSize();
            }
        }
        return (index < size()) ? get(index) : null;
    }

    public synchronized AbstractToken<T> replaceFlyToken(
    int index, AbstractToken<T> flyToken, int offset) {
        TextToken<T> nonFlyToken = ((TextToken<T>)flyToken).createCopy(this, offset2Raw(offset));
        set(index, nonFlyToken);
        return nonFlyToken;
    }

    public synchronized void wrapToken(int index, EmbeddingContainer embeddingContainer) {
        set(index, embeddingContainer);
    }

    public InputAttributes inputAttributes() {
        return inputAttributes;
    }
    
    protected int elementRawOffset(Object elem) {
        return LexerUtilsConstants.token(elem).rawOffset();
    }
 
    protected void setElementRawOffset(Object elem, int rawOffset) {
        LexerUtilsConstants.token(elem).setRawOffset(rawOffset);
    }
    
    protected boolean isElementFlyweight(Object elem) {
        // token wrapper always contains non-flyweight token
        return (elem.getClass() != EmbeddingContainer.class)
            && ((AbstractToken)elem).isFlyweight();
    }
    
    protected int elementLength(Object elem) {
        return LexerUtilsConstants.token(elem).length();
    }
    
    private AbstractToken<T> existingToken(int index) {
        // Must use synced tokenOrEmbeddingContainer() because of possible change
        // of the underlying list impl when adding lazily requested tokens
        return LexerUtilsConstants.token(tokenOrEmbeddingContainer(index));
    }

    public Object tokenOrEmbeddingContainerUnsync(int index) {
        // Solely for token list updater or token hierarchy snapshots
        // having single-threaded exclusive write access
        return get(index);
    }
    
    public int lookahead(int index) {
        return laState.lookahead(index);
    }

    public Object state(int index) {
        return laState.state(index);
    }

    public int tokenCountCurrent() {
        return size();
    }

    public TokenList<? extends TokenId> root() {
        return this;
    }

    public TokenHierarchyOperation<?,? extends TokenId> tokenHierarchyOperation() {
        return tokenHierarchyOperation;
    }
    
    public LexerInputOperation<T> createLexerInputOperation(
    int tokenIndex, int relexOffset, Object relexState) {
        // Used for mutable lists only so maintain LA and state
        return new TextLexerInputOperation<T>(this, tokenIndex, relexState,
                text, 0, relexOffset, text.length());
    }

    public boolean isFullyLexed() {
        return (lexerInputOperation == null);
    }

    /**
     * Replace tokens affected by a text modification: remove the obsolete tokens,
     * shift and resize the offset gap according to the modification and add
     * the freshly created tokens together with their lookaheads and states.
     */
    public void replaceTokens(TokenHierarchyEventInfo eventInfo,
    TokenListChange<T> change, int removeTokenCount) {
        int index = change.index();
        // Remove obsolete tokens (original offsets are retained)
        Object[] removedTokensOrBranches = new Object[removeTokenCount];
        copyElements(index, index + removeTokenCount, removedTokensOrBranches, 0);
        int offset = change.offset();
        for (int i = 0; i < removeTokenCount; i++) {
            Object tokenOrEmbeddingContainer = removedTokensOrBranches[i];
            AbstractToken<T> token = LexerUtilsConstants.token(tokenOrEmbeddingContainer);
            if (!token.isFlyweight()) {
                updateElementOffsetRemove(token);
                token.setTokenList(null);
            }
            offset += token.length();
        }
        remove(index, removeTokenCount); // Retain original offsets
        laState.remove(index, removeTokenCount); // Remove lookaheads and states
        change.setRemovedTokens(removedTokensOrBranches);
        change.setRemovedEndOffset(offset);

        // Move and fix the gap according to the performed modification.
        int diffLength = eventInfo.insertedLength() - eventInfo.removedLength();
        if (offsetGapStart() != change.offset()) {
            // Minimum of the first removed index and the originally computed gap index
            moveOffsetGap(change.offset(), Math.min(index, change.offsetGapIndex()));
        }
        updateOffsetGapLength(-diffLength);

        // Add created tokens.
        List<AbstractToken<T>> addedTokens = change.addedTokens();
        if (addedTokens != null) {
            for (int i = 0; i < addedTokens.size(); i++) {
                AbstractToken<T> token = addedTokens.get(i);
                updateElementOffsetAdd(token);
            }
            addAll(index, addedTokens);
            laState = laState.addAll(index, change.laState());
            change.syncAddedTokenCount();
        }
    }
    
    private void releaseLexerInputOperation() {
        if (lexerInputOperation != null)
            lexerInputOperation.release();
    }

    public void refreshLexerInputOperation() {
        releaseLexerInputOperation();
        int lastTokenIndex = tokenCountCurrent() - 1;
        lexerInputOperation = createLexerInputOperation(
                lastTokenIndex + 1,
                existingTokensEndOffset(),
                (lastTokenIndex >= 0) ? state(lastTokenIndex) : null
        );
    }
    
    public void restartLexing() {
        // Called when tokens were fully removed and lexing should be restarted
        releaseLexerInputOperation();
        initLexing();
    }
    
    public boolean isContinuous() {
        return true;
    }

    public Set<T> skipTokenIds() {
        return null;
    }

    public String toString() {
        return LexerUtilsConstants.appendTokenList(null, this, -1).toString();
    }

}
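
For orientation, IncTokenList is an implementation detail behind the public org.netbeans.api.lexer entry points for mutable inputs. The sketch below shows how client code typically reads tokens through TokenHierarchy and TokenSequence; it is a minimal illustration only, not part of the file above, and MyTokenId.language() is a hypothetical placeholder for whichever registered Language the input actually uses.

import org.netbeans.api.lexer.Token;
import org.netbeans.api.lexer.TokenHierarchy;
import org.netbeans.api.lexer.TokenSequence;

public class TokenDumpSketch {
    public static void main(String[] args) {
        // Create a token hierarchy over an immutable character sequence.
        // For mutable inputs (e.g. documents) TokenHierarchy.get(document) is used
        // instead and the root token list is then maintained incrementally.
        TokenHierarchy<?> hierarchy =
                TokenHierarchy.create("int i = 0;", MyTokenId.language()); // MyTokenId: hypothetical token id class
        TokenSequence<?> ts = hierarchy.tokenSequence();
        while (ts.moveNext()) { // tokens are lexed lazily as the sequence is walked
            Token<?> token = ts.token();
            System.out.println(token.id() + " at " + ts.offset() + ": \"" + token.text() + "\"");
        }
    }
}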