/*
 * The contents of this file are subject to the terms of the Common Development
 * and Distribution License (the License). You may not use this file except in
 * compliance with the License.
 *
 * You can obtain a copy of the License at http://www.netbeans.org/cddl.html
 * or http://www.netbeans.org/cddl.txt.
 *
 * When distributing Covered Code, include this CDDL Header Notice in each file
 * and include the License file at http://www.netbeans.org/cddl.txt.
 * If applicable, add the following below the CDDL Header, with the fields
 * enclosed by brackets [] replaced by your own identifying information:
 * "Portions Copyrighted [year] [name of copyright owner]"
 *
 * The Original Software is NetBeans. The Initial Developer of the Original
 * Software is Sun Microsystems, Inc. Portions Copyright 1997-2007 Sun
 * Microsystems, Inc. All Rights Reserved.
 */

package org.netbeans.lib.lexer;

import java.util.List;
import java.util.Set;
import org.netbeans.api.lexer.LanguagePath;
import org.netbeans.lib.editor.util.FlyOffsetGapList;
import org.netbeans.lib.lexer.inc.MutableTokenList;
import org.netbeans.api.lexer.InputAttributes;
import org.netbeans.api.lexer.Token;
import org.netbeans.api.lexer.TokenId;
import org.netbeans.lib.lexer.inc.TokenHierarchyEventInfo;
import org.netbeans.lib.lexer.inc.TokenListChange;
import org.netbeans.spi.lexer.LanguageEmbedding;
import org.netbeans.lib.lexer.token.AbstractToken;
import org.netbeans.lib.lexer.token.TextToken;

/**
 * Embedded token list maintains a list of tokens
 * on a particular embedded language level.
 * <br>
 * The physical storage contains a gap to speed up list modifications
 * during typing in a document when tokens are typically added/removed
 * at the same index in the list.
 *
 * <p>
 * There is an intent to not degrade performance significantly
 * with each extra language embedding level so the token list maintains
 * a direct link to the root level.
 *
 * @author Miloslav Metelka
 * @version 1.00
 */

public final class EmbeddedTokenList<T extends TokenId>
extends FlyOffsetGapList<Object> implements MutableTokenList<T> {

    /** Flag for additional correctness checks (may degrade performance). */
    private static final boolean testing = Boolean.getBoolean("netbeans.debug.lexer.test");

    /**
     * Embedding container carries info about the token into which this
     * token list is embedded.
     */
    private final EmbeddingContainer<? extends TokenId> embeddingContainer; // 36 bytes (32-super + 4)

    /**
     * Language embedding for this embedded token list.
     */
    private final LanguageEmbedding<T> embedding; // 40 bytes

    /**
     * Language path of this token list.
     */
    private final LanguagePath languagePath; // 44 bytes

    /**
     * Storage for lookaheads and states.
     * <br/>
     * It's only initialized (non-null) for mutable token lists
     * or when in the testing environment.
     */
    private LAState laState; // 48 bytes

    /**
     * Next embedded token list forming a single-linked list.
     */
    private EmbeddedTokenList<? extends TokenId> nextEmbedding; // 52 bytes

    public EmbeddedTokenList(EmbeddingContainer<? extends TokenId> embeddingContainer,
    LanguagePath languagePath, LanguageEmbedding<T> embedding,
    EmbeddedTokenList<? extends TokenId> nextEmbedding) {
        this.embeddingContainer = embeddingContainer;
        this.languagePath = languagePath;
        this.embedding = embedding;
        this.nextEmbedding = nextEmbedding;

        if (embeddingContainer.rootTokenList().modCount() != -1 || testing) {
            this.laState = LAState.empty(); // Store lookaheads and states
        }

        init();
    }

    private void init() {
        // Lex the whole input represented by the token at once
        LexerInputOperation<T> lexerInputOperation = createLexerInputOperation(
                0, startOffset(), null);
        AbstractToken<T> token = lexerInputOperation.nextToken();
        while (token != null) {
            updateElementOffsetAdd(token); // must subtract startOffset()
            add(token);
            if (laState != null) {
                laState = laState.add(lexerInputOperation.lookahead(),
                        lexerInputOperation.lexerState());
            }
            token = lexerInputOperation.nextToken();
        }
        lexerInputOperation.release();
        lexerInputOperation = null;

        trimToSize(); // Compact storage
        if (laState != null)
            laState.trimToSize();
    }

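    /**
     * Get the next embedded token list in the single-linked list of embeddings
     * (see the nextEmbedding field).
     */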
    EmbeddedTokenList<? extends TokenId> nextEmbedding() {
        return nextEmbedding;
    }

    void setNextEmbedding(EmbeddedTokenList<? extends TokenId> nextEmbedding) {
        this.nextEmbedding = nextEmbedding;
    }

    public LanguagePath languagePath() {
        return languagePath;
    }

    public int tokenCount() {
        // Initialized at once so there is no need to check whether lexing is finished
        return size();
    }

    public synchronized Object tokenOrEmbeddingContainer(int index) {
        // Assuming all the tokens are lexed since the beginning and after updates
        return (index < size()) ? get(index) : null;
    }

    private Token existingToken(int index) {
        // Tokens are not created lazily -> use the regular unsync tokenOrEmbeddingContainer()
        return LexerUtilsConstants.token(tokenOrEmbeddingContainer(index));
    }

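    /**
     * Get the stored lookahead for the token at the given index,
     * or -1 if lookaheads and states are not maintained for this list.
     */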
    public int lookahead(int index) {
        return (laState != null) ? laState.lookahead(index) : -1;
    }

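    /**
     * Get the stored lexer state for the token at the given index,
     * or null if lookaheads and states are not maintained for this list.
     */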
    public Object state(int index) {
        return (laState != null) ? laState.state(index) : null;
    }

    /**
     * Returns absolute offset of the token at the given index
     * (startOffset gets added to the child token's real offset).
     * <br/>
     * For token hierarchy snapshots the returned value is corrected
     * in the TokenSequence explicitly by adding TokenSequence.tokenOffsetDiff.
     */
    public int tokenOffset(int index) {
        return elementOffset(index);
    }

    public int childTokenOffset(int rawOffset) {
        // Need to make sure that the startOffset is up-to-date
        embeddingContainer.updateOffsets();
        return embeddingContainer.tokenStartOffset() + embedding.startSkipLength()
            + childTokenRelOffset(rawOffset);
    }

    /**
     * Get the difference between the start offset of the particular child token
     * and the start offset of the root token.
     */
    public int childTokenOffsetShift(int rawOffset) {
        // Need to make sure that the startOffsetShift is up-to-date
        updateStartOffset();
        return embeddingContainer.rootTokenOffsetShift() + childTokenRelOffset(rawOffset);
    }

    /**
     * Get the child token's real offset which is always a value relative
     * to the startOffset value.
     */
    private int childTokenRelOffset(int rawOffset) {
        return (rawOffset < offsetGapStart())
                ? rawOffset
                : rawOffset - offsetGapLength();
    }

    public char childTokenCharAt(int rawOffset, int index) {
        // Do not update the start offset shift - the token.text()
        // did it before returning its result and its contract
        // specifies that.
        // Return chars by delegating to rootToken
        return embeddingContainer.charAt(
                embedding.startSkipLength() + childTokenRelOffset(rawOffset) + index);
    }

    public int modCount() {
        // Delegate to root to have the most up-to-date value for token sequence's check.
        return root().modCount();
    }

    protected int startOffset() { // used by FlyOffsetGapList
        return embeddingContainer.tokenStartOffset() + embedding.startSkipLength();
    }

    public void updateStartOffset() {
        embeddingContainer.updateOffsets();
    }

    public TokenList<? extends TokenId> root() {
        return embeddingContainer.rootTokenList();
    }

    public TokenHierarchyOperation<?,? extends TokenId> tokenHierarchyOperation() {
        return root().tokenHierarchyOperation();
    }

    public AbstractToken<? extends TokenId> rootToken() {
        return embeddingContainer.rootToken();
    }

    protected int elementRawOffset(Object elem) {
        return (elem.getClass() == EmbeddingContainer.class)
            ? ((EmbeddingContainer)elem).token().rawOffset()
            : ((AbstractToken<? extends TokenId>)elem).rawOffset();
    }

    protected void setElementRawOffset(Object elem, int rawOffset) {
        if (elem.getClass() == EmbeddingContainer.class)
            ((EmbeddingContainer)elem).token().setRawOffset(rawOffset);
        else
            ((AbstractToken<? extends TokenId>)elem).setRawOffset(rawOffset);
    }

    protected boolean isElementFlyweight(Object elem) {
        // A token wrapper always contains a non-flyweight token
        return (elem.getClass() != EmbeddingContainer.class)
            && ((AbstractToken<? extends TokenId>)elem).isFlyweight();
    }

    protected int elementLength(Object elem) {
        return LexerUtilsConstants.token(elem).length();
    }

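    /**
     * Replace the flyweight token at the given index with a non-flyweight
     * copy positioned at the given offset and return the copy.
     */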
    public synchronized AbstractToken<T> replaceFlyToken(
    int index, AbstractToken<T> flyToken, int offset) {
        TextToken<T> nonFlyToken = ((TextToken<T>)flyToken).createCopy(this, offset2Raw(offset));
        set(index, nonFlyToken);
        return nonFlyToken;
    }

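    /**
     * Store the given embedding container at the given index in place of the token
     * that it wraps.
     */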
    public synchronized void wrapToken(int index, EmbeddingContainer embeddingContainer) {
        set(index, embeddingContainer);
    }

    public InputAttributes inputAttributes() {
        return root().inputAttributes();
    }

    // MutableTokenList extra methods
    public Object tokenOrEmbeddingContainerUnsync(int index) {
        return get(index);
    }

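    /**
     * Current number of tokens in the list; the whole input is lexed in init()
     * so this equals tokenCount().
     */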
    public int tokenCountCurrent() {
        return size();
    }

    public LexerInputOperation<T> createLexerInputOperation(
    int tokenIndex, int relexOffset, Object relexState) {
        CharSequence tokenText = embeddingContainer.token().text();
        int tokenStartOffset = embeddingContainer.tokenStartOffset();
        int endOffset = tokenStartOffset + tokenText.length()
            - embedding.endSkipLength();
        // Do not need to update offset - clients
        // (constructor or token list updater) call updateStartOffset()
        // before calling this method
        return new TextLexerInputOperation<T>(this, tokenIndex, relexState, tokenText,
                tokenStartOffset, relexOffset, endOffset);
    }

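    /**
     * Always true - the embedded token list lexes its whole input in init().
     */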
    public boolean isFullyLexed() {
        return true;
    }

    public void replaceTokens(TokenHierarchyEventInfo eventInfo,
    TokenListChange<T> change, int removeTokenCount) {
        int index = change.index();
        // Remove obsolete tokens (original offsets are retained)
        Object[] removedTokensOrEmbeddingContainers = new Object[removeTokenCount];
        copyElements(index, index + removeTokenCount, removedTokensOrEmbeddingContainers, 0);
        int offset = change.offset();
        for (int i = 0; i < removeTokenCount; i++) {
            Object tokenOrEmbeddingContainer = removedTokensOrEmbeddingContainers[i];
            AbstractToken<T> token = LexerUtilsConstants.token(tokenOrEmbeddingContainer);
            if (!token.isFlyweight()) {
                updateElementOffsetRemove(token);
                token.setTokenList(null);
            }
            offset += token.length();
        }
        remove(index, removeTokenCount); // Retain original offsets
        laState.remove(index, removeTokenCount); // Remove lookaheads and states
        change.setRemovedTokens(removedTokensOrEmbeddingContainers);
        change.setRemovedEndOffset(offset);

        // Move and fix the gap according to the performed modification.
        int diffLength = eventInfo.insertedLength() - eventInfo.removedLength();
        if (offsetGapStart() != change.offset()) {
            // Minimum of the index of the first removed token and the original computed index
            moveOffsetGap(change.offset(), Math.min(index, change.offsetGapIndex()));
        }
        updateOffsetGapLength(-diffLength);

        // Add created tokens.
        List<AbstractToken<T>> addedTokens = change.addedTokens();
        if (addedTokens != null) {
            for (Token token : addedTokens) {
                updateElementOffsetAdd(token);
            }
            addAll(index, addedTokens);
            laState = laState.addAll(index, change.laState());
            change.syncAddedTokenCount();
        }
    }

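    /**
     * Always true - the tokens cover the embedded input without gaps
     * (no token ids are skipped, see skipTokenIds()).
     */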
    public boolean isContinuous() {
        return true;
    }

    public Set<T> skipTokenIds() {
        return null;
    }

    public String toString() {
        return LexerUtilsConstants.appendTokenList(null, this, -1).toString();
    }

}