KickJava   Java API By Example, From Geeks To Geeks.

Java > Open Source Codes > org > netbeans > lib > lexer > SubSequenceTokenList


/*
 * The contents of this file are subject to the terms of the Common Development
 * and Distribution License (the License). You may not use this file except in
 * compliance with the License.
 *
 * You can obtain a copy of the License at http://www.netbeans.org/cddl.html
 * or http://www.netbeans.org/cddl.txt.
 *
 * When distributing Covered Code, include this CDDL Header Notice in each file
 * and include the License file at http://www.netbeans.org/cddl.txt.
 * If applicable, add the following below the CDDL Header, with the fields
 * enclosed by brackets [] replaced by your own identifying information:
 * "Portions Copyrighted [year] [name of copyright owner]"
 *
 * The Original Software is NetBeans. The Initial Developer of the Original
 * Software is Sun Microsystems, Inc. Portions Copyright 1997-2007 Sun
 * Microsystems, Inc. All Rights Reserved.
 */

package org.netbeans.lib.lexer;

import java.util.Set;

import org.netbeans.api.lexer.InputAttributes;
import org.netbeans.api.lexer.LanguagePath;
import org.netbeans.api.lexer.TokenId;
import org.netbeans.lib.lexer.token.AbstractToken;
28 /**
29  * Filtering token list used by a token sub sequence.
30  * <br/>
31  * As the tokens are created lazily this list won't call tokenList.tokenCount()
32  * until tokenCount() is called on itself.
33  *
34  * <p>
35  * This list assumes single-threaded use only.
36  * </p>
37  *
38  * @author Miloslav Metelka
39  * @version 1.00
40  */

41
42 public final class SubSequenceTokenList<T extends TokenId> implements TokenList<T> {
43     
44     /**
45      * Token list to which this filtering token list delegates.
46      */

47     private TokenList<T> tokenList;
48     
49     /**
50      * Last retrieved token's end offset.
51      */

52     private AbstractToken<T> lastToken;
53     
54     /**
55      * Last retrieved token index.
56      */

57     private int lastTokenIndex;
58     
59     /**
60      * Last retrieved token's offset.
61      */

62     private int lastTokenOffset;
63     
64     /**
65      * Limit of start offset under which the token sequence cannot move.
66      * Integer.MIN_VALUE for no limit.
67      */

68     private final int limitStartOffset;
69     
70     /**
71      * Limit of the end offset under which the token sequence cannot move.
72      * Integer.MAX_VALUE for no limit.
73      */

74     private final int limitEndOffset;
75
76     /**
77      * Index of a first token in the underlying token list that this list provides.
78      */

79     private int limitStartIndex;
80     
81     /**
82      * Initially Integer.MAX_VALUE to be computed lazily.
83      */

84     private int limitEndIndex;
85     
86     public SubSequenceTokenList(TokenList<T> tokenList, int limitStartOffset, int limitEndOffset) {
87         this.tokenList = tokenList;
88         this.limitStartOffset = limitStartOffset;
89         this.limitEndOffset = limitEndOffset;
90         
91         // Compute limitStartIndex
92
if (limitStartOffset > 0) {
93             int diff = move(limitStartOffset);
94             if (diff != Integer.MAX_VALUE) { // some tokens exist
95
if (diff >= lastToken.length()) { // lastToken initialized in move()
96
lastTokenIndex++;
97                     Object JavaDoc tokenOrEmbeddingContainer = tokenList.tokenOrEmbeddingContainer(lastTokenIndex);
98                     if (tokenOrEmbeddingContainer != null &&
99                         (lastTokenOffset = tokenList.tokenOffset(lastTokenIndex)) < limitEndOffset
100                     ) {
101                         lastToken = LexerUtilsConstants.token(tokenOrEmbeddingContainer);
102                         limitStartIndex = lastTokenIndex;
103                         limitEndIndex = Integer.MAX_VALUE; // To be computed later
104
} // Otherwise limitStartIndex and limitEndIndex remain zero => no tokens
105

106                 } else { // Check if the token is not below end offset limit
107
if (limitEndOffset == Integer.MAX_VALUE || lastTokenOffset < limitEndOffset) {
108                         limitStartIndex = lastTokenIndex;
109                         limitEndIndex = Integer.MAX_VALUE; // To be computed later
110
} // Otherwise limitStartIndex and limitEndIndex remain zero => no tokens
111
}
112             } // Otherwise limitStartIndex and limitEndIndex remain zero => no tokens
113

114         } else {// Lower bound is zero => limitStartIndex is zero
115
// Check first token (done here for simpler tokenCount() etc.)
116
Object JavaDoc tokenOrEmbeddingContainer = tokenList.tokenOrEmbeddingContainer(0);
117             if (tokenOrEmbeddingContainer != null && (lastTokenOffset = tokenList.tokenOffset(0)) < limitEndOffset) {
118                 lastToken = LexerUtilsConstants.token(tokenOrEmbeddingContainer); // lastTokenIndex remains zero
119
limitEndIndex = Integer.MAX_VALUE;
120             } // Otherwise limitEndIndex remain zero => no tokens
121
}
122     }
123
124     public TokenList<T> delegate() {
125         return tokenList;
126     }
127     
128     public int limitStartOffset() {
129         return limitStartOffset;
130     }
131     
132     public int limitEndOffset() {
133         return limitEndOffset;
134     }
135     
136     public Object JavaDoc tokenOrEmbeddingContainer(int index) {
137         if (limitStartIndex == -1) // No tokens
138
return null;
139         index += limitStartIndex; // Shift to underlying tokenList indices
140
if (limitEndIndex == Integer.MAX_VALUE) { // Not initialized yet
141
switch (index - lastTokenIndex) {
142                 case -1: // Prev to lastToken - must exist
143
if (index < limitStartIndex)
144                         return null;
145                     Object JavaDoc tokenOrEmbeddingContainer = tokenList.tokenOrEmbeddingContainer(index);
146                     AbstractToken<T> token = LexerUtilsConstants.token(tokenOrEmbeddingContainer);
147                     lastTokenIndex = index;
148                     // If the token list is continuous or the original token
149
// is flyweight (there cannot be a gap before flyweight token)
150
// the original offset can be just decreased
151
// by the fetched token's length.
152
if (tokenList.isContinuous() || lastToken.isFlyweight())
153                         lastTokenOffset = lastTokenOffset - token.length();
154                     else // Compute offset through tokenList
155
lastTokenOffset = tokenList.tokenOffset(index);
156                     lastToken = token;
157                     return tokenOrEmbeddingContainer;
158
159                 case 0: // Last token
160
return lastToken;
161
162                 case 1: // Next to lastToken
163
tokenOrEmbeddingContainer = tokenList.tokenOrEmbeddingContainer(index);
164                     if (tokenOrEmbeddingContainer != null) {
165                         token = LexerUtilsConstants.token(tokenOrEmbeddingContainer);
166                         // If the token list is continuous or the fetched token
167
// is flyweight (there cannot be a gap before flyweight token)
168
// the original offset can be just increased
169
// by the original token's length.
170
int tokenOffset;
171                         if (tokenList.isContinuous() || token.isFlyweight())
172                             tokenOffset = lastTokenOffset + lastToken.length();
173                         else // Offset must be recomputed
174
tokenOffset = tokenList.tokenOffset(index);
175                         // Check the offset to be below upper bound
176
if (tokenOffset < limitEndOffset) { // below upper bound
177
lastToken = token;
178                             lastTokenIndex = index;
179                             lastTokenOffset = tokenOffset;
180                             return tokenOrEmbeddingContainer;
181                         } // above upper bound
182
}
183                     limitEndIndex = index; // lastToken at prev index was valid so may assign this
184
return null;
185
186                 default: // Not related to lastToken
187
tokenOrEmbeddingContainer = tokenList.tokenOrEmbeddingContainer(index);
188                     if (tokenOrEmbeddingContainer != null) {
189                         int tokenOffset = tokenList.tokenOffset(index);
190                         // Check the offset to be below upper bound
191
if (tokenOffset < limitEndOffset) { // below upper offset bound
192
lastToken = LexerUtilsConstants.token(tokenOrEmbeddingContainer);
193                             lastTokenIndex = index;
194                             lastTokenOffset = tokenOffset;
195                             return tokenOrEmbeddingContainer;
196                         } // >=limitEndOffset
197
} // index too high
198
// As the null gets returned all the tokens that could
199
// possibly be lazily created would already got initialized anyway.
200
// Call tokenCount() to initialize limitEndIndex and not duplicate
201
// optimizations similar to the ones in TokenSequence
202
// for offset retrieval here.
203
tokenCount();
204                     return null;
205             }
206
207         } else { // limitEndIndex already inited (won't be -1 - checked above)
208
// As limitEndIndex is inited it will no longer use lastToken caching
209
// because TokenSequence will use its own similar caching for token offsets.
210
return (index < limitEndIndex)
211                 ? tokenList.tokenOrEmbeddingContainer(index)
212                 : null;
213         }
214     }
215
216     public int tokenOffset(int index) {
217         index += limitStartIndex;
218         if (index == lastTokenIndex) {
219             return lastTokenOffset;
220         }
221         return tokenList.tokenOffset(index);
222     }
223
224     public int tokenCount() {
225         if (limitEndIndex == Integer.MAX_VALUE) { // Not computed yet
226
// Position to lower offset but retain diff against exact limitEndOffset
227
int diff = move(limitEndOffset - 1);
228             assert (diff != Integer.MAX_VALUE); // Should already be handled in constructor
229
limitEndIndex = lastTokenIndex + 1; // add extra 1 to become end index
230
}
231         return limitEndIndex - limitStartIndex;
232     }
233
234     public int tokenCountCurrent() {
235         if (limitEndIndex != Integer.MAX_VALUE) // Handle no tokens properly
236
return tokenCount();
237         int tcc = tokenList.tokenCountCurrent(); // cannot be < limitStartIndex due to constructor
238
if (tokenOffset(tcc - 1 - limitStartIndex) >= limitEndOffset) // Above limit
239
return tokenCount();
240         return tcc - limitStartIndex;
241     }
242
243     public AbstractToken<T> replaceFlyToken(int index, AbstractToken<T> flyToken, int offset) {
244         return tokenList.replaceFlyToken(index + limitStartIndex, flyToken, offset);
245     }
246
247     public int modCount() {
248         return tokenList.modCount();
249     }
250
251     public LanguagePath languagePath() {
252         return tokenList.languagePath();
253     }
254
255     public int childTokenOffset(int rawOffset) {
256         throw new IllegalStateException JavaDoc("Unexpected call.");
257     }
258
259     public char childTokenCharAt(int rawOffset, int index) {
260         throw new IllegalStateException JavaDoc("Unexpected call.");
261     }
262
263     public void wrapToken(int index, EmbeddingContainer<T> embeddingContainer) {
264         tokenList.wrapToken(limitStartIndex + index, embeddingContainer);
265     }
266
267     public TokenList<? extends TokenId> root() {
268         return tokenList.root();
269     }
270
271     public TokenHierarchyOperation<?,? extends TokenId> tokenHierarchyOperation() {
272         return tokenList.tokenHierarchyOperation();
273     }
274     
275     public InputAttributes inputAttributes() {
276         return tokenList.inputAttributes();
277     }
278
279     public int lookahead(int index) {
280         // Can be used by LexerTestUtilities.lookahead()
281
return tokenList.lookahead(index);
282     }
283
284     public Object JavaDoc state(int index) {
285         return tokenList.state(index);
286     }
287
288     public boolean isContinuous() {
289         return tokenList.isContinuous();
290     }
291
292     public Set JavaDoc<T> skipTokenIds() {
293         return tokenList.skipTokenIds();
294     }
295     
296     private AbstractToken<T> token(int index) {
297         return LexerUtilsConstants.token(tokenList, index);
298     }
299     
300     /**
301      * Find the token index for the given offset and place it into lastTokenIndex.
302      * <br/>
303      * Diff between requested offset and offset of the token at lastTokenIndex
304      * is returned.
305      * <br/>
306      * Returns Integer.MAX_VALUE if there are no tokens in the underlying
307      * token list.
308      */

309     private int move(int offset) {
310         int tokenCount = tokenList.tokenCountCurrent(); // presently created token count
311
if (tokenCount == 0) { // no tokens yet -> attempt to create at least one
312
if (tokenList.tokenOrEmbeddingContainer(0) == null) { // really no tokens at all
313
// In this case the token sequence could not be positioned yet
314
// so no need to reset "index" or other vars
315
return Integer.MAX_VALUE;
316             }
317             // Re-get the present token count (could be created a chunk of tokens at once)
318
tokenCount = tokenList.tokenCountCurrent();
319         }
320
321         // tokenCount surely >0
322
lastTokenOffset = tokenList.tokenOffset(tokenCount - 1);
323         if (offset > lastTokenOffset) { // may need to create further tokens if they do not exist
324
// Force token list to create subsequent tokens
325
// Cannot subtract offset by each token's length because
326
// there may be gaps between tokens due to token id filter use.
327
lastToken = token(tokenCount - 1);
328             int tokenLength = lastToken.length();
329             while (offset >= lastTokenOffset + tokenLength) { // above present token
330
Object JavaDoc tokenOrEmbeddingContainer = tokenList.tokenOrEmbeddingContainer(tokenCount);
331                 if (tokenOrEmbeddingContainer != null) {
332                     lastToken = LexerUtilsConstants.token(tokenOrEmbeddingContainer);
333                     if (lastToken.isFlyweight()) { // need to use previous tokenLength
334
lastTokenOffset += tokenLength;
335                     } else { // non-flyweight token - retrieve offset
336
lastTokenOffset = tokenList.tokenOffset(tokenCount);
337                     }
338                     tokenLength = lastToken.length();
339                     tokenCount++;
340
341                 } else { // no more tokens => break
342
break;
343                 }
344             }
345             lastTokenIndex = tokenCount - 1;
346             return offset - lastTokenOffset;
347         }
348         
349         // The offset is within the currently recognized tokens
350
// Use binary search
351
int low = 0;
352         int high = tokenCount - 1;
353         
354         while (low <= high) {
355             int mid = (low + high) / 2;
356             int midStartOffset = tokenList.tokenOffset(mid);
357             
358             if (midStartOffset < offset) {
359                 low = mid + 1;
360             } else if (midStartOffset > offset) {
361                 high = mid - 1;
362             } else { // Token starting exactly at offset found
363
lastToken = token(mid);
364                 lastTokenIndex = mid;
365                 lastTokenOffset = midStartOffset;
366                 return 0; // right at the token begining
367
}
368         }
369         
370         // Not found exactly and high + 1 == low => high < low
371
// Check whether the token at "high" contains the offset
372
if (high < 0) { // could be -1
373
high = 0;
374         }
375         lastToken = token(high);
376         lastTokenOffset = tokenList.tokenOffset(high);
377         lastTokenIndex = high;
378         return offset - lastTokenOffset;
379     }
380
381 }
Popular Tags