package org.netbeans.lib.lexer;

import org.netbeans.api.lexer.TokenId;
import org.netbeans.spi.lexer.LexerInput;

/**
 * Lexer input operation over a {@link PreprocessedTextStorage}. It tracks the
 * raw-length shifts of the preprocessed characters so that indexes and lengths
 * measured in the preprocessed text can be translated back into the raw
 * (underlying) text.
 */
public final class PreprocessedTextLexerInputOperation<T extends TokenId> extends TextLexerInputOperation<T> {

    private final PreprocessedTextStorage preprocessedText;

    /** Start index of the preprocessed characters relevant to the current token. */
    private int prepStartIndex;

    /** End index (exclusive) of the preprocessed characters relevant to the current token. */
    private int prepEndIndex;

    /** Raw length shift accumulated up to the start of the current token. */
    private int tokenStartRawLengthShift;

    /** Raw length shift of the most recently read character. */
    private int lastRawLengthShift;

    /** Raw length shift at the last character of the most recently recognized token. */
    private int tokenEndRawLengthShift;

    public PreprocessedTextLexerInputOperation(TokenList<T> tokenList, PreprocessedTextStorage prepText) {
        this(tokenList, 0, null, prepText, 0, 0, prepText.length());
    }

    public PreprocessedTextLexerInputOperation(TokenList<T> tokenList, int tokenIndex,
            Object lexerRestartState, PreprocessedTextStorage prepText, int prepTextStartOffset,
            int startOffset, int endOffset) {
        super(tokenList, tokenIndex, lexerRestartState, prepText,
                prepTextStartOffset, startOffset, endOffset);
        this.preprocessedText = prepText;
        int index = startOffset - prepTextStartOffset;
        if (index > 0) {
            tokenStartRawLengthShift = preprocessedText.rawLengthShift(index);
            lastRawLengthShift = tokenStartRawLengthShift;
        }
        preprocessingLevelCount++;
    }

    /**
     * Translate a length measured in preprocessed characters (from the token start)
     * into the corresponding length in the raw text.
     */
    public int deepRawLength(int length) {
        return length + preprocessedText.rawLengthShift(tokenStartIndex() + length - 1)
                - tokenStartRawLengthShift;
    }

    /**
     * Raw length shift at the given index relative to the token start.
     */
    public int deepRawLengthShift(int index) {
        return preprocessedText.rawLengthShift(tokenStartIndex() + index)
                - tokenStartRawLengthShift;
    }

    public int read(int index) {
        index += tokenStartIndex();
        if (index < readEndIndex()) {
            int rls = preprocessedText.rawLengthShift(index);
            if (rls != lastRawLengthShift) { // the character at index is preprocessed
                lastRawLengthShift = rls;
                if (prepStartIndex >= index) { // move the recorded start of the preprocessed area to this character
                    prepStartIndex = index;
                }
                prepEndIndex = index + 1;
            }
            return preprocessedText.charAt(index);
        } else {
            return LexerInput.EOF;
        }
    }

    public void tokenRecognized(int tokenLength) {
        super.tokenRecognized(tokenLength);
        // Remember the raw length shift at the last character of the recognized token.
        tokenEndRawLengthShift = preprocessedText.rawLengthShift(
                tokenStartIndex() + tokenLength() - 1);
    }

    public void tokenApproved() {
        tokenStartRawLengthShift += tokenEndRawLengthShift;

        // Rebase the preprocessed-character bounds relative to the next token's start.
        if (prepStartIndex != Integer.MAX_VALUE) {
            if (prepStartIndex < tokenLength()) { // preprocessed chars start inside the token
                if (prepEndIndex <= tokenLength()) { // and they also end inside it
                    prepStartIndex = Integer.MAX_VALUE; // no preprocessed chars remain
                } else { // they continue past the token's end
                    prepStartIndex = 0;
                    prepEndIndex -= tokenLength();
                }
            } else { // all preprocessed chars lie past the token's end
                prepStartIndex -= tokenLength();
                prepEndIndex -= tokenLength();
            }
        }
        super.tokenApproved();
    }

    public void collectExtraPreprocessedChars(CharProvider.ExtraPreprocessedChars epc,
            int prepStartIndex, int prepEndIndex, int topPrepEndIndex) {
        if (prepStartIndex < tokenLength()) { // preprocessed chars present inside the token
            // Extra preprocessed characters known at this level before the given start index.
            int preCount = Math.max(prepStartIndex - this.prepStartIndex, 0);
            int postCount;
            if (this.prepEndIndex > tokenLength()) {
                postCount = tokenLength() - prepEndIndex;
                if (postCount > 0) {
                    int i = tokenLength() - 2;
                    while (--i >= prepStartIndex && postCount > 0
                            && preprocessedText.rawLengthShift(i + tokenStartIndex()) == tokenEndRawLengthShift
                    ) {
                        postCount--;
                    }
                } else {
                    postCount = 0;
                }

            } else {
                postCount = this.prepEndIndex - prepEndIndex;
            }

            assert (preCount >= 0 && postCount >= 0);
            epc.ensureExtraLength(preCount + postCount);
            while (--preCount >= 0) {
                epc.insert(readExisting(prepStartIndex - 1), deepRawLength(prepStartIndex) - prepStartIndex);
                prepStartIndex--;
            }
            while (--postCount >= 0) {
                epc.append(readExisting(prepEndIndex), deepRawLength(prepEndIndex) - topPrepEndIndex);
                prepEndIndex++;
                topPrepEndIndex++;
            }
        }
    }

}
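/*
 * Illustrative sketch (not part of the NetBeans sources above): the class relies on
 * PreprocessedTextStorage.rawLengthShift(index), which is assumed here to return how many
 * characters longer the raw (underlying) text is than the preprocessed text up to and
 * including the character at the given index. For example, when the raw sequence "\u0041"
 * (6 characters) is preprocessed into the single character 'A', the shift grows by 5 at
 * that position. deepRawLength(length) then converts a token length measured in
 * preprocessed characters back to a raw-text length. The hypothetical standalone demo
 * below reproduces that arithmetic with a hard-coded shift table and no NetBeans API.
 */
class RawLengthShiftDemo {

    public static void main(String[] args) {
        // Raw text "a\u0041b" (8 chars) preprocesses to "aAb" (3 chars).
        // Assumed rawLengthShift per preprocessed index: 'a' -> 0, 'A' -> 5, 'b' -> 5.
        int[] rawLengthShift = {0, 5, 5};
        int tokenStartIndex = 0;           // the token starts at preprocessed index 0
        int tokenStartRawLengthShift = 0;  // shift accumulated before the token

        // Analogue of deepRawLength(): preprocessed length -> raw length.
        for (int length = 1; length <= rawLengthShift.length; length++) {
            int rawLength = length + rawLengthShift[tokenStartIndex + length - 1]
                    - tokenStartRawLengthShift;
            System.out.println("preprocessed length " + length + " -> raw length " + rawLength);
        }
        // Prints: 1 -> 1, 2 -> 7, 3 -> 8.
    }
}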