Skip to content
Merged
Show file tree
Hide file tree
Changes from 3 commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
Original file line number Diff line number Diff line change
Expand Up @@ -385,7 +385,7 @@ private SemanticTokensWithRegistrationOptions getSemanticTokensProvider() {
fullOptions.setDelta(Boolean.TRUE);
semanticTokensProvider.setFull(fullOptions);

semanticTokensProvider.setRange(Boolean.FALSE);
semanticTokensProvider.setRange(Boolean.TRUE);
return semanticTokensProvider;
}

Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -101,6 +101,7 @@
import org.eclipse.lsp4j.SemanticTokensDelta;
import org.eclipse.lsp4j.SemanticTokensDeltaParams;
import org.eclipse.lsp4j.SemanticTokensParams;
import org.eclipse.lsp4j.SemanticTokensRangeParams;
import org.eclipse.lsp4j.SymbolInformation;
import org.eclipse.lsp4j.TextDocumentClientCapabilities;
import org.eclipse.lsp4j.TextDocumentContentChangeEvent;
Expand Down Expand Up @@ -367,6 +368,19 @@ public CompletableFuture<Either<SemanticTokens, SemanticTokensDelta>> semanticTo
);
}

@Override
public CompletableFuture<SemanticTokens> semanticTokensRange(SemanticTokensRangeParams params) {
  var uri = params.getTextDocument().getUri();
  var document = context.getDocumentUnsafe(uri);
  if (document == null) {
    // Unknown document - reply with an empty result instead of failing.
    return CompletableFuture.completedFuture(null);
  }

  // Recompute on a fresh document context so the tokens match the latest content.
  return withFreshDocumentContext(
    document,
    () -> semanticTokensProvider.getSemanticTokensRange(document, params)
  );
}

@Override
public CompletableFuture<List<CallHierarchyIncomingCall>> callHierarchyIncomingCalls(
CallHierarchyIncomingCallsParams params
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -30,11 +30,13 @@
import jakarta.annotation.PostConstruct;
import jakarta.annotation.PreDestroy;
import lombok.RequiredArgsConstructor;
import org.eclipse.lsp4j.Range;
import org.eclipse.lsp4j.SemanticTokens;
import org.eclipse.lsp4j.SemanticTokensDelta;
import org.eclipse.lsp4j.SemanticTokensDeltaParams;
import org.eclipse.lsp4j.SemanticTokensEdit;
import org.eclipse.lsp4j.SemanticTokensParams;
import org.eclipse.lsp4j.SemanticTokensRangeParams;
import org.eclipse.lsp4j.jsonrpc.messages.Either;
import org.springframework.context.event.EventListener;
import org.springframework.stereotype.Component;
Expand All @@ -57,7 +59,8 @@
/**
* Провайдер для предоставления семантических токенов.
* <p>
* Обрабатывает запросы {@code textDocument/semanticTokens/full} и {@code textDocument/semanticTokens/full/delta}.
* Обрабатывает запросы {@code textDocument/semanticTokens/full}, {@code textDocument/semanticTokens/full/delta}
* и {@code textDocument/semanticTokens/range}.
*
* @see <a href="https://microsoft.github.io/language-server-protocol/specifications/specification-current/#textDocument_semanticTokens">Semantic Tokens specification</a>
*/
Expand Down Expand Up @@ -164,6 +167,81 @@ public Either<SemanticTokens, SemanticTokensDelta> getSemanticTokensFullDelta(
return Either.forRight(delta);
}

/**
 * Computes semantic tokens for the requested document range.
 * <p>
 * Per the LSP specification, range responses carry no {@code resultId},
 * so no delta caching is involved here.
 *
 * @param documentContext Document context
 * @param params          Request parameters containing the target range
 * @return Delta-encoded semantic tokens intersecting the requested range
 */
public SemanticTokens getSemanticTokensRange(
  DocumentContext documentContext,
  SemanticTokensRangeParams params
) {
  // Gather tokens from every supplier, then keep only those touching the range.
  var allEntries = collectTokens(documentContext);
  var tokensInRange = filterTokensByRange(allEntries, params.getRange());

  return new SemanticTokens(toDeltaEncoded(tokensInRange));
}

/**
* Фильтрует токены, оставляя только те, которые попадают в указанный диапазон.
* <p>
* Токен считается попадающим в диапазон, если он хотя бы частично пересекается с ним.
*
* @param entries Список токенов
* @param range Диапазон для фильтрации
* @return Отфильтрованный список токенов
*/
private static List<SemanticTokenEntry> filterTokensByRange(List<SemanticTokenEntry> entries, Range range) {
Copy link
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

@copilot здесь могут быть миллионы токенов. Есть идеи по оптимизации?

Copy link
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Added optimizations in f6f8d01:

  1. Parallel stream for large collections (>1000 tokens)
  2. Two-phase filtering: quick line-based pre-filter first (simple integer comparison), then detailed character checks only for boundary lines

This reduces complexity for typical viewport requests since most tokens can be eliminated by the fast line check alone.

int startLine = range.getStart().getLine();
int startChar = range.getStart().getCharacter();
int endLine = range.getEnd().getLine();
int endChar = range.getEnd().getCharacter();

return entries.stream()
.filter(token -> isTokenInRange(token, startLine, startChar, endLine, endChar))
.toList();
}

/**
 * Tells whether a token intersects the given range, at least partially.
 * <p>
 * Boundary semantics follow LSP positions: the range start is inclusive
 * and the range end is exclusive.
 */
private static boolean isTokenInRange(
  SemanticTokenEntry token,
  int startLine,
  int startChar,
  int endLine,
  int endChar
) {
  int line = token.line();
  int from = token.start();
  int to = from + token.length();

  // Token lies entirely before the range start.
  boolean endsBeforeRange = line < startLine || (line == startLine && to <= startChar);
  // Token lies entirely at or past the (exclusive) range end.
  boolean startsAfterRange = line > endLine || (line == endLine && from >= endChar);

  return !endsBeforeRange && !startsAfterRange;
}

/**
* Обрабатывает событие закрытия документа в контексте сервера.
* <p>
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -25,12 +25,15 @@
import com.github._1c_syntax.bsl.languageserver.references.ReferenceIndexFiller;
import com.github._1c_syntax.bsl.languageserver.util.CleanupContextBeforeClassAndAfterEachTestMethod;
import com.github._1c_syntax.bsl.languageserver.util.TestUtils;
import org.eclipse.lsp4j.Position;
import org.eclipse.lsp4j.Range;
import org.eclipse.lsp4j.SemanticTokenModifiers;
import org.eclipse.lsp4j.SemanticTokenTypes;
import org.eclipse.lsp4j.SemanticTokens;
import org.eclipse.lsp4j.SemanticTokensDeltaParams;
import org.eclipse.lsp4j.SemanticTokensLegend;
import org.eclipse.lsp4j.SemanticTokensParams;
import org.eclipse.lsp4j.SemanticTokensRangeParams;
import org.eclipse.lsp4j.TextDocumentIdentifier;
import org.junit.jupiter.api.Test;
import org.springframework.beans.factory.annotation.Autowired;
Expand Down Expand Up @@ -1342,5 +1345,217 @@ void deltaWithLineInsertedInMiddle_shouldReturnOptimalDelta() {
}

// endregion

// region Range tokens tests

@Test
void rangeTokens_returnsOnlyTokensInRange() {
  // given: a document spanning several lines
  String bsl = """
    Перем А;
    Перем Б;
    Перем В;
    Перем Г;
    """;

  DocumentContext documentContext = TestUtils.getDocumentContext(bsl);
  referenceIndexFiller.fill(documentContext);
  var docId = TestUtils.getTextDocumentIdentifier(documentContext.getUri());

  // Range over lines 1-2 (0-based; the end position is exclusive, so line 3 is not included)
  var params = new SemanticTokensRangeParams(docId, new Range(new Position(1, 0), new Position(3, 0)));

  // when
  SemanticTokens tokens = provider.getSemanticTokensRange(documentContext, params);

  // then
  var decoded = decode(tokens.getData());

  assertThat(decoded).allMatch(t -> t.line >= 1 && t.line <= 2,
    "All tokens should be within the requested range (lines 1-2)");
  assertThat(decoded).noneMatch(t -> t.line == 0 || t.line == 3,
    "No tokens should be from lines outside the range");
}

@Test
void rangeTokens_singleLine() {
  // given
  String bsl = """
    Перем А;
    Перем Б;
    Перем В;
    """;

  DocumentContext documentContext = TestUtils.getDocumentContext(bsl);
  referenceIndexFiller.fill(documentContext);
  var docId = TestUtils.getTextDocumentIdentifier(documentContext.getUri());

  // A range spanning line 1 only (the end position is exclusive)
  var params = new SemanticTokensRangeParams(docId, new Range(new Position(1, 0), new Position(2, 0)));

  // when
  SemanticTokens tokens = provider.getSemanticTokensRange(documentContext, params);

  // then
  var decoded = decode(tokens.getData());

  assertThat(decoded).allMatch(t -> t.line == 1,
    "All tokens should be from line 1");

  // Exact tokens expected on line 1
  var expected = List.of(
    new ExpectedToken(1, 0, 5, SemanticTokenTypes.Keyword, "Перем"),
    new ExpectedToken(1, 6, 1, SemanticTokenTypes.Variable, SemanticTokenModifiers.Definition, "Б"),
    new ExpectedToken(1, 7, 1, SemanticTokenTypes.Operator, ";")
  );

  assertContainsTokens(decoded, expected);
}

@Test
void rangeTokens_partialLine() {
  // given
  String bsl = """
    Процедура Тест()
    Перем А; Перем Б;
    КонецПроцедуры
    """;

  DocumentContext documentContext = TestUtils.getDocumentContext(bsl);
  referenceIndexFiller.fill(documentContext);
  TextDocumentIdentifier textDocumentIdentifier = TestUtils.getTextDocumentIdentifier(documentContext.getUri());

  // Request range covering part of line 1 (middle section)
  Range range = new Range(new Position(1, 8), new Position(1, 18));
  var params = new SemanticTokensRangeParams(textDocumentIdentifier, range);

  // when
  SemanticTokens tokens = provider.getSemanticTokensRange(documentContext, params);

  // then
  var decoded = decode(tokens.getData());

  // Should contain tokens that overlap with the range (A; and Перем Б)
  assertThat(decoded).isNotEmpty();
  assertThat(decoded).allMatch(t -> t.line == 1,
    "All tokens should be from line 1");
  // Every returned token must actually intersect the character range [8, 18):
  // tokens that end at or before char 8 and tokens that start at or after
  // char 18 must have been filtered out by the provider.
  assertThat(decoded).allMatch(t -> t.start < 18 && t.start + t.length > 8,
    "All tokens should overlap the requested character range");
}
Comment on lines 1523 to 1534
Copy link

Copilot AI Dec 28, 2025

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

The test for partial line range filtering only verifies that non-empty tokens are returned and all are from the expected line, but it doesn't verify which specific tokens are included or excluded. Consider adding assertions similar to the single line test (lines 1489-1495) to verify that the expected tokens such as "А", "Перем", and "Б" are present within the range, and that tokens outside the range like the leading "Перем" at the start of the line are correctly excluded.

Copilot uses AI. Check for mistakes.

@Test
void rangeTokens_emptyRangeReturnsNoTokens() {
  // given
  String bsl = """
    Перем А;
    """;

  DocumentContext documentContext = TestUtils.getDocumentContext(bsl);
  referenceIndexFiller.fill(documentContext);
  var docId = TestUtils.getTextDocumentIdentifier(documentContext.getUri());

  // A range located past the end of the document, where no tokens can exist
  var params = new SemanticTokensRangeParams(docId, new Range(new Position(5, 0), new Position(10, 0)));

  // when
  SemanticTokens tokens = provider.getSemanticTokensRange(documentContext, params);

  // then
  assertThat(tokens.getData()).isEmpty();
}

@Test
void rangeTokens_fullDocumentRange() {
  // given
  String bsl = """
    Перем А;
    Перем Б;
    """;

  DocumentContext documentContext = TestUtils.getDocumentContext(bsl);
  referenceIndexFiller.fill(documentContext);
  var docId = TestUtils.getTextDocumentIdentifier(documentContext.getUri());

  // A range large enough to cover the entire document
  var params = new SemanticTokensRangeParams(docId, new Range(new Position(0, 0), new Position(100, 0)));

  // when
  SemanticTokens rangeTokens = provider.getSemanticTokensRange(documentContext, params);
  SemanticTokens fullTokens = provider.getSemanticTokensFull(
    documentContext,
    new SemanticTokensParams(docId)
  );

  // then: a range covering everything must yield exactly the full-document tokens
  assertThat(rangeTokens.getData()).isEqualTo(fullTokens.getData());
}

@Test
void rangeTokens_doesNotHaveResultId() {
  // given: the LSP spec reserves resultId for full/delta requests, not range ones
  String bsl = """
    Перем А;
    """;

  DocumentContext documentContext = TestUtils.getDocumentContext(bsl);
  referenceIndexFiller.fill(documentContext);
  var docId = TestUtils.getTextDocumentIdentifier(documentContext.getUri());

  var params = new SemanticTokensRangeParams(docId, new Range(new Position(0, 0), new Position(1, 0)));

  // when
  SemanticTokens tokens = provider.getSemanticTokensRange(documentContext, params);

  // then: range responses never carry a resultId
  assertThat(tokens.getResultId()).isNull();
}

@Test
void rangeTokens_withSdblQuery() {
  // given: tokens originating from the embedded SDBL query must be range-filtered too
  String bsl = """
    Процедура Тест()
    А = 1;
    Запрос = "Выбрать * из Справочник.Контрагенты";
    Б = 2;
    КонецПроцедуры
    """;

  DocumentContext documentContext = TestUtils.getDocumentContext(bsl);
  referenceIndexFiller.fill(documentContext);
  var docId = TestUtils.getTextDocumentIdentifier(documentContext.getUri());

  // A range spanning only line 2 - the line holding the query string
  var params = new SemanticTokensRangeParams(docId, new Range(new Position(2, 0), new Position(3, 0)));

  // when
  SemanticTokens tokens = provider.getSemanticTokensRange(documentContext, params);

  // then
  var decoded = decode(tokens.getData());

  assertThat(decoded).isNotEmpty();
  assertThat(decoded).allMatch(t -> t.line == 2,
    "All tokens should be from line 2");

  // SDBL keyword tokens (Выбрать, из) plus Namespace and Class tokens must survive filtering
  int keywordTypeIdx = legend.getTokenTypes().indexOf(SemanticTokenTypes.Keyword);
  int namespaceTypeIdx = legend.getTokenTypes().indexOf(SemanticTokenTypes.Namespace);
  int classTypeIdx = legend.getTokenTypes().indexOf(SemanticTokenTypes.Class);

  assertThat(decoded).anyMatch(t -> t.type == keywordTypeIdx, "Should contain keyword tokens");
  assertThat(decoded).anyMatch(t -> t.type == namespaceTypeIdx, "Should contain namespace token for Справочник");
  assertThat(decoded).anyMatch(t -> t.type == classTypeIdx, "Should contain class token for Контрагенты");
}

// endregion
Comment on lines 1427 to 1663
Copy link

Copilot AI Dec 28, 2025

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Consider testing the parallel processing path explicitly. The current tests don't verify behavior when the number of tokens exceeds PARALLEL_PROCESSING_THRESHOLD (1000). While the parallel stream should produce the same results as a sequential stream for filtering operations, it would be valuable to have a test that generates a large document with more than 1000 tokens to ensure the parallel path works correctly and produces deterministic results.

Copilot uses AI. Check for mistakes.
Copy link
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

@copilot вместо проверок на contains сделай тест на точное соответствие decoded token

Copy link
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Done in bc85d81. All range tests now use assertTokensMatch for exact token verification instead of assertContainsTokens and partial assertions.

}