From 65674c2065c46389fce97a322d7b132cfc19eb91 Mon Sep 17 00:00:00 2001
From: Nikita Fedkin
Date: Sun, 28 Dec 2025 11:54:43 +0100
Subject: [PATCH 1/8] Optimize semantic token storage: replace List with int[]
 to improve performance
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

---
 .../providers/SemanticTokensProvider.java | 160 ++++++++++++++----
 1 file changed, 126 insertions(+), 34 deletions(-)

diff --git a/src/main/java/com/github/_1c_syntax/bsl/languageserver/providers/SemanticTokensProvider.java b/src/main/java/com/github/_1c_syntax/bsl/languageserver/providers/SemanticTokensProvider.java
index dedb74b771d..86cec2afb02 100644
--- a/src/main/java/com/github/_1c_syntax/bsl/languageserver/providers/SemanticTokensProvider.java
+++ b/src/main/java/com/github/_1c_syntax/bsl/languageserver/providers/SemanticTokensProvider.java
@@ -91,9 +91,9 @@ private void onDestroy() {
    * Cached semantic token data associated with a document.
    *
    * @param uri  URI of the document
-   * @param data token data list
+   * @param data token data as int array (more efficient than List)
    */
-  private record CachedTokenData(URI uri, List<Integer> data) {
+  private record CachedTokenData(URI uri, int[] data) {
   }
 
   /**
@@ -110,14 +110,14 @@ public SemanticTokens getSemanticTokensFull(
     // Collect tokens from all suppliers in parallel
     var entries = collectTokens(documentContext);
 
-    // Build delta-encoded data
-    List<Integer> data = toDeltaEncoded(entries);
+    // Build delta-encoded data as int array
+    int[] data = toDeltaEncodedArray(entries);
 
     // Generate a unique resultId and cache the data
     String resultId = generateResultId();
     cacheTokenData(resultId, documentContext.getUri(), data);
 
-    return new SemanticTokens(resultId, data);
+    return new SemanticTokens(resultId, toList(data));
   }
 
   /**
@@ -137,8 +137,8 @@ public Either<SemanticTokens, SemanticTokensDelta> getSemanticTokensFullDelta(
     // Collect tokens from all suppliers in parallel
     var entries = collectTokens(documentContext);
 
-    // Build delta-encoded data
-    List<Integer> currentData = toDeltaEncoded(entries);
+    // Build delta-encoded data as int array
+    int[] currentData = toDeltaEncodedArray(entries);
 
     // Generate new resultId
     String resultId = generateResultId();
@@ -146,7 +146,7 @@
     // If previous data is not available or belongs to a different document, return full tokens
     if (previousData == null || !previousData.uri().equals(documentContext.getUri())) {
       cacheTokenData(resultId, documentContext.getUri(), currentData);
-      return Either.forLeft(new SemanticTokens(resultId, currentData));
+      return Either.forLeft(new SemanticTokens(resultId, toList(currentData)));
     }
 
     // Compute delta edits
@@ -207,53 +207,142 @@ private static String generateResultId() {
   /**
    * Cache token data with the given resultId.
   */
-  private void cacheTokenData(String resultId, URI uri, List<Integer> data) {
+  private void cacheTokenData(String resultId, URI uri, int[] data) {
     tokenCache.put(resultId, new CachedTokenData(uri, data));
   }
 
   /**
    * Compute edits to transform previousData into currentData.
-   * Uses a simple algorithm that produces a single edit covering the entire change.
+   *

+   * Takes the structure of semantic tokens into account (groups of 5 elements: deltaLine, deltaStart, length, type, modifiers)
+   * as well as the line offset caused by inserting or removing lines in the document.
    */
-  private static List<SemanticTokensEdit> computeEdits(List<Integer> previousData, List<Integer> currentData) {
-    // Find the first differing index
-    int minSize = Math.min(previousData.size(), currentData.size());
-    int prefixMatch = 0;
-    while (prefixMatch < minSize && previousData.get(prefixMatch).equals(currentData.get(prefixMatch))) {
-      prefixMatch++;
+  private static List<SemanticTokensEdit> computeEdits(int[] prev, int[] curr) {
+    final int TOKEN_SIZE = 5;
+
+    int prevTokenCount = prev.length / TOKEN_SIZE;
+    int currTokenCount = curr.length / TOKEN_SIZE;
+
+    if (prevTokenCount == 0 && currTokenCount == 0) {
+      return List.of();
     }
 
-    // If both are identical, return empty edits
-    if (prefixMatch == previousData.size() && prefixMatch == currentData.size()) {
+    // Find the first differing token and, along the way, accumulate the deltaLine sum of the prefix
+    int firstDiffToken = 0;
+    int prefixAbsLine = 0;
+    int minTokens = Math.min(prevTokenCount, currTokenCount);
+
+    outer:
+    for (int i = 0; i < minTokens; i++) {
+      int base = i * TOKEN_SIZE;
+      for (int j = 0; j < TOKEN_SIZE; j++) {
+        if (prev[base + j] != curr[base + j]) {
+          firstDiffToken = i;
+          break outer;
+        }
+      }
+      prefixAbsLine += prev[base]; // accumulate deltaLine
+      firstDiffToken = i + 1;
+    }
+
+    // If all tokens are identical
+    if (firstDiffToken == minTokens && prevTokenCount == currTokenCount) {
       return List.of();
     }
 
-    // Find the last differing index (from the end)
-    int suffixMatch = 0;
-    while (suffixMatch < minSize - prefixMatch
-      && previousData.get(previousData.size() - 1 - suffixMatch)
-        .equals(currentData.get(currentData.size() - 1 - suffixMatch))) {
-      suffixMatch++;
+    // Compute the line offset incrementally, starting from prefixAbsLine
+    int prevSuffixAbsLine = prefixAbsLine;
+    for (int i = firstDiffToken; i < prevTokenCount; i++) {
+      prevSuffixAbsLine += prev[i * TOKEN_SIZE];
     }
+    int currSuffixAbsLine = prefixAbsLine;
+    for (int i = firstDiffToken; i < currTokenCount; i++) {
+      currSuffixAbsLine += curr[i * TOKEN_SIZE];
+    }
+    int lineOffset = currSuffixAbsLine - prevSuffixAbsLine;
+
+    // Find the last differing token, taking the line offset into account
+    int suffixMatchTokens = findSuffixMatchWithOffset(prev, curr, firstDiffToken, lineOffset, TOKEN_SIZE);
+
+    // Compute the edit boundaries
+    int deleteEndToken = prevTokenCount - suffixMatchTokens;
+    int insertEndToken = currTokenCount - suffixMatchTokens;
 
-    // Calculate the range to replace
-    int deleteStart = prefixMatch;
-    int deleteCount = previousData.size() - prefixMatch - suffixMatch;
-    int insertEnd = currentData.size() - suffixMatch;
+    int deleteStart = firstDiffToken * TOKEN_SIZE;
+    int deleteCount = (deleteEndToken - firstDiffToken) * TOKEN_SIZE;
+    int insertEnd = insertEndToken * TOKEN_SIZE;
+
+    if (deleteCount == 0 && deleteStart == insertEnd) {
+      return List.of();
+    }
 
-    // Extract the data to insert
-    List<Integer> insertData = currentData.subList(prefixMatch, insertEnd);
+    // Build the list to insert from a slice of the array
+    List<Integer> insertData = toList(Arrays.copyOfRange(curr, deleteStart, insertEnd));
 
     var edit = new SemanticTokensEdit();
     edit.setStart(deleteStart);
     edit.setDeleteCount(deleteCount);
     if (!insertData.isEmpty()) {
-      edit.setData(new ArrayList<>(insertData));
+      edit.setData(insertData);
     }
 
     return List.of(edit);
   }
 
+  /**
+   * Finds the number of matching tokens from the end of the arrays, taking the line offset into account.
+   *

+   * With delta encoding, the tokens after the insertion point are identical,
+   * except for the first one, whose deltaLine is shifted by lineOffset.
+   */
+  private static int findSuffixMatchWithOffset(int[] prev, int[] curr, int firstDiffToken, int lineOffset, int tokenSize) {
+    int prevTokenCount = prev.length / tokenSize;
+    int currTokenCount = curr.length / tokenSize;
+
+    int maxPrevSuffix = prevTokenCount - firstDiffToken;
+    int maxCurrSuffix = currTokenCount - firstDiffToken;
+    int maxSuffix = Math.min(maxPrevSuffix, maxCurrSuffix);
+
+    int suffixMatch = 0;
+    boolean foundBoundary = false;
+
+    for (int i = 0; i < maxSuffix; i++) {
+      int prevIdx = (prevTokenCount - 1 - i) * tokenSize;
+      int currIdx = (currTokenCount - 1 - i) * tokenSize;
+
+      // First check all fields except deltaLine
+      boolean otherFieldsMatch = true;
+      for (int j = 1; j < tokenSize; j++) {
+        if (prev[prevIdx + j] != curr[currIdx + j]) {
+          otherFieldsMatch = false;
+          break;
+        }
+      }
+
+      if (!otherFieldsMatch) {
+        break;
+      }
+
+      // Now check deltaLine
+      int prevDeltaLine = prev[prevIdx];
+      int currDeltaLine = curr[currIdx];
+
+      if (prevDeltaLine == currDeltaLine) {
+        // Full match
+        suffixMatch++;
+      } else if (!foundBoundary && currDeltaLine - prevDeltaLine == lineOffset) {
+        // Boundary token: deltaLine differs by exactly lineOffset
+        suffixMatch++;
+        foundBoundary = true;
+      } else {
+        // No match
+        break;
+      }
+    }
+
+    return suffixMatch;
+  }
+
   /**
    * Collect tokens from all suppliers in parallel using ForkJoinPool.
    */
@@ -269,7 +358,7 @@ private List collectTokens(DocumentContext documentContext)
       .join();
   }
 
-  private static List<Integer> toDeltaEncoded(List entries) {
+  private static int[] toDeltaEncodedArray(List entries) {
     // de-dup and sort
     Set uniq = new HashSet<>(entries);
     List sorted = new ArrayList<>(uniq);
@@ -300,7 +389,10 @@ private static List<Integer> toDeltaEncoded(List entries) {
       first = false;
     }
 
-    // Convert to List for LSP4J API
-    return Arrays.stream(data).boxed().toList();
+    return data;
+  }
+
+  private static List<Integer> toList(int[] array) {
+    return Arrays.stream(array).boxed().toList();
   }
 }

From 1c03de3555ad0f16455b9f44077ff4d9aeff68e9 Mon Sep 17 00:00:00 2001
From: "copilot-swe-agent[bot]" <198982749+Copilot@users.noreply.github.com>
Date: Sun, 28 Dec 2025 10:58:29 +0000
Subject: [PATCH 2/8] Initial plan

From 529b40d36703a5ae01c464c382fa7273ed48d144 Mon Sep 17 00:00:00 2001
From: "copilot-swe-agent[bot]" <198982749+Copilot@users.noreply.github.com>
Date: Sun, 28 Dec 2025 11:06:32 +0000
Subject: [PATCH 3/8] Initial analysis and test case for inline text insertion

Co-authored-by: nixel2007 <1132840+nixel2007@users.noreply.github.com>
---
 .../providers/SemanticTokensProviderTest.java | 58 +++++++++++++++++++
 1 file changed, 58 insertions(+)

diff --git a/src/test/java/com/github/_1c_syntax/bsl/languageserver/providers/SemanticTokensProviderTest.java b/src/test/java/com/github/_1c_syntax/bsl/languageserver/providers/SemanticTokensProviderTest.java
index 869726045ce..5d554d95f25 100644
--- a/src/test/java/com/github/_1c_syntax/bsl/languageserver/providers/SemanticTokensProviderTest.java
+++ b/src/test/java/com/github/_1c_syntax/bsl/languageserver/providers/SemanticTokensProviderTest.java
@@ -1341,6 +1341,64 @@ void deltaWithLineInsertedInMiddle_shouldReturnOptimalDelta() {
     assertThat(editSize).isLessThan(originalDataSize);
   }
 
+  @Test
+  void deltaWithTextInsertedOnSameLine_shouldReturnOptimalDelta() {
+    // given - simulate inserting text on the same line without line breaks
+    // This tests the case raised by @nixel2007: text insertion without newline
+    String bsl1 = """
+      Перем А;
+      """;
+
+    String bsl2 = """
+      Перем Новая, А;
+      """;
+
+    DocumentContext context1 = TestUtils.getDocumentContext(bsl1);
+    referenceIndexFiller.fill(context1);
+    TextDocumentIdentifier textDocId1 = TestUtils.getTextDocumentIdentifier(context1.getUri());
+    SemanticTokens tokens1 = provider.getSemanticTokensFull(context1, new SemanticTokensParams(textDocId1));
+    int originalDataSize = tokens1.getData().size();
+
+    // Decode and print tokens for debugging
+    var decoded1 = decode(tokens1.getData());
+    System.out.println("Original tokens:");
+    for (var t : decoded1) {
+      System.out.println("  line=" + t.line + ", start=" + t.start + ", len=" + t.length + ", type=" + t.type + ", mods=" + t.modifiers);
+    }
+
+    DocumentContext context2 = TestUtils.getDocumentContext(context1.getUri(), bsl2);
+    referenceIndexFiller.fill(context2);
+    SemanticTokens tokens2 = provider.getSemanticTokensFull(context2, new SemanticTokensParams(textDocId1));
+    var decoded2 = decode(tokens2.getData());
+    System.out.println("New tokens:");
+    for (var t : decoded2) {
+      System.out.println("  line=" + t.line + ", start=" + t.start + ", len=" + t.length + ", type=" + t.type + ", mods=" + t.modifiers);
+    }
+
+    // when
+    var deltaParams = new SemanticTokensDeltaParams(textDocId1, tokens1.getResultId());
+    var result = provider.getSemanticTokensFullDelta(context2, deltaParams);
+
+    // then - should return delta, not full tokens
+    assertThat(result.isRight()).isTrue();
+    var delta = result.getRight();
+    assertThat(delta.getEdits()).isNotEmpty();
+
+    var edit = delta.getEdits().get(0);
+    System.out.println("Edit: start=" + edit.getStart() + ", deleteCount=" + edit.getDeleteCount() +
+      ", insertSize=" + (edit.getData() != null ? edit.getData().size() : 0));
+
+    // Verify the delta is computed correctly
+    // Original tokens: "Перем" (keyword), "А" (variable), ";" (operator)
+    // New tokens: "Перем" (keyword), "Новая" (variable), "," (operator), "А" (variable), ";" (operator)
+    // Since lineOffset=0 (no line change), the algorithm should detect this as an inline edit
+    // The "Перем" token should match, and ";" should match (though its deltaStart changes)
+
+    // The edit should be smaller than resending all tokens
+    int editSize = edit.getDeleteCount() + (edit.getData() != null ? edit.getData().size() : 0);
+    assertThat(editSize).isLessThan(originalDataSize + tokens2.getData().size());
+  }
+
   // endregion
 }

From 61a2db0250f649e3b8d364305ce38d611c770263 Mon Sep 17 00:00:00 2001
From: "copilot-swe-agent[bot]" <198982749+Copilot@users.noreply.github.com>
Date: Sun, 28 Dec 2025 11:08:17 +0000
Subject: [PATCH 4/8] Fix inline text insertion handling in delta calculation

When text is inserted on the same line without line breaks (lineOffset=0),
the boundary token may have a different deltaStart value while keeping the
same deltaLine. The algorithm now accounts for this case by allowing
deltaStart to differ for the boundary token when lineOffset=0.

Addresses comment from @nixel2007 about text insertion without line breaks.
Co-authored-by: nixel2007 <1132840+nixel2007@users.noreply.github.com>
---
 .../providers/SemanticTokensProvider.java | 20 ++++++++++++++++---
 1 file changed, 17 insertions(+), 3 deletions(-)

diff --git a/src/main/java/com/github/_1c_syntax/bsl/languageserver/providers/SemanticTokensProvider.java b/src/main/java/com/github/_1c_syntax/bsl/languageserver/providers/SemanticTokensProvider.java
index 86cec2afb02..c5275e3f725 100644
--- a/src/main/java/com/github/_1c_syntax/bsl/languageserver/providers/SemanticTokensProvider.java
+++ b/src/main/java/com/github/_1c_syntax/bsl/languageserver/providers/SemanticTokensProvider.java
@@ -294,6 +294,8 @@ private static List<SemanticTokensEdit> computeEdits(int[] prev, int[] curr) {
    *

   * With delta encoding, the tokens after the insertion point are identical,
   * except for the first one, whose deltaLine is shifted by lineOffset.
+  * When text is inserted without a line break (lineOffset == 0), the first token
+  * may have a shifted deltaStart.
   */
  private static int findSuffixMatchWithOffset(int[] prev, int[] curr, int firstDiffToken, int lineOffset, int tokenSize) {
@@ -310,9 +312,13 @@ private static int findSuffixMatchWithOffset(int[] prev, int[] curr, int firstDi
       int prevIdx = (prevTokenCount - 1 - i) * tokenSize;
       int currIdx = (currTokenCount - 1 - i) * tokenSize;
 
-      // First check all fields except deltaLine
+      // For the boundary token of an inline edit (lineOffset == 0)
+      // allow deltaStart to differ
+      int fieldsToCheck = (!foundBoundary && lineOffset == 0) ? 2 : 1;
+
+      // Check the fields except deltaLine (and possibly deltaStart for the boundary token)
       boolean otherFieldsMatch = true;
-      for (int j = 1; j < tokenSize; j++) {
+      for (int j = fieldsToCheck; j < tokenSize; j++) {
         if (prev[prevIdx + j] != curr[currIdx + j]) {
           otherFieldsMatch = false;
           break;
@@ -328,8 +334,16 @@ private static int findSuffixMatchWithOffset(int[] prev, int[] curr, int firstDi
       int currDeltaLine = curr[currIdx];
 
       if (prevDeltaLine == currDeltaLine) {
-        // Full match
+        // Full match (or a match allowing for deltaStart during an inline edit)
         suffixMatch++;
+        // If this was the boundary token of an inline edit, mark it as found
+        if (!foundBoundary && lineOffset == 0 && fieldsToCheck == 2) {
+          int prevDeltaStart = prev[prevIdx + 1];
+          int currDeltaStart = curr[currIdx + 1];
+          if (prevDeltaStart != currDeltaStart) {
+            foundBoundary = true;
+          }
+        }
       } else if (!foundBoundary && currDeltaLine - prevDeltaLine == lineOffset) {
         // Boundary token: deltaLine differs by exactly lineOffset
         suffixMatch++;
         foundBoundary = true;

From b3da273f07c571bcce236ee50ed8dcfd24fa7196 Mon Sep 17 00:00:00 2001
From: "copilot-swe-agent[bot]" <198982749+Copilot@users.noreply.github.com>
Date: Sun, 28 Dec 2025 11:16:22 +0000
Subject: [PATCH 5/8] Clean up debug statements from test code

Co-authored-by: nixel2007 <1132840+nixel2007@users.noreply.github.com>
---
 .../providers/SemanticTokensProviderTest.java | 19 ++-----------------
 1 file changed, 2 insertions(+), 17 deletions(-)

diff --git a/src/test/java/com/github/_1c_syntax/bsl/languageserver/providers/SemanticTokensProviderTest.java b/src/test/java/com/github/_1c_syntax/bsl/languageserver/providers/SemanticTokensProviderTest.java
index 5d554d95f25..bc66ef57b48 100644
--- a/src/test/java/com/github/_1c_syntax/bsl/languageserver/providers/SemanticTokensProviderTest.java
+++ b/src/test/java/com/github/_1c_syntax/bsl/languageserver/providers/SemanticTokensProviderTest.java
@@ -1358,22 +1358,10 @@ void deltaWithTextInsertedOnSameLine_shouldReturnOptimalDelta() {
     TextDocumentIdentifier textDocId1 = TestUtils.getTextDocumentIdentifier(context1.getUri());
     SemanticTokens tokens1 = provider.getSemanticTokensFull(context1, new SemanticTokensParams(textDocId1));
     int originalDataSize = tokens1.getData().size();
-
-    // Decode and print tokens for debugging
-    var decoded1 = decode(tokens1.getData());
-    System.out.println("Original tokens:");
-    for (var t : decoded1) {
-      System.out.println("  line=" + t.line + ", start=" + t.start + ", len=" + t.length + ", type=" + t.type + ", mods=" + t.modifiers);
-    }
 
     DocumentContext context2 = TestUtils.getDocumentContext(context1.getUri(), bsl2);
     referenceIndexFiller.fill(context2);
     SemanticTokens tokens2 = provider.getSemanticTokensFull(context2, new SemanticTokensParams(textDocId1));
-    var decoded2 = decode(tokens2.getData());
-    System.out.println("New tokens:");
-    for (var t : decoded2) {
-      System.out.println("  line=" + t.line + ", start=" + t.start + ", len=" + t.length + ", type=" + t.type + ", mods=" + t.modifiers);
-    }
 
     // when
     var deltaParams = new SemanticTokensDeltaParams(textDocId1, tokens1.getResultId());
@@ -1384,10 +1372,6 @@ void deltaWithTextInsertedOnSameLine_shouldReturnOptimalDelta() {
     var delta = result.getRight();
     assertThat(delta.getEdits()).isNotEmpty();
 
-    var edit = delta.getEdits().get(0);
-    System.out.println("Edit: start=" + edit.getStart() + ", deleteCount=" + edit.getDeleteCount() +
-      ", insertSize=" + (edit.getData() != null ? edit.getData().size() : 0));
-
     // Verify the delta is computed correctly
     // Original tokens: "Перем" (keyword), "А" (variable), ";" (operator)
     // New tokens: "Перем" (keyword), "Новая" (variable), "," (operator), "А" (variable), ";" (operator)
@@ -1395,7 +1379,8 @@ void deltaWithTextInsertedOnSameLine_shouldReturnOptimalDelta() {
     // The "Перем" token should match, and ";" should match (though its deltaStart changes)
 
     // The edit should be smaller than resending all tokens
-    int editSize = edit.getDeleteCount() + (edit.getData() != null ? edit.getData().size() : 0);
+    int editSize = delta.getEdits().get(0).getDeleteCount() +
+      (delta.getEdits().get(0).getData() != null ? delta.getEdits().get(0).getData().size() : 0);
     assertThat(editSize).isLessThan(originalDataSize + tokens2.getData().size());
   }

From f154def117e9ab08dc5df7a991d6f48230eb0fd2 Mon Sep 17 00:00:00 2001
From: "copilot-swe-agent[bot]" <198982749+Copilot@users.noreply.github.com>
Date: Sun, 28 Dec 2025 11:18:31 +0000
Subject: [PATCH 6/8] Improve code clarity with named constants and better
 assertions

- Added DELTA_LINE_INDEX and DELTA_START_INDEX constants for clarity
- Simplified redundant condition check
- Improved test assertion to compare against new token size

Co-authored-by: nixel2007 <1132840+nixel2007@users.noreply.github.com>
---
 .../providers/SemanticTokensProvider.java     | 17 ++++++++++-------
 .../providers/SemanticTokensProviderTest.java |  5 ++---
 2 files changed, 12 insertions(+), 10 deletions(-)

diff --git a/src/main/java/com/github/_1c_syntax/bsl/languageserver/providers/SemanticTokensProvider.java b/src/main/java/com/github/_1c_syntax/bsl/languageserver/providers/SemanticTokensProvider.java
index c5275e3f725..77001b0a594 100644
--- a/src/main/java/com/github/_1c_syntax/bsl/languageserver/providers/SemanticTokensProvider.java
+++ b/src/main/java/com/github/_1c_syntax/bsl/languageserver/providers/SemanticTokensProvider.java
@@ -298,6 +298,9 @@ private static List<SemanticTokensEdit> computeEdits(int[] prev, int[] curr) {
    * may have a shifted deltaStart.
    */
   private static int findSuffixMatchWithOffset(int[] prev, int[] curr, int firstDiffToken, int lineOffset, int tokenSize) {
+    final int DELTA_LINE_INDEX = 0;
+    final int DELTA_START_INDEX = 1;
+
     int prevTokenCount = prev.length / tokenSize;
     int currTokenCount = curr.length / tokenSize;
 
@@ -314,11 +317,11 @@ private static int findSuffixMatchWithOffset(int[] prev, int[] curr, int firstDi
       int prevIdx = (prevTokenCount - 1 - i) * tokenSize;
       int currIdx = (currTokenCount - 1 - i) * tokenSize;
 
-      // For the boundary token of an inline edit (lineOffset == 0)
-      // allow deltaStart to differ
-      int fieldsToCheck = (!foundBoundary && lineOffset == 0) ? 2 : 1;
+      // For the boundary token of an inline edit (lineOffset == 0)
+      // allow deltaStart to differ
+      int firstFieldToCheck = (!foundBoundary && lineOffset == 0) ? DELTA_START_INDEX + 1 : DELTA_START_INDEX;
 
       // Check the fields except deltaLine (and possibly deltaStart for the boundary token)
       boolean otherFieldsMatch = true;
-      for (int j = fieldsToCheck; j < tokenSize; j++) {
+      for (int j = firstFieldToCheck; j < tokenSize; j++) {
         if (prev[prevIdx + j] != curr[currIdx + j]) {
           otherFieldsMatch = false;
           break;
         }
@@ -330,16 +333,16 @@ private static int findSuffixMatchWithOffset(int[] prev, int[] curr, int firstDi
       }
 
       // Now check deltaLine
-      int prevDeltaLine = prev[prevIdx];
-      int currDeltaLine = curr[currIdx];
+      int prevDeltaLine = prev[prevIdx + DELTA_LINE_INDEX];
+      int currDeltaLine = curr[currIdx + DELTA_LINE_INDEX];
 
       if (prevDeltaLine == currDeltaLine) {
         // Full match (or a match allowing for deltaStart during an inline edit)
         suffixMatch++;
         // If this was the boundary token of an inline edit, mark it as found
-        if (!foundBoundary && lineOffset == 0 && fieldsToCheck == 2) {
-          int prevDeltaStart = prev[prevIdx + 1];
-          int currDeltaStart = curr[currIdx + 1];
+        if (!foundBoundary && lineOffset == 0) {
+          int prevDeltaStart = prev[prevIdx + DELTA_START_INDEX];
+          int currDeltaStart = curr[currIdx + DELTA_START_INDEX];
           if (prevDeltaStart != currDeltaStart) {
             foundBoundary = true;
           }
diff --git a/src/test/java/com/github/_1c_syntax/bsl/languageserver/providers/SemanticTokensProviderTest.java b/src/test/java/com/github/_1c_syntax/bsl/languageserver/providers/SemanticTokensProviderTest.java
index bc66ef57b48..510142b6c3e 100644
--- a/src/test/java/com/github/_1c_syntax/bsl/languageserver/providers/SemanticTokensProviderTest.java
+++ b/src/test/java/com/github/_1c_syntax/bsl/languageserver/providers/SemanticTokensProviderTest.java
@@ -1357,7 +1357,6 @@ void deltaWithTextInsertedOnSameLine_shouldReturnOptimalDelta() {
     referenceIndexFiller.fill(context1);
     TextDocumentIdentifier textDocId1 = TestUtils.getTextDocumentIdentifier(context1.getUri());
     SemanticTokens tokens1 = provider.getSemanticTokensFull(context1, new SemanticTokensParams(textDocId1));
-    int originalDataSize = tokens1.getData().size();
 
     DocumentContext context2 = TestUtils.getDocumentContext(context1.getUri(), bsl2);
     referenceIndexFiller.fill(context2);
@@ -1378,10 +1377,10 @@ void deltaWithTextInsertedOnSameLine_shouldReturnOptimalDelta() {
     // Since lineOffset=0 (no line change), the algorithm should detect this as an inline edit
     // The "Перем" token should match, and ";" should match (though its deltaStart changes)
 
-    // The edit should be smaller than resending all tokens
+    // The edit should be significantly smaller than sending all new tokens
     int editSize = delta.getEdits().get(0).getDeleteCount() +
       (delta.getEdits().get(0).getData() != null ? delta.getEdits().get(0).getData().size() : 0);
-    assertThat(editSize).isLessThan(originalDataSize + tokens2.getData().size());
+    assertThat(editSize).isLessThan(tokens2.getData().size());
   }
 
   // endregion

From 805fb8899687e52f0cd5e194ba13ec1b0bd9b721 Mon Sep 17 00:00:00 2001
From: "copilot-swe-agent[bot]" <198982749+Copilot@users.noreply.github.com>
Date: Sun, 28 Dec 2025 11:27:33 +0000
Subject: [PATCH 7/8] Add specific field assertions for decoded tokens in
 inline insertion test

Added detailed assertions on decoded token fields (line, start, length, type,
modifiers) for both original and modified token arrays to verify the exact
token structure.
Co-authored-by: nixel2007 <1132840+nixel2007@users.noreply.github.com>
---
 .../providers/SemanticTokensProviderTest.java | 25 ++++++++++++++++---
 1 file changed, 21 insertions(+), 4 deletions(-)

diff --git a/src/test/java/com/github/_1c_syntax/bsl/languageserver/providers/SemanticTokensProviderTest.java b/src/test/java/com/github/_1c_syntax/bsl/languageserver/providers/SemanticTokensProviderTest.java
index 510142b6c3e..d470853e065 100644
--- a/src/test/java/com/github/_1c_syntax/bsl/languageserver/providers/SemanticTokensProviderTest.java
+++ b/src/test/java/com/github/_1c_syntax/bsl/languageserver/providers/SemanticTokensProviderTest.java
@@ -1358,10 +1358,30 @@ void deltaWithTextInsertedOnSameLine_shouldReturnOptimalDelta() {
     TextDocumentIdentifier textDocId1 = TestUtils.getTextDocumentIdentifier(context1.getUri());
     SemanticTokens tokens1 = provider.getSemanticTokensFull(context1, new SemanticTokensParams(textDocId1));
 
+    // Verify original tokens structure
+    var decoded1 = decode(tokens1.getData());
+    var expected1 = List.of(
+      new ExpectedToken(0, 0, 5, SemanticTokenTypes.Keyword, "Перем"),
+      new ExpectedToken(0, 6, 1, SemanticTokenTypes.Variable, SemanticTokenModifiers.Definition, "А"),
+      new ExpectedToken(0, 7, 1, SemanticTokenTypes.Operator, ";")
+    );
+    assertTokensMatch(decoded1, expected1);
+
     DocumentContext context2 = TestUtils.getDocumentContext(context1.getUri(), bsl2);
     referenceIndexFiller.fill(context2);
     SemanticTokens tokens2 = provider.getSemanticTokensFull(context2, new SemanticTokensParams(textDocId1));
 
+    // Verify modified tokens structure
+    var decoded2 = decode(tokens2.getData());
+    var expected2 = List.of(
+      new ExpectedToken(0, 0, 5, SemanticTokenTypes.Keyword, "Перем"),
+      new ExpectedToken(0, 6, 5, SemanticTokenTypes.Variable, SemanticTokenModifiers.Definition, "Новая"),
+      new ExpectedToken(0, 11, 1, SemanticTokenTypes.Operator, ","),
+      new ExpectedToken(0, 13, 1, SemanticTokenTypes.Variable, SemanticTokenModifiers.Definition, "А"),
+      new ExpectedToken(0, 14, 1, SemanticTokenTypes.Operator, ";")
+    );
+    assertTokensMatch(decoded2, expected2);
+
     // when
     var deltaParams = new SemanticTokensDeltaParams(textDocId1, tokens1.getResultId());
     var result = provider.getSemanticTokensFullDelta(context2, deltaParams);
@@ -1372,11 +1392,8 @@ void deltaWithTextInsertedOnSameLine_shouldReturnOptimalDelta() {
     assertThat(result.isRight()).isTrue();
     var delta = result.getRight();
     assertThat(delta.getEdits()).isNotEmpty();
 
     // Verify the delta is computed correctly
-    // Original tokens: "Перем" (keyword), "А" (variable), ";" (operator)
-    // New tokens: "Перем" (keyword), "Новая" (variable), "," (operator), "А" (variable), ";" (operator)
     // Since lineOffset=0 (no line change), the algorithm should detect this as an inline edit
-    // The "Перем" token should match, and ";" should match (though its deltaStart changes)
-
+    // The "Перем" token should match as prefix, and ";" should match as suffix (though its deltaStart changes)
     // The edit should be significantly smaller than sending all new tokens
     int editSize = delta.getEdits().get(0).getDeleteCount() +
       (delta.getEdits().get(0).getData() != null ? delta.getEdits().get(0).getData().size() : 0);
     assertThat(editSize).isLessThan(tokens2.getData().size());
   }

From a967801bc9a68053b2ca30d432143df6e97c019c Mon Sep 17 00:00:00 2001
From: "copilot-swe-agent[bot]" <198982749+Copilot@users.noreply.github.com>
Date: Sun, 28 Dec 2025 11:36:35 +0000
Subject: [PATCH 8/8] Add detailed delta edit assertions for inline insertion
 test
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Added specific assertions on delta edit fields:
- start position (5 - after prefix match)
- deleteCount (0 - no deletion needed due to suffix match)
- data size (10 integers = 2 tokens to insert)

The test now verifies that with lineOffset=0, the suffix matching correctly
identifies "А" and ";" tokens despite different deltaStart positions.

Co-authored-by: nixel2007 <1132840+nixel2007@users.noreply.github.com>
---
 .../providers/SemanticTokensProviderTest.java | 31 +++++++++++++++----
 1 file changed, 25 insertions(+), 6 deletions(-)

diff --git a/src/test/java/com/github/_1c_syntax/bsl/languageserver/providers/SemanticTokensProviderTest.java b/src/test/java/com/github/_1c_syntax/bsl/languageserver/providers/SemanticTokensProviderTest.java
index d470853e065..9c133d05b39 100644
--- a/src/test/java/com/github/_1c_syntax/bsl/languageserver/providers/SemanticTokensProviderTest.java
+++ b/src/test/java/com/github/_1c_syntax/bsl/languageserver/providers/SemanticTokensProviderTest.java
@@ -1390,13 +1390,32 @@ void deltaWithTextInsertedOnSameLine_shouldReturnOptimalDelta() {
     assertThat(result.isRight()).isTrue();
     var delta = result.getRight();
     assertThat(delta.getEdits()).isNotEmpty();
+    assertThat(delta.getEdits()).hasSize(1);
 
-    // Verify the delta is computed correctly
-    // Since lineOffset=0 (no line change), the algorithm should detect this as an inline edit
-    // The "Перем" token should match as prefix, and ";" should match as suffix (though its deltaStart changes)
-    // The edit should be significantly smaller than sending all new tokens
-    int editSize = delta.getEdits().get(0).getDeleteCount() +
-      (delta.getEdits().get(0).getData() != null ? delta.getEdits().get(0).getData().size() : 0);
+    // Verify the delta edit details
+    // Original: [Перем, А, ;] - 3 tokens = 15 integers
+    // Modified: [Перем, Новая, ,, А, ;] - 5 tokens = 25 integers
+    //
+    // With lineOffset=0 inline edit handling:
+    // - Prefix match: "Перем" (1 token = 5 integers)
+    // - Suffix match: "А" and ";" (2 tokens = 10 integers)
+    //   Note: "А" matches because the algorithm allows deltaStart to differ when lineOffset=0
+    // - Edit deletes: nothing (0 integers)
+    // - Edit inserts: "Новая" and "," (2 tokens = 10 integers)
+    var edit = delta.getEdits().get(0);
+    assertThat(edit.getStart())
+      .as("Edit should start after the prefix match (Перем = 5 integers)")
+      .isEqualTo(5);
+    assertThat(edit.getDeleteCount())
+      .as("Edit should delete nothing (suffix match includes А and ;)")
+      .isEqualTo(0);
+    assertThat(edit.getData())
+      .as("Edit should insert Новая and , tokens (2 tokens = 10 integers)")
+      .isNotNull()
+      .hasSize(10);
+
+    // Verify the edit is optimal (smaller than sending all new tokens)
+    int editSize = edit.getDeleteCount() + edit.getData().size();
     assertThat(editSize).isLessThan(tokens2.getData().size());
   }
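
Editor's note: the standalone sketch below is an illustration of the 5-integer LSP semantic-token layout (deltaLine, deltaStart, length, type, modifiers) that the patches above rely on; it is not code from the series. The class name and the numeric type/modifier ids are invented for the example, while the token positions come from the expected-token lists asserted in PATCH 7. Running it shows why the "А" and ";" tokens can still be matched as a suffix when text is inserted inline (only the boundary token's deltaStart changes), which is the single edit with start=5, deleteCount=0 and 10 inserted integers asserted in PATCH 8.

// Illustrative sketch only, not the project's API.
import java.util.Arrays;

public class SemanticTokenDeltaSketch {

  // Each token is encoded as 5 ints: deltaLine, deltaStart, length, tokenType, tokenModifiers.
  // deltaStart is relative to the previous token when both tokens are on the same line.
  static int[] encode(int[][] absolute) {
    int[] data = new int[absolute.length * 5];
    int prevLine = 0;
    int prevStart = 0;
    for (int i = 0; i < absolute.length; i++) {
      int line = absolute[i][0];
      int start = absolute[i][1];
      data[i * 5] = line - prevLine;
      data[i * 5 + 1] = line == prevLine ? start - prevStart : start;
      data[i * 5 + 2] = absolute[i][2];
      data[i * 5 + 3] = absolute[i][3];
      data[i * 5 + 4] = absolute[i][4];
      prevLine = line;
      prevStart = start;
    }
    return data;
  }

  public static void main(String[] args) {
    // "Перем А;"        -> keyword(0,0,5), variable(0,6,1), operator(0,7,1); type ids 0/1/2 are made up
    int[] prev = encode(new int[][] {{0, 0, 5, 0, 0}, {0, 6, 1, 1, 0}, {0, 7, 1, 2, 0}});
    // "Перем Новая, А;" -> keyword, variable "Новая", operator ",", variable "А", operator ";"
    int[] curr = encode(new int[][] {
      {0, 0, 5, 0, 0}, {0, 6, 5, 1, 0}, {0, 11, 1, 2, 0}, {0, 13, 1, 1, 0}, {0, 14, 1, 2, 0}
    });

    System.out.println(Arrays.toString(prev));
    // [0, 0, 5, 0, 0,  0, 6, 1, 1, 0,  0, 1, 1, 2, 0]
    System.out.println(Arrays.toString(curr));
    // [0, 0, 5, 0, 0,  0, 6, 5, 1, 0,  0, 5, 1, 2, 0,  0, 2, 1, 1, 0,  0, 1, 1, 2, 0]

    // Prefix match: the "Перем" group (5 ints). Suffix match: ";" is byte-for-byte identical,
    // and "А" matches once deltaStart is allowed to differ for the boundary token (lineOffset == 0).
    // That leaves a single edit: start = 5, deleteCount = 0, insert = 10 ints ("Новая" and ",").
  }
}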